Merge branch 'master' into feature/rank-eval
commit b1e0d698ac
@@ -24,7 +24,7 @@ buildscript {
}
}
dependencies {
classpath 'com.github.jengelman.gradle.plugins:shadow:1.2.3'
classpath 'com.github.jengelman.gradle.plugins:shadow:1.2.4'
}
}
@@ -44,9 +44,8 @@ task test(type: Test, overwrite: true)

dependencies {
compile("org.elasticsearch:elasticsearch:${version}") {
// JMH ships with the conflicting version 4.6 (JMH will not update this dependency as it is Java 6 compatible and joptsimple is one
// of the most recent compatible version). This prevents us from using jopt-simple in benchmarks (which should be ok) but allows us
// to invoke the JMH uberjar as usual.
// JMH ships with the conflicting version 4.6. This prevents us from using jopt-simple in benchmarks (which should be ok) but allows
// us to invoke the JMH uberjar as usual.
exclude group: 'net.sf.jopt-simple', module: 'jopt-simple'
}
compile "org.openjdk.jmh:jmh-core:$versions.jmh"
@@ -216,7 +216,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JNANatives.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JVMCheck.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]JarHell.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Seccomp.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Security.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]ElasticsearchClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]FilterClient.java" checks="LineLength" />
@@ -21,4 +21,4 @@ commonscodec = 1.10
hamcrest = 1.3
securemock = 1.2
# benchmark dependencies
jmh = 1.15
jmh = 1.17.3
@@ -152,7 +152,7 @@ final class RequestLogger {
httpResponse.setEntity(entity);
ContentType contentType = ContentType.get(entity);
Charset charset = StandardCharsets.UTF_8;
if (contentType != null) {
if (contentType != null && contentType.getCharset() != null) {
charset = contentType.getCharset();
}
try (BufferedReader reader = new BufferedReader(new InputStreamReader(entity.getContent(), charset))) {
@@ -44,6 +44,7 @@ import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;

import static org.hamcrest.CoreMatchers.equalTo;
@@ -51,7 +52,6 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;

public class RequestLoggerTests extends RestClientTestCase {

public void testTraceRequest() throws IOException, URISyntaxException {
HttpHost host = new HttpHost("localhost", 9200, randomBoolean() ? "http" : "https");
String expectedEndpoint = "/index/type/_api";
@@ -69,7 +69,7 @@ public class RequestLoggerTests extends RestClientTestCase {
expected += " -d '" + requestBody + "'";
HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
HttpEntity entity;
switch(randomIntBetween(0, 3)) {
switch(randomIntBetween(0, 4)) {
case 0:
entity = new StringEntity(requestBody, StandardCharsets.UTF_8);
break;
@@ -82,6 +82,10 @@ public class RequestLoggerTests extends RestClientTestCase {
case 3:
entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8));
break;
case 4:
// Evil entity without a charset
entity = new StringEntity(requestBody, (Charset) null);
break;
default:
throw new UnsupportedOperationException();
}
@@ -116,11 +120,20 @@ public class RequestLoggerTests extends RestClientTestCase {
expected += "\n# \"field\": \"value\"";
expected += "\n# }";
HttpEntity entity;
if (getRandom().nextBoolean()) {
switch(randomIntBetween(0, 2)) {
case 0:
entity = new StringEntity(responseBody, StandardCharsets.UTF_8);
} else {
break;
case 1:
//test a non repeatable entity
entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)));
break;
case 2:
// Evil entity without a charset
entity = new StringEntity(responseBody, (Charset) null);
break;
default:
throw new UnsupportedOperationException();
}
httpResponse.setEntity(entity);
}
@@ -310,7 +310,6 @@ import org.elasticsearch.rest.action.search.RestExplainAction;
import org.elasticsearch.rest.action.search.RestMultiSearchAction;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.rest.action.search.RestSearchScrollAction;
import org.elasticsearch.rest.action.search.RestSuggestAction;
import org.elasticsearch.threadpool.ThreadPool;

import static java.util.Collections.unmodifiableList;
@@ -550,7 +549,6 @@ public class ActionModule extends AbstractModule {
registerRestHandler(handlers, RestMultiGetAction.class);
registerRestHandler(handlers, RestDeleteAction.class);
registerRestHandler(handlers, org.elasticsearch.rest.action.document.RestCountAction.class);
registerRestHandler(handlers, RestSuggestAction.class);
registerRestHandler(handlers, RestTermVectorsAction.class);
registerRestHandler(handlers, RestMultiTermVectorsAction.class);
registerRestHandler(handlers, RestBulkAction.class);
@@ -34,8 +34,6 @@ import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.ClusterServiceState;
import org.elasticsearch.cluster.service.ClusterStateStatus;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@@ -46,6 +44,8 @@ import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.function.Predicate;

public class TransportClusterHealthAction extends TransportMasterNodeReadAction<ClusterHealthRequest, ClusterHealthResponse> {

private final GatewayAllocator gatewayAllocator;
@@ -142,19 +142,13 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<

assert waitFor >= 0;
final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext());
final ClusterServiceState observedState = observer.observedState();
final ClusterState state = observedState.getClusterState();
final ClusterState state = observer.observedState();
if (request.timeout().millis() == 0) {
listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0));
return;
}
final int concreteWaitFor = waitFor;
final ClusterStateObserver.ChangePredicate validationPredicate = new ClusterStateObserver.ValidationPredicate() {
@Override
protected boolean validate(ClusterServiceState newState) {
return newState.getClusterStateStatus() == ClusterStateStatus.APPLIED && validateRequest(request, newState.getClusterState(), concreteWaitFor);
}
};
final Predicate<ClusterState> validationPredicate = newState -> validateRequest(request, newState, concreteWaitFor);

final ClusterStateObserver.Listener stateListener = new ClusterStateObserver.Listener() {
@Override
@@ -174,7 +168,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
listener.onResponse(response);
}
};
if (observedState.getClusterStateStatus() == ClusterStateStatus.APPLIED && validateRequest(request, state, concreteWaitFor)) {
if (validationPredicate.test(state)) {
stateListener.onNewClusterState(state);
} else {
observer.waitForNextChange(stateListener, validationPredicate, request.timeout());
@@ -101,10 +101,10 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction<Re
// When there is a master failure after a restore has been started, this listener might not be registered
// on the current master and as such it might miss some intermediary cluster states due to batching.
// Clean up listener in that case and acknowledge completion of restore operation to client.
clusterService.remove(this);
clusterService.removeListener(this);
listener.onResponse(new RestoreSnapshotResponse(null));
} else if (newEntry == null) {
clusterService.remove(this);
clusterService.removeListener(this);
ImmutableOpenMap<ShardId, RestoreInProgress.ShardRestoreStatus> shards = prevEntry.shards();
assert prevEntry.state().completed() : "expected completed snapshot state but was " + prevEntry.state();
assert RestoreService.completed(shards) : "expected all restore entries to be completed";
@@ -121,7 +121,7 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction<Re
}
};

clusterService.addLast(clusterStateListener);
clusterService.addListener(clusterStateListener);
} else {
listener.onResponse(new RestoreSnapshotResponse(restoreCompletionResponse.getRestoreInfo()));
}
@@ -46,6 +46,9 @@ import org.elasticsearch.indices.IndicesQueryCache;
import org.elasticsearch.search.suggest.completion.CompletionStats;

import java.io.IOException;
import java.util.Arrays;
import java.util.Objects;
import java.util.stream.Stream;

public class CommonStats implements Writeable, ToXContent {

@@ -225,45 +228,19 @@ public class CommonStats implements Writeable, ToXContent {
}

public CommonStats(StreamInput in) throws IOException {
if (in.readBoolean()) {
docs = DocsStats.readDocStats(in);
}
if (in.readBoolean()) {
store = StoreStats.readStoreStats(in);
}
if (in.readBoolean()) {
indexing = IndexingStats.readIndexingStats(in);
}
if (in.readBoolean()) {
get = GetStats.readGetStats(in);
}
if (in.readBoolean()) {
search = SearchStats.readSearchStats(in);
}
if (in.readBoolean()) {
merge = MergeStats.readMergeStats(in);
}
if (in.readBoolean()) {
refresh = RefreshStats.readRefreshStats(in);
}
if (in.readBoolean()) {
flush = FlushStats.readFlushStats(in);
}
if (in.readBoolean()) {
warmer = WarmerStats.readWarmerStats(in);
}
if (in.readBoolean()) {
queryCache = QueryCacheStats.readQueryCacheStats(in);
}
if (in.readBoolean()) {
fieldData = FieldDataStats.readFieldDataStats(in);
}
if (in.readBoolean()) {
completion = CompletionStats.readCompletionStats(in);
}
if (in.readBoolean()) {
segments = SegmentsStats.readSegmentsStats(in);
}
docs = in.readOptionalStreamable(DocsStats::new);
store = in.readOptionalStreamable(StoreStats::new);
indexing = in.readOptionalStreamable(IndexingStats::new);
get = in.readOptionalStreamable(GetStats::new);
search = in.readOptionalStreamable(SearchStats::new);
merge = in.readOptionalStreamable(MergeStats::new);
refresh = in.readOptionalStreamable(RefreshStats::new);
flush = in.readOptionalStreamable(FlushStats::new);
warmer = in.readOptionalStreamable(WarmerStats::new);
queryCache = in.readOptionalStreamable(QueryCacheStats::new);
fieldData = in.readOptionalStreamable(FieldDataStats::new);
completion = in.readOptionalStreamable(CompletionStats::new);
segments = in.readOptionalStreamable(SegmentsStats::new);
translog = in.readOptionalStreamable(TranslogStats::new);
requestCache = in.readOptionalStreamable(RequestCacheStats::new);
recoveryStats = in.readOptionalStreamable(RecoveryStats::new);
@@ -271,84 +248,19 @@ public class CommonStats implements Writeable, ToXContent {

@Override
public void writeTo(StreamOutput out) throws IOException {
if (docs == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
docs.writeTo(out);
}
if (store == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
store.writeTo(out);
}
if (indexing == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
indexing.writeTo(out);
}
if (get == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
get.writeTo(out);
}
if (search == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
search.writeTo(out);
}
if (merge == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
merge.writeTo(out);
}
if (refresh == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
refresh.writeTo(out);
}
if (flush == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
flush.writeTo(out);
}
if (warmer == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
warmer.writeTo(out);
}
if (queryCache == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
queryCache.writeTo(out);
}
if (fieldData == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
fieldData.writeTo(out);
}
if (completion == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
completion.writeTo(out);
}
if (segments == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
segments.writeTo(out);
}
out.writeOptionalStreamable(docs);
out.writeOptionalStreamable(store);
out.writeOptionalStreamable(indexing);
out.writeOptionalStreamable(get);
out.writeOptionalStreamable(search);
out.writeOptionalStreamable(merge);
out.writeOptionalStreamable(refresh);
out.writeOptionalStreamable(flush);
out.writeOptionalStreamable(warmer);
out.writeOptionalStreamable(queryCache);
out.writeOptionalStreamable(fieldData);
out.writeOptionalStreamable(completion);
out.writeOptionalStreamable(segments);
out.writeOptionalStreamable(translog);
out.writeOptionalStreamable(requestCache);
out.writeOptionalStreamable(recoveryStats);
@@ -590,53 +502,12 @@ public class CommonStats implements Writeable, ToXContent {
// note, requires a wrapping object
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (docs != null) {
docs.toXContent(builder, params);
}
if (store != null) {
store.toXContent(builder, params);
}
if (indexing != null) {
indexing.toXContent(builder, params);
}
if (get != null) {
get.toXContent(builder, params);
}
if (search != null) {
search.toXContent(builder, params);
}
if (merge != null) {
merge.toXContent(builder, params);
}
if (refresh != null) {
refresh.toXContent(builder, params);
}
if (flush != null) {
flush.toXContent(builder, params);
}
if (warmer != null) {
warmer.toXContent(builder, params);
}
if (queryCache != null) {
queryCache.toXContent(builder, params);
}
if (fieldData != null) {
fieldData.toXContent(builder, params);
}
if (completion != null) {
completion.toXContent(builder, params);
}
if (segments != null) {
segments.toXContent(builder, params);
}
if (translog != null) {
translog.toXContent(builder, params);
}
if (requestCache != null) {
requestCache.toXContent(builder, params);
}
if (recoveryStats != null) {
recoveryStats.toXContent(builder, params);
final Stream<ToXContent> stream = Arrays.stream(new ToXContent[] {
docs, store, indexing, get, search, merge, refresh, flush, warmer, queryCache,
fieldData, completion, segments, translog, requestCache, recoveryStats})
.filter(Objects::nonNull);
for (ToXContent toXContent : ((Iterable<ToXContent>)stream::iterator)) {
toXContent.toXContent(builder, params);
}
return builder;
}
@@ -135,19 +135,13 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shards = new ShardStats[in.readVInt()];
for (int i = 0; i < shards.length; i++) {
shards[i] = ShardStats.readShardStats(in);
}
shards = in.readArray(ShardStats::readShardStats, (size) -> new ShardStats[size]);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(shards.length);
for (ShardStats shard : shards) {
shard.writeTo(out);
}
out.writeArray(shards);
}

@Override
@@ -24,6 +24,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.engine.CommitStats;
@@ -32,7 +33,7 @@ import org.elasticsearch.index.shard.ShardPath;

import java.io.IOException;

public class ShardStats implements Streamable, ToXContent {
public class ShardStats implements Streamable, Writeable, ToXContent {
private ShardRouting shardRouting;
private CommonStats commonStats;
@Nullable
@@ -19,19 +19,6 @@

package org.elasticsearch.action.bulk;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.LongSupplier;
import java.util.stream.Collectors;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.SparseFixedBitSet;
@@ -72,6 +59,18 @@ import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.LongSupplier;
import java.util.stream.Collectors;

/**
* Groups bulk request items by shard, optionally creating non-existent indices and
* delegates to {@link TransportShardBulkAction} for shard-level bulk execution
@@ -115,7 +114,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
this.allowIdGeneration = this.settings.getAsBoolean("action.bulk.action.allow_id_generation", true);
this.relativeTimeProvider = relativeTimeProvider;
this.ingestForwarder = new IngestActionForwarder(transportService);
clusterService.add(this.ingestForwarder);
clusterService.addStateApplier(this.ingestForwarder);
}

@Override
@@ -50,6 +50,7 @@ import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.seqno.GlobalCheckpointSyncAction;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardClosedException;
@@ -150,6 +151,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
final long version = indexResult.getVersion();
indexRequest.version(version);
indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery());
indexRequest.seqNo(indexResult.getSeqNo());
assert indexRequest.versionType().validateVersionForWrites(indexRequest.version());
response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(), indexResult.getSeqNo(),
indexResult.getVersion(), indexResult.isCreated());
@@ -173,6 +175,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
// update the request with the version so it will go to the replicas
deleteRequest.versionType(deleteRequest.versionType().versionTypeForReplicationAndRecovery());
deleteRequest.version(deleteResult.getVersion());
deleteRequest.seqNo(deleteResult.getSeqNo());
assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version());
response = new DeleteResponse(request.shardId(), deleteRequest.type(), deleteRequest.id(), deleteResult.getSeqNo(),
deleteResult.getVersion(), deleteResult.isFound());
@@ -182,6 +185,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
break;
default: throw new IllegalStateException("unexpected opType [" + itemRequest.opType() + "] found");
}

// update the bulk item request because update request execution can mutate the bulk item request
request.items()[requestIndex] = replicaRequest;
if (operationResult == null) { // in case of noop update operation
@@ -282,6 +286,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
final long version = updateOperationResult.getVersion();
indexRequest.version(version);
indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery());
indexRequest.seqNo(updateOperationResult.getSeqNo());
assert indexRequest.versionType().validateVersionForWrites(indexRequest.version());
}
break;
@@ -292,6 +297,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
// update the request with the version so it will go to the replicas
deleteRequest.versionType(deleteRequest.versionType().versionTypeForReplicationAndRecovery());
deleteRequest.version(updateOperationResult.getVersion());
deleteRequest.seqNo(updateOperationResult.getSeqNo());
assert deleteRequest.versionType().validateVersionForWrites(deleteRequest.version());
}
break;
@@ -342,6 +348,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
replicaRequest = new BulkItemRequest(request.items()[requestIndex].id(), updateDeleteRequest);
break;
}
assert (replicaRequest.request() instanceof IndexRequest
&& ((IndexRequest) replicaRequest.request()).seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) ||
(replicaRequest.request() instanceof DeleteRequest
&& ((DeleteRequest) replicaRequest.request()).seqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO);
// successful operation
break; // out of retry loop
} else if (updateOperationResult.getFailure() instanceof VersionConflictEngineException == false) {
@@ -364,10 +374,10 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
switch (docWriteRequest.opType()) {
case CREATE:
case INDEX:
operationResult = executeIndexRequestOnReplica(((IndexRequest) docWriteRequest), replica);
operationResult = executeIndexRequestOnReplica((IndexRequest) docWriteRequest, replica);
break;
case DELETE:
operationResult = executeDeleteRequestOnReplica(((DeleteRequest) docWriteRequest), replica);
operationResult = executeDeleteRequestOnReplica((DeleteRequest) docWriteRequest, replica);
break;
default:
throw new IllegalStateException("Unexpected request operation type on replica: "
@@ -129,6 +129,7 @@ public class TransportDeleteAction extends TransportWriteAction<DeleteRequest, D
// update the request with the version so it will go to the replicas
request.versionType(request.versionType().versionTypeForReplicationAndRecovery());
request.version(result.getVersion());
request.seqNo(result.getSeqNo());
assert request.versionType().validateVersionForWrites(request.version());
response = new DeleteResponse(
primary.shardId(),
@@ -22,6 +22,7 @@ package org.elasticsearch.action.index;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
@@ -48,7 +49,6 @@ import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.ResourceAlreadyExistsException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.ingest.IngestService;
import org.elasticsearch.tasks.Task;
@@ -91,7 +91,7 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
this.clusterService = clusterService;
this.ingestService = ingestService;
this.ingestForwarder = new IngestActionForwarder(transportService);
clusterService.add(this.ingestForwarder);
clusterService.addStateApplier(this.ingestForwarder);
}

@Override
@@ -19,24 +19,24 @@

package org.elasticsearch.action.ingest;

import java.util.concurrent.atomic.AtomicInteger;

import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateApplier;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.transport.TransportService;

import java.util.concurrent.atomic.AtomicInteger;

/**
* A utility for forwarding ingest requests to ingest nodes in a round-robin fashion.
*
* TODO: move this into IngestService and make index/bulk actions call that
*/
public final class IngestActionForwarder implements ClusterStateListener {
public final class IngestActionForwarder implements ClusterStateApplier {

private final TransportService transportService;
private final AtomicInteger ingestNodeGenerator = new AtomicInteger(Randomness.get().nextInt());
@@ -62,7 +62,7 @@ public final class IngestActionForwarder implements ClusterStateListener {
}

@Override
public void clusterChanged(ClusterChangedEvent event) {
public void applyClusterState(ClusterChangedEvent event) {
ingestNodes = event.state().getNodes().getIngestNodes().values().toArray(DiscoveryNode.class);
}
}
@@ -50,6 +50,7 @@ import java.util.function.Function;

abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {
private static final float DEFAULT_INDEX_BOOST = 1.0f;

protected final Logger logger;
protected final SearchTransportService searchTransportService;
@@ -66,6 +67,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
private final AtomicInteger totalOps = new AtomicInteger();
protected final AtomicArray<FirstResult> firstResults;
private final Map<String, AliasFilter> aliasFilter;
private final Map<String, Float> concreteIndexBoosts;
private final long clusterStateVersion;
private volatile AtomicArray<ShardSearchFailure> shardFailures;
private final Object shardFailuresMutex = new Object();
@@ -73,9 +75,9 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>

protected AbstractSearchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter, Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
long clusterStateVersion, SearchTask task) {
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
Executor executor, SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) {
super(startTime);
this.logger = logger;
this.searchTransportService = searchTransportService;
@@ -91,6 +93,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();
firstResults = new AtomicArray<>(shardsIts.size());
this.aliasFilter = aliasFilter;
this.concreteIndexBoosts = concreteIndexBoosts;
}

public void start() {
@@ -125,8 +128,10 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
} else {
AliasFilter filter = this.aliasFilter.get(shard.index().getUUID());
assert filter != null;

float indexBoost = concreteIndexBoosts.getOrDefault(shard.index().getUUID(), DEFAULT_INDEX_BOOST);
ShardSearchTransportRequest transportRequest = new ShardSearchTransportRequest(request, shardIt.shardId(), shardsIts.size(),
filter, startTime());
filter, indexBoost, startTime());
sendExecuteFirstPhase(node, transportRequest , new ActionListener<FirstResult>() {
@Override
public void onResponse(FirstResult result) {
@@ -47,10 +47,11 @@ class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSea
private final SearchPhaseController searchPhaseController;
SearchDfsQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter, SearchPhaseController searchPhaseController,
Executor executor, SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion, SearchTask task) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
SearchPhaseController searchPhaseController, Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts,
long startTime, long clusterStateVersion, SearchTask task) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor,
request, listener, shardsIts, startTime, clusterStateVersion, task);
this.searchPhaseController = searchPhaseController;
queryFetchResults = new AtomicArray<>(firstResults.length());
@@ -55,11 +55,11 @@ class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSe

SearchDfsQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter, SearchPhaseController searchPhaseController,
Executor executor, SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion,
SearchTask task) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
SearchPhaseController searchPhaseController, Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, long startTime,
long clusterStateVersion, SearchTask task) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor,
request, listener, shardsIts, startTime, clusterStateVersion, task);
this.searchPhaseController = searchPhaseController;
queryResults = new AtomicArray<>(firstResults.length());
@@ -40,12 +40,12 @@ class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetc

SearchQueryAndFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
SearchPhaseController searchPhaseController, Executor executor,
SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion,
SearchTask task) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor,
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor,
request, listener, shardsIts, startTime, clusterStateVersion, task);
this.searchPhaseController = searchPhaseController;

@@ -50,13 +50,13 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySea
private final SearchPhaseController searchPhaseController;

SearchQueryThenFetchAsyncAction(Logger logger, SearchTransportService searchTransportService,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode, Map<String,
AliasFilter> aliasFilter,
Function<String, DiscoveryNode> nodeIdToDiscoveryNode,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
SearchPhaseController searchPhaseController, Executor executor,
SearchRequest request, ActionListener<SearchResponse> listener,
GroupShardsIterator shardsIts, long startTime, long clusterStateVersion,
SearchTask task) {
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, executor, request, listener,
super(logger, searchTransportService, nodeIdToDiscoveryNode, aliasFilter, concreteIndexBoosts, executor, request, listener,
shardsIts, startTime, clusterStateVersion, task);
this.searchPhaseController = searchPhaseController;
fetchResults = new AtomicArray<>(firstResults.length());
@@ -34,6 +34,7 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.internal.AliasFilter;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
@@ -84,6 +85,29 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
return aliasFilterMap;
}

private Map<String, Float> resolveIndexBoosts(SearchRequest searchRequest, ClusterState clusterState) {
if (searchRequest.source() == null) {
return Collections.emptyMap();
}

SearchSourceBuilder source = searchRequest.source();
if (source.indexBoosts() == null) {
return Collections.emptyMap();
}

Map<String, Float> concreteIndexBoosts = new HashMap<>();
for (SearchSourceBuilder.IndexBoost ib : source.indexBoosts()) {
Index[] concreteIndices =
indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(), ib.getIndex());

for (Index concreteIndex : concreteIndices) {
concreteIndexBoosts.putIfAbsent(concreteIndex.getUUID(), ib.getBoost());
}
}

return Collections.unmodifiableMap(concreteIndexBoosts);
}

@Override
protected void doExecute(Task task, SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
// pure paranoia if time goes backwards we are at least positive
@@ -107,6 +131,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
searchRequest.preference());
failIfOverShardCountLimit(clusterService, shardIterators.size());

Map<String, Float> concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState);

// optimize search type for cases where there is only one shard group to search on
if (shardIterators.size() == 1) {
// if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
@@ -125,7 +151,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
}

searchAsyncAction((SearchTask)task, searchRequest, shardIterators, startTimeInMillis, clusterState,
Collections.unmodifiableMap(aliasFilter), listener).start();
Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener).start();
}

@Override
@@ -135,6 +161,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,

private AbstractSearchAsyncAction searchAsyncAction(SearchTask task, SearchRequest searchRequest, GroupShardsIterator shardIterators,
long startTime, ClusterState state, Map<String, AliasFilter> aliasFilter,
Map<String, Float> concreteIndexBoosts,
ActionListener<SearchResponse> listener) {
final Function<String, DiscoveryNode> nodesLookup = state.nodes()::get;
final long clusterStateVersion = state.version();
@@ -143,22 +170,22 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
switch(searchRequest.searchType()) {
case DFS_QUERY_THEN_FETCH:
searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup,
aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
clusterStateVersion, task);
break;
case QUERY_THEN_FETCH:
searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchTransportService, nodesLookup,
aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
clusterStateVersion, task);
break;
case DFS_QUERY_AND_FETCH:
searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup,
aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
clusterStateVersion, task);
break;
case QUERY_AND_FETCH:
searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchTransportService, nodesLookup,
aliasFilter, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
aliasFilter, concreteIndexBoosts, searchPhaseController, executor, searchRequest, listener, shardIterators, startTime,
clusterStateVersion, task);
break;
default:
@@ -177,5 +204,4 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
+ "] to a greater value if you really want to query that many shards at the same time.");
}
}

}
@@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.ClusterServiceState;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@@ -31,6 +30,7 @@ import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.function.Consumer;
import java.util.function.Predicate;

/**
* This class provides primitives for waiting for a configured number of shards
@@ -70,16 +70,10 @@ public class ActiveShardsObserver extends AbstractComponent {
}

final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext());
if (activeShardCount.enoughShardsActive(observer.observedState().getClusterState(), indexName)) {
if (activeShardCount.enoughShardsActive(observer.observedState(), indexName)) {
onResult.accept(true);
} else {
final ClusterStateObserver.ChangePredicate shardsAllocatedPredicate =
new ClusterStateObserver.ValidationPredicate() {
@Override
protected boolean validate(final ClusterServiceState newState) {
return activeShardCount.enoughShardsActive(newState.getClusterState(), indexName);
}
};
final Predicate<ClusterState> shardsAllocatedPredicate = newState -> activeShardCount.enoughShardsActive(newState, indexName);

final ClusterStateObserver.Listener observerListener = new ClusterStateObserver.Listener() {
@Override
@@ -505,11 +505,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indicesLevelRequest = readRequestFrom(in);
int size = in.readVInt();
shards = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
shards.add(new ShardRouting(in));
}
shards = in.readList(ShardRouting::new);
nodeId = in.readString();
}

@@ -517,11 +513,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
indicesLevelRequest.writeTo(out);
int size = shards.size();
out.writeVInt(size);
for (int i = 0; i < size; i++) {
shards.get(i).writeTo(out);
}
out.writeList(shards);
out.writeString(nodeId);
}
}
@@ -566,18 +558,9 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
super.readFrom(in);
nodeId = in.readString();
totalShards = in.readVInt();
int resultsSize = in.readVInt();
results = new ArrayList<>(resultsSize);
for (; resultsSize > 0; resultsSize--) {
final ShardOperationResult result = in.readBoolean() ? readShardResult(in) : null;
results.add(result);
}
results = in.readList((stream) -> stream.readBoolean() ? readShardResult(stream) : null);
if (in.readBoolean()) {
int failureShards = in.readVInt();
exceptions = new ArrayList<>(failureShards);
for (int i = 0; i < failureShards; i++) {
exceptions.add(new BroadcastShardOperationFailedException(in));
}
exceptions = in.readList(BroadcastShardOperationFailedException::new);
} else {
exceptions = null;
}
@@ -594,11 +577,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
}
out.writeBoolean(exceptions != null);
if (exceptions != null) {
int failureShards = exceptions.size();
out.writeVInt(failureShards);
for (int i = 0; i < failureShards; i++) {
exceptions.get(i).writeTo(out);
}
out.writeList(exceptions);
}
}
}
@@ -34,7 +34,6 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.ClusterServiceState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
@@ -46,6 +45,7 @@ import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportService;

import java.util.function.Predicate;
import java.util.function.Supplier;

/**
@@ -111,14 +111,6 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
private volatile ClusterStateObserver observer;
private final Task task;

private final ClusterStateObserver.ChangePredicate retryableOrNoBlockPredicate = new ClusterStateObserver.ValidationPredicate() {
@Override
protected boolean validate(ClusterServiceState newState) {
ClusterBlockException blockException = checkBlock(request, newState.getClusterState());
return (blockException == null || !blockException.retryable());
}
};

AsyncSingleAction(Task task, Request request, ActionListener<Response> listener) {
this.task = task;
this.request = request;
@@ -134,7 +126,8 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
}

protected void doStart() {
final ClusterState clusterState = observer.observedState().getClusterState();
final ClusterState clusterState = observer.observedState();
final Predicate<ClusterState> masterChangePredicate = MasterNodeChangePredicate.build(clusterState);
final DiscoveryNodes nodes = clusterState.nodes();
if (nodes.isLocalNodeElectedMaster() || localExecute(request)) {
// check for block, if blocked, retry, else, execute locally
@@ -144,7 +137,10 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
listener.onFailure(blockException);
} else {
logger.trace("can't execute due to a cluster block, retrying", blockException);
retry(blockException, retryableOrNoBlockPredicate);
retry(blockException, newState -> {
ClusterBlockException newException = checkBlock(request, newState);
return (newException == null || !newException.retryable());
});
}
} else {
ActionListener<Response> delegate = new ActionListener<Response>() {
@@ -158,7 +154,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
if (t instanceof Discovery.FailedToCommitClusterStateException
|| (t instanceof NotMasterException)) {
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("master could not publish cluster state or stepped down before publishing action [{}], scheduling a retry", actionName), t);
retry(t, MasterNodeChangePredicate.INSTANCE);
retry(t, masterChangePredicate);
} else {
listener.onFailure(t);
}
@@ -168,14 +164,14 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
threadPool.executor(executor).execute(new ActionRunnable(delegate) {
@Override
protected void doRun() throws Exception {
masterOperation(task, request, clusterService.state(), delegate);
masterOperation(task, request, clusterState, delegate);
}
});
}
} else {
if (nodes.getMasterNode() == null) {
logger.debug("no known master node, scheduling a retry");
retry(null, MasterNodeChangePredicate.INSTANCE);
retry(null, masterChangePredicate);
} else {
taskManager.registerChildTask(task, nodes.getMasterNode().getId());
transportService.sendRequest(nodes.getMasterNode(), actionName, request, new ActionListenerResponseHandler<Response>(listener, TransportMasterNodeAction.this::newResponse) {
@@ -186,7 +182,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
// we want to retry here a bit to see if a new master is elected
logger.debug("connection exception while trying to forward request with action name [{}] to master node [{}], scheduling a retry. Error: [{}]",
actionName, nodes.getMasterNode(), exp.getDetailedMessage());
retry(cause, MasterNodeChangePredicate.INSTANCE);
retry(cause, masterChangePredicate);
} else {
listener.onFailure(exp);
}
@@ -196,7 +192,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
}
}

private void retry(final Throwable failure, final ClusterStateObserver.ChangePredicate changePredicate) {
private void retry(final Throwable failure, final Predicate<ClusterState> statePredicate) {
observer.waitForNextChange(
new ClusterStateObserver.Listener() {
@Override
@@ -214,7 +210,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
logger.debug((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("timed out while retrying [{}] after failure (timeout [{}])", actionName, timeout), failure);
listener.onFailure(new MasterNotDiscoveredException(failure));
}
}, changePredicate
}, statePredicate
);
}
}
@@ -67,6 +67,12 @@ public class ReplicationResponse extends ActionResponse {

public static class ShardInfo implements Streamable, ToXContent {

private static final String _SHARDS = "_shards";
private static final String TOTAL = "total";
private static final String SUCCESSFUL = "successful";
private static final String FAILED = "failed";
private static final String FAILURES = "failures";

private int total;
private int successful;
private Failure[] failures = EMPTY;
@@ -165,12 +171,12 @@ public class ReplicationResponse extends ActionResponse {

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields._SHARDS);
builder.field(Fields.TOTAL, total);
builder.field(Fields.SUCCESSFUL, successful);
builder.field(Fields.FAILED, getFailed());
builder.startObject(_SHARDS);
builder.field(TOTAL, total);
builder.field(SUCCESSFUL, successful);
builder.field(FAILED, getFailed());
if (failures.length > 0) {
builder.startArray(Fields.FAILURES);
builder.startArray(FAILURES);
for (Failure failure : failures) {
failure.toXContent(builder, params);
}
@@ -197,6 +203,13 @@ public class ReplicationResponse extends ActionResponse {

public static class Failure implements ShardOperationFailedException, ToXContent {

private static final String _INDEX = "_index";
private static final String _SHARD = "_shard";
private static final String _NODE = "_node";
private static final String REASON = "reason";
private static final String STATUS = "status";
private static final String PRIMARY = "primary";

private ShardId shardId;
private String nodeId;
private Exception cause;
@@ -313,39 +326,18 @@ public class ReplicationResponse extends ActionResponse {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Fields._INDEX, shardId.getIndexName());
builder.field(Fields._SHARD, shardId.id());
builder.field(Fields._NODE, nodeId);
builder.field(Fields.REASON);
builder.field(_INDEX, shardId.getIndexName());
builder.field(_SHARD, shardId.id());
builder.field(_NODE, nodeId);
builder.field(REASON);
builder.startObject();
ElasticsearchException.toXContent(builder, params, cause);
builder.endObject();
builder.field(Fields.STATUS, status);
builder.field(Fields.PRIMARY, primary);
builder.field(STATUS, status);
builder.field(PRIMARY, primary);
builder.endObject();
return builder;
}

private static class Fields {

private static final String _INDEX = "_index";
private static final String _SHARD = "_shard";
private static final String _NODE = "_node";
private static final String REASON = "reason";
private static final String STATUS = "status";
private static final String PRIMARY = "primary";

}
}

private static class Fields {

private static final String _SHARDS = "_shards";
private static final String TOTAL = "total";
private static final String SUCCESSFUL = "successful";
private static final String FAILED = "failed";
private static final String FAILURES = "failures";

}
}
}
@@ -630,7 +630,7 @@ public abstract class TransportReplicationAction<
|
|||
@Override
|
||||
protected void doRun() {
|
||||
setPhase(task, "routing");
|
||||
final ClusterState state = observer.observedState().getClusterState();
|
||||
final ClusterState state = observer.observedState();
|
||||
if (handleBlockExceptions(state)) {
|
||||
return;
|
||||
}
|
||||
|
|
|
@@ -39,12 +39,12 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.node.NodeClosedException;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportResponseHandler;
|
||||
import org.elasticsearch.transport.ConnectTransportException;
|
||||
import org.elasticsearch.transport.TransportChannel;
|
||||
import org.elasticsearch.transport.TransportException;
|
||||
import org.elasticsearch.transport.TransportRequestHandler;
|
||||
import org.elasticsearch.transport.TransportRequestOptions;
|
||||
import org.elasticsearch.transport.TransportResponseHandler;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.function.Supplier;
|
||||
|
@@ -124,7 +124,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
|
|||
}
|
||||
|
||||
protected void doStart() {
|
||||
final ClusterState clusterState = observer.observedState().getClusterState();
|
||||
final ClusterState clusterState = observer.observedState();
|
||||
nodes = clusterState.nodes();
|
||||
try {
|
||||
ClusterBlockException blockException = checkGlobalBlock(clusterState);
|
||||
|
|
|
@@ -30,11 +30,13 @@ import org.apache.lucene.util.IOUtils;
|
|||
import org.apache.lucene.util.StringHelper;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cli.ExitCodes;
|
||||
import org.elasticsearch.cli.Terminal;
|
||||
import org.elasticsearch.cli.UserException;
|
||||
import org.elasticsearch.common.PidFile;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.inject.CreationException;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.logging.ESLoggerFactory;
|
||||
import org.elasticsearch.common.logging.LogConfigurator;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
|
@@ -56,7 +58,9 @@ import java.net.URISyntaxException;
|
|||
import java.nio.file.Path;
|
||||
import java.security.NoSuchAlgorithmException;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
|
||||
/**
|
||||
|
@@ -93,7 +97,7 @@ final class Bootstrap {
|
|||
}
|
||||
|
||||
/** initialize native resources */
|
||||
public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean seccomp, boolean ctrlHandler) {
|
||||
public static void initializeNatives(Path tmpFile, boolean mlockAll, boolean systemCallFilter, boolean ctrlHandler) {
|
||||
final Logger logger = Loggers.getLogger(Bootstrap.class);
|
||||
|
||||
// check if the user is running as root, and bail
|
||||
|
@@ -101,9 +105,9 @@ final class Bootstrap {
|
|||
throw new RuntimeException("can not run elasticsearch as root");
|
||||
}
|
||||
|
||||
// enable secure computing mode
|
||||
if (seccomp) {
|
||||
Natives.trySeccomp(tmpFile);
|
||||
// enable system call filter
|
||||
if (systemCallFilter) {
|
||||
Natives.tryInstallSystemCallFilter(tmpFile);
|
||||
}
|
||||
|
||||
// mlockall if requested
|
||||
|
@@ -177,7 +181,7 @@ final class Bootstrap {
|
|||
initializeNatives(
|
||||
environment.tmpFile(),
|
||||
BootstrapSettings.MEMORY_LOCK_SETTING.get(settings),
|
||||
BootstrapSettings.SECCOMP_SETTING.get(settings),
|
||||
BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings),
|
||||
BootstrapSettings.CTRLHANDLER_SETTING.get(settings));
|
||||
|
||||
// initialize probes before the security manager is installed
|
||||
|
|
|
@@ -166,7 +166,7 @@ final class BootstrapChecks {
|
|||
}
|
||||
checks.add(new ClientJvmCheck());
|
||||
checks.add(new UseSerialGCCheck());
|
||||
checks.add(new SystemCallFilterCheck(BootstrapSettings.SECCOMP_SETTING.get(settings)));
|
||||
checks.add(new SystemCallFilterCheck(BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings)));
|
||||
checks.add(new OnErrorCheck());
|
||||
checks.add(new OnOutOfMemoryErrorCheck());
|
||||
checks.add(new G1GCCheck());
|
||||
|
@@ -463,12 +463,12 @@ final class BootstrapChecks {
|
|||
|
||||
@Override
|
||||
public boolean check() {
|
||||
return areSystemCallFiltersEnabled && !isSeccompInstalled();
|
||||
return areSystemCallFiltersEnabled && !isSystemCallFilterInstalled();
|
||||
}
|
||||
|
||||
// visible for testing
|
||||
boolean isSeccompInstalled() {
|
||||
return Natives.isSeccompInstalled();
|
||||
boolean isSystemCallFilterInstalled() {
|
||||
return Natives.isSystemCallFilterInstalled();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -483,12 +483,12 @@ final class BootstrapChecks {
|
|||
|
||||
@Override
|
||||
public boolean check() {
|
||||
return isSeccompInstalled() && mightFork();
|
||||
return isSystemCallFilterInstalled() && mightFork();
|
||||
}
|
||||
|
||||
// visible for testing
|
||||
boolean isSeccompInstalled() {
|
||||
return Natives.isSeccompInstalled();
|
||||
boolean isSystemCallFilterInstalled() {
|
||||
return Natives.isSystemCallFilterInstalled();
|
||||
}
|
||||
|
||||
// visible for testing
|
||||
|
@@ -521,7 +521,7 @@ final class BootstrapChecks {
|
|||
"OnError [%s] requires forking but is prevented by system call filters ([%s=true]);" +
|
||||
" upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError",
|
||||
onError(),
|
||||
BootstrapSettings.SECCOMP_SETTING.getKey());
|
||||
BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.getKey());
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -546,7 +546,7 @@ final class BootstrapChecks {
|
|||
"OnOutOfMemoryError [%s] requires forking but is prevented by system call filters ([%s=true]);" +
|
||||
" upgrade to at least Java 8u92 and use ExitOnOutOfMemoryError",
|
||||
onOutOfMemoryError(),
|
||||
BootstrapSettings.SECCOMP_SETTING.getKey());
|
||||
BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.getKey());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -24,16 +24,16 @@ import org.elasticsearch.common.SuppressForbidden;
|
|||
import java.util.Dictionary;
|
||||
import java.util.Enumeration;
|
||||
|
||||
/**
|
||||
* Exposes system startup information
|
||||
/**
|
||||
* Exposes system startup information
|
||||
*/
|
||||
@SuppressForbidden(reason = "exposes read-only view of system properties")
|
||||
public final class BootstrapInfo {
|
||||
|
||||
/** no instantiation */
|
||||
private BootstrapInfo() {}
|
||||
|
||||
/**
|
||||
|
||||
/**
|
||||
* Returns true if we successfully loaded native libraries.
|
||||
* <p>
|
||||
* If this returns false, then native operations such as locking
|
||||
|
@@ -42,19 +42,19 @@ public final class BootstrapInfo {
|
|||
public static boolean isNativesAvailable() {
|
||||
return Natives.JNA_AVAILABLE;
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
/**
|
||||
* Returns true if we were able to lock the process's address space.
|
||||
*/
|
||||
public static boolean isMemoryLocked() {
|
||||
return Natives.isMemoryLocked();
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Returns true if secure computing mode is enabled (supported systems only)
|
||||
* Returns true if system call filter is installed (supported systems only)
|
||||
*/
|
||||
public static boolean isSeccompInstalled() {
|
||||
return Natives.isSeccompInstalled();
|
||||
public static boolean isSystemCallFilterInstalled() {
|
||||
return Natives.isSystemCallFilterInstalled();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@@ -33,8 +33,8 @@ public final class BootstrapSettings {
|
|||
|
||||
public static final Setting<Boolean> MEMORY_LOCK_SETTING =
|
||||
Setting.boolSetting("bootstrap.memory_lock", false, Property.NodeScope);
|
||||
public static final Setting<Boolean> SECCOMP_SETTING =
|
||||
Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope);
|
||||
public static final Setting<Boolean> SYSTEM_CALL_FILTER_SETTING =
|
||||
Setting.boolSetting("bootstrap.system_call_filter", true, Property.NodeScope);
|
||||
public static final Setting<Boolean> CTRLHANDLER_SETTING =
|
||||
Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope);
|
||||
|
||||
|
|
|
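The hunk above renames the node setting key from bootstrap.seccomp to bootstrap.system_call_filter. A minimal sketch of reading the renamed setting, assuming the usual Settings builder and that BootstrapSettings is accessible from the caller; illustrative only, not part of this commit:

    // illustrative sketch: the renamed key, read through the new Setting constant
    Settings settings = Settings.builder()
            .put("bootstrap.system_call_filter", true)   // formerly "bootstrap.seccomp"
            .build();
    boolean filterEnabled = BootstrapSettings.SYSTEM_CALL_FILTER_SETTING.get(settings);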
@@ -43,11 +43,11 @@ class JNANatives {
|
|||
|
||||
// Set to true, in case native mlockall call was successful
|
||||
static boolean LOCAL_MLOCKALL = false;
|
||||
// Set to true, in case native seccomp call was successful
|
||||
static boolean LOCAL_SECCOMP = false;
|
||||
// Set to true, in case native system call filter install was successful
|
||||
static boolean LOCAL_SYSTEM_CALL_FILTER = false;
|
||||
// Set to true, in case policy can be applied to all threads of the process (even existing ones)
|
||||
// otherwise they are only inherited for new threads (ES app threads)
|
||||
static boolean LOCAL_SECCOMP_ALL = false;
|
||||
static boolean LOCAL_SYSTEM_CALL_FILTER_ALL = false;
|
||||
// set to the maximum number of threads that can be created for
|
||||
// the user ID that owns the running Elasticsearch process
|
||||
static long MAX_NUMBER_OF_THREADS = -1;
|
||||
|
@@ -210,12 +210,12 @@ class JNANatives {
|
|||
}
|
||||
}
|
||||
|
||||
static void trySeccomp(Path tmpFile) {
|
||||
static void tryInstallSystemCallFilter(Path tmpFile) {
|
||||
try {
|
||||
int ret = Seccomp.init(tmpFile);
|
||||
LOCAL_SECCOMP = true;
|
||||
int ret = SystemCallFilter.init(tmpFile);
|
||||
LOCAL_SYSTEM_CALL_FILTER = true;
|
||||
if (ret == 1) {
|
||||
LOCAL_SECCOMP_ALL = true;
|
||||
LOCAL_SYSTEM_CALL_FILTER_ALL = true;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
// this is likely to happen unless the kernel is newish, it's a best effort at the moment
|
||||
|
|
|
@@ -91,12 +91,12 @@ final class Natives {
|
|||
return JNANatives.LOCAL_MLOCKALL;
|
||||
}
|
||||
|
||||
static void trySeccomp(Path tmpFile) {
|
||||
static void tryInstallSystemCallFilter(Path tmpFile) {
|
||||
if (!JNA_AVAILABLE) {
|
||||
logger.warn("cannot install syscall filters because JNA is not available");
|
||||
logger.warn("cannot install system call filter because JNA is not available");
|
||||
return;
|
||||
}
|
||||
JNANatives.trySeccomp(tmpFile);
|
||||
JNANatives.tryInstallSystemCallFilter(tmpFile);
|
||||
}
|
||||
|
||||
static void trySetMaxNumberOfThreads() {
|
||||
|
@@ -115,10 +115,10 @@ final class Natives {
|
|||
JNANatives.trySetMaxSizeVirtualMemory();
|
||||
}
|
||||
|
||||
static boolean isSeccompInstalled() {
|
||||
static boolean isSystemCallFilterInstalled() {
|
||||
if (!JNA_AVAILABLE) {
|
||||
return false;
|
||||
}
|
||||
return JNANatives.LOCAL_SECCOMP;
|
||||
return JNANatives.LOCAL_SYSTEM_CALL_FILTER;
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -34,7 +34,7 @@ import java.util.List;
|
|||
import java.util.Locale;
|
||||
|
||||
/**
|
||||
* Spawns native plugin controller processes if present. Will only work prior to seccomp being set up.
|
||||
* Spawns native plugin controller processes if present. Will only work prior to a system call filter being installed.
|
||||
*/
|
||||
final class Spawner implements Closeable {
|
||||
|
||||
|
|
|
@@ -43,8 +43,7 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Installs a limited form of secure computing mode,
|
||||
* to filters system calls to block process execution.
|
||||
* Installs a system call filter to block process execution.
|
||||
* <p>
|
||||
* This is supported on Linux, Solaris, FreeBSD, OpenBSD, Mac OS X, and Windows.
|
||||
* <p>
|
||||
|
@@ -91,8 +90,8 @@ import java.util.Map;
|
|||
* https://docs.oracle.com/cd/E23824_01/html/821-1456/prbac-2.html</a>
|
||||
*/
|
||||
// not an example of how to write code!!!
|
||||
final class Seccomp {
|
||||
private static final Logger logger = Loggers.getLogger(Seccomp.class);
|
||||
final class SystemCallFilter {
|
||||
private static final Logger logger = Loggers.getLogger(SystemCallFilter.class);
|
||||
|
||||
// Linux implementation, based on seccomp(2) or prctl(2) with bpf filtering
|
||||
|
||||
|
@@ -269,7 +268,8 @@ final class Seccomp {
|
|||
|
||||
// we couldn't link methods, could be some really ancient kernel (e.g. < 2.1.57) or some bug
|
||||
if (linux_libc == null) {
|
||||
throw new UnsupportedOperationException("seccomp unavailable: could not link methods. requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
|
||||
throw new UnsupportedOperationException("seccomp unavailable: could not link methods. requires kernel 3.5+ " +
|
||||
"with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
|
||||
}
|
||||
|
||||
// pure paranoia:
|
||||
|
@@ -319,7 +319,8 @@ final class Seccomp {
|
|||
switch (errno) {
|
||||
case ENOSYS: break; // ok
|
||||
case EINVAL: break; // ok
|
||||
default: throw new UnsupportedOperationException("seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG): " + JNACLibrary.strerror(errno));
|
||||
default: throw new UnsupportedOperationException("seccomp(SECCOMP_SET_MODE_FILTER, BOGUS_FLAG): "
|
||||
+ JNACLibrary.strerror(errno));
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -346,7 +347,8 @@ final class Seccomp {
|
|||
int errno = Native.getLastError();
|
||||
if (errno == EINVAL) {
|
||||
// friendly error, this will be the typical case for an old kernel
|
||||
throw new UnsupportedOperationException("seccomp unavailable: requires kernel 3.5+ with CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
|
||||
throw new UnsupportedOperationException("seccomp unavailable: requires kernel 3.5+ with" +
|
||||
" CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER compiled in");
|
||||
} else {
|
||||
throw new UnsupportedOperationException("prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(errno));
|
||||
}
|
||||
|
@@ -358,7 +360,8 @@ final class Seccomp {
|
|||
default:
|
||||
int errno = Native.getLastError();
|
||||
if (errno == EINVAL) {
|
||||
throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP not compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed");
|
||||
throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP not compiled into kernel," +
|
||||
" CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed");
|
||||
} else {
|
||||
throw new UnsupportedOperationException("prctl(PR_GET_SECCOMP): " + JNACLibrary.strerror(errno));
|
||||
}
|
||||
|
@@ -368,7 +371,8 @@ final class Seccomp {
|
|||
int errno = Native.getLastError();
|
||||
switch (errno) {
|
||||
case EFAULT: break; // available
|
||||
case EINVAL: throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP_FILTER not compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed");
|
||||
case EINVAL: throw new UnsupportedOperationException("seccomp unavailable: CONFIG_SECCOMP_FILTER not" +
|
||||
" compiled into kernel, CONFIG_SECCOMP and CONFIG_SECCOMP_FILTER are needed");
|
||||
default: throw new UnsupportedOperationException("prctl(PR_SET_SECCOMP): " + JNACLibrary.strerror(errno));
|
||||
}
|
||||
}
|
||||
|
@@ -380,10 +384,12 @@ final class Seccomp {
|
|||
|
||||
// check it worked
|
||||
if (linux_prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0) != 1) {
|
||||
throw new UnsupportedOperationException("seccomp filter did not really succeed: prctl(PR_GET_NO_NEW_PRIVS): " + JNACLibrary.strerror(Native.getLastError()));
|
||||
throw new UnsupportedOperationException("seccomp filter did not really succeed: prctl(PR_GET_NO_NEW_PRIVS): " +
|
||||
JNACLibrary.strerror(Native.getLastError()));
|
||||
}
|
||||
|
||||
// BPF installed to check arch, limit, then syscall. See https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details.
|
||||
// BPF installed to check arch, limit, then syscall.
|
||||
// See https://www.kernel.org/doc/Documentation/prctl/seccomp_filter.txt for details.
|
||||
SockFilter insns[] = {
|
||||
/* 1 */ BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_ARCH_OFFSET), //
|
||||
/* 2 */ BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, arch.audit, 0, 7), // if (arch != audit) goto fail;
|
||||
|
@@ -408,7 +414,8 @@ final class Seccomp {
|
|||
method = 0;
|
||||
int errno1 = Native.getLastError();
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...", JNACLibrary.strerror(errno1));
|
||||
logger.debug("seccomp(SECCOMP_SET_MODE_FILTER): {}, falling back to prctl(PR_SET_SECCOMP)...",
|
||||
JNACLibrary.strerror(errno1));
|
||||
}
|
||||
if (linux_prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, pointer, 0, 0) != 0) {
|
||||
int errno2 = Native.getLastError();
|
||||
|
@@ -419,7 +426,8 @@ final class Seccomp {
|
|||
|
||||
// now check that the filter was really installed, we should be in filter mode.
|
||||
if (linux_prctl(PR_GET_SECCOMP, 0, 0, 0, 0) != 2) {
|
||||
throw new UnsupportedOperationException("seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): " + JNACLibrary.strerror(Native.getLastError()));
|
||||
throw new UnsupportedOperationException("seccomp filter installation did not really succeed. seccomp(PR_GET_SECCOMP): "
|
||||
+ JNACLibrary.strerror(Native.getLastError()));
|
||||
}
|
||||
|
||||
logger.debug("Linux seccomp filter installation successful, threads: [{}]", method == 1 ? "all" : "app" );
|
|
@@ -17,10 +17,18 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster.service;
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
public enum ClusterStateStatus {
|
||||
UNKNOWN,
|
||||
BEING_APPLIED,
|
||||
APPLIED;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
|
||||
/**
|
||||
* A component that is in charge of applying an incoming cluster state to the node internal data structures.
|
||||
* The single apply method is called before the cluster state becomes visible via {@link ClusterService#state()}.
|
||||
*/
|
||||
public interface ClusterStateApplier {
|
||||
|
||||
/**
|
||||
* Called when a new cluster state ({@link ClusterChangedEvent#state()} needs to be applied
|
||||
*/
|
||||
void applyClusterState(ClusterChangedEvent event);
|
||||
}
|
|
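The interface above replaces plain listeners for components that must update internal structures before a new cluster state becomes visible. A minimal sketch of an applier and its registration, assuming the addStateApplier/removeApplier methods added to ClusterService later in this diff; the class name is hypothetical:

    // illustrative sketch: an applier invoked before ClusterService#state() exposes the new state
    final class LoggingStateApplier implements ClusterStateApplier {
        @Override
        public void applyClusterState(ClusterChangedEvent event) {
            System.out.println("applying cluster state version " + event.state().version());
        }
    }
    // given a ClusterService instance:
    //     clusterService.addStateApplier(new LoggingStateApplier());
    //     clusterService.removeApplier(applier);   // symmetric removal on shutdown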
@@ -22,13 +22,12 @@ package org.elasticsearch.cluster;
|
|||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.cluster.service.ClusterStateStatus;
|
||||
import org.elasticsearch.cluster.service.ClusterServiceState;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
/**
|
||||
* A utility class which simplifies interacting with the cluster state in cases where
|
||||
|
@@ -39,20 +38,14 @@ public class ClusterStateObserver {
|
|||
|
||||
protected final Logger logger;
|
||||
|
||||
public final ChangePredicate MATCH_ALL_CHANGES_PREDICATE = new EventPredicate() {
|
||||
|
||||
@Override
|
||||
public boolean apply(ClusterChangedEvent changedEvent) {
|
||||
return changedEvent.previousState().version() != changedEvent.state().version();
|
||||
}
|
||||
};
|
||||
private final Predicate<ClusterState> MATCH_ALL_CHANGES_PREDICATE = state -> true;
|
||||
|
||||
private final ClusterService clusterService;
|
||||
private final ThreadContext contextHolder;
|
||||
volatile TimeValue timeOutValue;
|
||||
|
||||
|
||||
final AtomicReference<ClusterServiceState> lastObservedState;
|
||||
final AtomicReference<ClusterState> lastObservedState;
|
||||
final TimeoutClusterStateListener clusterStateListener = new ObserverClusterStateListener();
|
||||
// observingContext is not null when waiting on cluster state changes
|
||||
final AtomicReference<ObservingContext> observingContext = new AtomicReference<>(null);
|
||||
|
@@ -70,8 +63,17 @@ public class ClusterStateObserver {
|
|||
* to wait indefinitely
|
||||
*/
|
||||
public ClusterStateObserver(ClusterService clusterService, @Nullable TimeValue timeout, Logger logger, ThreadContext contextHolder) {
|
||||
this(clusterService.state(), clusterService, timeout, logger, contextHolder);
|
||||
}
|
||||
/**
|
||||
* @param timeout a global timeout for this observer. After it has expired the observer
|
||||
* will fail any existing or new #waitForNextChange calls. Set to null
|
||||
* to wait indefinitely
|
||||
*/
|
||||
public ClusterStateObserver(ClusterState initialState, ClusterService clusterService, @Nullable TimeValue timeout, Logger logger,
|
||||
ThreadContext contextHolder) {
|
||||
this.clusterService = clusterService;
|
||||
this.lastObservedState = new AtomicReference<>(clusterService.clusterServiceState());
|
||||
this.lastObservedState = new AtomicReference<>(initialState);
|
||||
this.timeOutValue = timeout;
|
||||
if (timeOutValue != null) {
|
||||
this.startTimeNS = System.nanoTime();
|
||||
|
@@ -81,8 +83,8 @@ public class ClusterStateObserver {
|
|||
}
|
||||
|
||||
/** last cluster state and status observed by this observer. Note that this may not be the current one */
|
||||
public ClusterServiceState observedState() {
|
||||
ClusterServiceState state = lastObservedState.get();
|
||||
public ClusterState observedState() {
|
||||
ClusterState state = lastObservedState.get();
|
||||
assert state != null;
|
||||
return state;
|
||||
}
|
||||
|
@@ -100,18 +102,18 @@ public class ClusterStateObserver {
|
|||
waitForNextChange(listener, MATCH_ALL_CHANGES_PREDICATE, timeOutValue);
|
||||
}
|
||||
|
||||
public void waitForNextChange(Listener listener, ChangePredicate changePredicate) {
|
||||
waitForNextChange(listener, changePredicate, null);
|
||||
public void waitForNextChange(Listener listener, Predicate<ClusterState> statePredicate) {
|
||||
waitForNextChange(listener, statePredicate, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait for the next cluster state which satisfies changePredicate
|
||||
* Wait for the next cluster state which satisfies statePredicate
|
||||
*
|
||||
* @param listener callback listener
|
||||
* @param changePredicate predicate to check whether cluster state changes are relevant and the callback should be called
|
||||
* @param statePredicate predicate to check whether cluster state changes are relevant and the callback should be called
|
||||
* @param timeOutValue a timeout for waiting. If null the global observer timeout will be used.
|
||||
*/
|
||||
public void waitForNextChange(Listener listener, ChangePredicate changePredicate, @Nullable TimeValue timeOutValue) {
|
||||
public void waitForNextChange(Listener listener, Predicate<ClusterState> statePredicate, @Nullable TimeValue timeOutValue) {
|
||||
|
||||
if (observingContext.get() != null) {
|
||||
throw new ElasticsearchException("already waiting for a cluster state change");
|
||||
|
@@ -128,7 +130,7 @@ public class ClusterStateObserver {
|
|||
logger.trace("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
|
||||
// update to latest, in case people want to retry
|
||||
timedOut = true;
|
||||
lastObservedState.set(clusterService.clusterServiceState());
|
||||
lastObservedState.set(clusterService.state());
|
||||
listener.onTimeout(timeOutValue);
|
||||
return;
|
||||
}
|
||||
|
@@ -143,33 +145,24 @@ public class ClusterStateObserver {
|
|||
}
|
||||
|
||||
// sample a new state
|
||||
ClusterServiceState newState = clusterService.clusterServiceState();
|
||||
ClusterServiceState lastState = lastObservedState.get();
|
||||
if (changePredicate.apply(lastState, newState)) {
|
||||
ClusterState newState = clusterService.state();
|
||||
ClusterState lastState = lastObservedState.get();
|
||||
if (newState != lastState && statePredicate.test(newState)) {
|
||||
// good enough, let's go.
|
||||
logger.trace("observer: sampled state accepted by predicate ({})", newState);
|
||||
lastObservedState.set(newState);
|
||||
listener.onNewClusterState(newState.getClusterState());
|
||||
listener.onNewClusterState(newState);
|
||||
} else {
|
||||
logger.trace("observer: sampled state rejected by predicate ({}). adding listener to ClusterService", newState);
|
||||
ObservingContext context = new ObservingContext(new ContextPreservingListener(listener, contextHolder.newStoredContext()), changePredicate);
|
||||
ObservingContext context =
|
||||
new ObservingContext(new ContextPreservingListener(listener, contextHolder.newStoredContext()), statePredicate);
|
||||
if (!observingContext.compareAndSet(null, context)) {
|
||||
throw new ElasticsearchException("already waiting for a cluster state change");
|
||||
}
|
||||
clusterService.add(timeoutTimeLeftMS == null ? null : new TimeValue(timeoutTimeLeftMS), clusterStateListener);
|
||||
clusterService.addTimeoutListener(timeoutTimeLeftMS == null ? null : new TimeValue(timeoutTimeLeftMS), clusterStateListener);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* reset this observer to the give cluster state. Any pending waits will be canceled.
|
||||
*/
|
||||
public void reset(ClusterServiceState state) {
|
||||
if (observingContext.getAndSet(null) != null) {
|
||||
clusterService.remove(clusterStateListener);
|
||||
}
|
||||
lastObservedState.set(state);
|
||||
}
|
||||
|
||||
class ObserverClusterStateListener implements TimeoutClusterStateListener {
|
||||
|
||||
@Override
|
||||
|
@@ -179,18 +172,18 @@ public class ClusterStateObserver {
|
|||
// No need to remove listener as it is the responsibility of the thread that set observingContext to null
|
||||
return;
|
||||
}
|
||||
if (context.changePredicate.apply(event)) {
|
||||
final ClusterState state = event.state();
|
||||
if (context.statePredicate.test(state)) {
|
||||
if (observingContext.compareAndSet(context, null)) {
|
||||
clusterService.remove(this);
|
||||
ClusterServiceState state = new ClusterServiceState(event.state(), ClusterStateStatus.APPLIED);
|
||||
clusterService.removeTimeoutListener(this);
|
||||
logger.trace("observer: accepting cluster state change ({})", state);
|
||||
lastObservedState.set(state);
|
||||
context.listener.onNewClusterState(state.getClusterState());
|
||||
context.listener.onNewClusterState(state);
|
||||
} else {
|
||||
logger.trace("observer: predicate approved change but observing context has changed - ignoring (new cluster state version [{}])", event.state().version());
|
||||
logger.trace("observer: predicate approved change but observing context has changed - ignoring (new cluster state version [{}])", state.version());
|
||||
}
|
||||
} else {
|
||||
logger.trace("observer: predicate rejected change (new cluster state version [{}])", event.state().version());
|
||||
logger.trace("observer: predicate rejected change (new cluster state version [{}])", state.version());
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -201,15 +194,15 @@ public class ClusterStateObserver {
|
|||
// No need to remove listener as it is the responsibility of the thread that set observingContext to null
|
||||
return;
|
||||
}
|
||||
ClusterServiceState newState = clusterService.clusterServiceState();
|
||||
ClusterServiceState lastState = lastObservedState.get();
|
||||
if (context.changePredicate.apply(lastState, newState)) {
|
||||
ClusterState newState = clusterService.state();
|
||||
ClusterState lastState = lastObservedState.get();
|
||||
if (newState != lastState && context.statePredicate.test(newState)) {
|
||||
// double check we're still listening
|
||||
if (observingContext.compareAndSet(context, null)) {
|
||||
logger.trace("observer: post adding listener: accepting current cluster state ({})", newState);
|
||||
clusterService.remove(this);
|
||||
clusterService.removeTimeoutListener(this);
|
||||
lastObservedState.set(newState);
|
||||
context.listener.onNewClusterState(newState.getClusterState());
|
||||
context.listener.onNewClusterState(newState);
|
||||
} else {
|
||||
logger.trace("observer: postAdded - predicate approved state but observing context has changed - ignoring ({})", newState);
|
||||
}
|
||||
|
@@ -224,7 +217,7 @@ public class ClusterStateObserver {
|
|||
|
||||
if (context != null) {
|
||||
logger.trace("observer: cluster service closed. notifying listener.");
|
||||
clusterService.remove(this);
|
||||
clusterService.removeTimeoutListener(this);
|
||||
context.listener.onClusterServiceClose();
|
||||
}
|
||||
}
|
||||
|
@@ -233,11 +226,11 @@ public class ClusterStateObserver {
|
|||
public void onTimeout(TimeValue timeout) {
|
||||
ObservingContext context = observingContext.getAndSet(null);
|
||||
if (context != null) {
|
||||
clusterService.remove(this);
|
||||
clusterService.removeTimeoutListener(this);
|
||||
long timeSinceStartMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeNS);
|
||||
logger.trace("observer: timeout notification from cluster service. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS));
|
||||
// update to latest, in case people want to retry
|
||||
lastObservedState.set(clusterService.clusterServiceState());
|
||||
lastObservedState.set(clusterService.state());
|
||||
timedOut = true;
|
||||
context.listener.onTimeout(timeOutValue);
|
||||
}
|
||||
|
@@ -255,58 +248,13 @@ public class ClusterStateObserver {
|
|||
void onTimeout(TimeValue timeout);
|
||||
}
|
||||
|
||||
public interface ChangePredicate {
|
||||
|
||||
/**
|
||||
* a rough check used when starting to monitor for a new change. Called infrequently can be less accurate.
|
||||
*
|
||||
* @return true if newState should be accepted
|
||||
*/
|
||||
boolean apply(ClusterServiceState previousState,
|
||||
ClusterServiceState newState);
|
||||
|
||||
/**
|
||||
* called to see whether a cluster change should be accepted
|
||||
*
|
||||
* @return true if changedEvent.state() should be accepted
|
||||
*/
|
||||
boolean apply(ClusterChangedEvent changedEvent);
|
||||
}
|
||||
|
||||
|
||||
public abstract static class ValidationPredicate implements ChangePredicate {
|
||||
|
||||
@Override
|
||||
public boolean apply(ClusterServiceState previousState, ClusterServiceState newState) {
|
||||
return (previousState.getClusterState() != newState.getClusterState() ||
|
||||
previousState.getClusterStateStatus() != newState.getClusterStateStatus()) &&
|
||||
validate(newState);
|
||||
}
|
||||
|
||||
protected abstract boolean validate(ClusterServiceState newState);
|
||||
|
||||
@Override
|
||||
public boolean apply(ClusterChangedEvent changedEvent) {
|
||||
return changedEvent.previousState().version() != changedEvent.state().version() &&
|
||||
validate(new ClusterServiceState(changedEvent.state(), ClusterStateStatus.APPLIED));
|
||||
}
|
||||
}
|
||||
|
||||
public abstract static class EventPredicate implements ChangePredicate {
|
||||
@Override
|
||||
public boolean apply(ClusterServiceState previousState, ClusterServiceState newState) {
|
||||
return previousState.getClusterState() != newState.getClusterState() || previousState.getClusterStateStatus() != newState.getClusterStateStatus();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static class ObservingContext {
|
||||
public final Listener listener;
|
||||
public final ChangePredicate changePredicate;
|
||||
public final Predicate<ClusterState> statePredicate;
|
||||
|
||||
public ObservingContext(Listener listener, ChangePredicate changePredicate) {
|
||||
public ObservingContext(Listener listener, Predicate<ClusterState> statePredicate) {
|
||||
this.listener = listener;
|
||||
this.changePredicate = changePredicate;
|
||||
this.statePredicate = statePredicate;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
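The changes above replace ChangePredicate with a plain Predicate<ClusterState> and make the observer track ClusterState instances directly. A minimal usage sketch, assuming a ClusterService, Logger and ThreadContext are in scope; the predicate and listener bodies are illustrative:

    // illustrative sketch: wait (up to 30s) for a state that has an elected master
    ClusterStateObserver observer = new ClusterStateObserver(
            clusterService.state(), clusterService, TimeValue.timeValueSeconds(30), logger, threadContext);
    observer.waitForNextChange(new ClusterStateObserver.Listener() {
        @Override
        public void onNewClusterState(ClusterState state) { /* state passed the predicate */ }
        @Override
        public void onClusterServiceClose() { /* node is shutting down */ }
        @Override
        public void onTimeout(TimeValue timeout) { /* give up or retry */ }
    }, newState -> newState.nodes().getMasterNodeId() != null);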
@@ -19,11 +19,6 @@
|
|||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.LatchedActionListener;
|
||||
|
@@ -53,6 +48,11 @@ import org.elasticsearch.monitor.fs.FsInfo;
|
|||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.ReceiveTimeoutTransportException;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* InternalClusterInfoService provides the ClusterInfoService interface,
|
||||
* routinely updated on a timer. The timer can be dynamically changed by
|
||||
|
@@ -64,7 +64,8 @@ import org.elasticsearch.transport.ReceiveTimeoutTransportException;
|
|||
* Every time the timer runs, gathers information about the disk usage and
|
||||
* shard sizes across the cluster.
|
||||
*/
|
||||
public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener {
|
||||
public class InternalClusterInfoService extends AbstractComponent
|
||||
implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener {
|
||||
|
||||
public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING =
|
||||
Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10),
|
||||
|
@@ -105,9 +106,9 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
|
|||
clusterSettings.addSettingsUpdateConsumer(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled);
|
||||
|
||||
// Add InternalClusterInfoService to listen for Master changes
|
||||
this.clusterService.add((LocalNodeMasterListener)this);
|
||||
this.clusterService.addLocalNodeMasterListener(this);
|
||||
// Add to listen for state changes (when nodes are added)
|
||||
this.clusterService.add((ClusterStateListener)this);
|
||||
this.clusterService.addListener(this);
|
||||
}
|
||||
|
||||
private void setEnabled(boolean enabled) {
|
||||
|
@@ -167,7 +168,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
|
|||
}
|
||||
}
|
||||
|
||||
if (this.isMaster && dataNodeAdded && clusterService.state().getNodes().getDataNodes().size() > 1) {
|
||||
if (this.isMaster && dataNodeAdded && event.state().getNodes().getDataNodes().size() > 1) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("data node was added, retrieving new cluster info");
|
||||
}
|
||||
|
|
|
@@ -19,23 +19,32 @@
|
|||
|
||||
package org.elasticsearch.cluster;
|
||||
|
||||
import org.elasticsearch.cluster.service.ClusterServiceState;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
public enum MasterNodeChangePredicate implements ClusterStateObserver.ChangePredicate {
|
||||
INSTANCE;
|
||||
public final class MasterNodeChangePredicate {
|
||||
|
||||
private MasterNodeChangePredicate() {
|
||||
|
||||
@Override
|
||||
public boolean apply(
|
||||
ClusterServiceState previousState,
|
||||
ClusterServiceState newState) {
|
||||
// checking if the masterNodeId changed is insufficient as the
|
||||
// same master node might get re-elected after a disruption
|
||||
return newState.getClusterState().nodes().getMasterNodeId() != null &&
|
||||
newState.getClusterState() != previousState.getClusterState();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean apply(ClusterChangedEvent changedEvent) {
|
||||
return changedEvent.nodesDelta().masterNodeChanged();
|
||||
/**
|
||||
* builds a predicate that will accept a cluster state only if it was generated after the current has
|
||||
* (re-)joined the master
|
||||
*/
|
||||
public static Predicate<ClusterState> build(ClusterState currentState) {
|
||||
final long currentVersion = currentState.version();
|
||||
final String currentMaster = currentState.nodes().getMasterNodeId();
|
||||
return newState -> {
|
||||
final String newMaster = newState.nodes().getMasterNodeId();
|
||||
final boolean accept;
|
||||
if (newMaster == null) {
|
||||
accept = false;
|
||||
} else if (newMaster.equals(currentMaster) == false){
|
||||
accept = true;
|
||||
} else {
|
||||
accept = newState.version() > currentVersion;
|
||||
}
|
||||
return accept;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -92,7 +92,7 @@ public class ShardStateAction extends AbstractComponent {
|
|||
}
|
||||
|
||||
private void sendShardAction(final String actionName, final ClusterStateObserver observer, final ShardEntry shardEntry, final Listener listener) {
|
||||
DiscoveryNode masterNode = observer.observedState().getClusterState().nodes().getMasterNode();
|
||||
DiscoveryNode masterNode = observer.observedState().nodes().getMasterNode();
|
||||
if (masterNode == null) {
|
||||
logger.warn("{} no master known for action [{}] for shard entry [{}]", shardEntry.shardId, actionName, shardEntry);
|
||||
waitForNewMasterAndRetry(actionName, observer, shardEntry, listener);
|
||||
|
@@ -142,18 +142,27 @@ public class ShardStateAction extends AbstractComponent {
|
|||
*/
|
||||
public void remoteShardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
|
||||
assert primaryTerm > 0L : "primary term should be strictly positive";
|
||||
shardFailed(shardId, allocationId, primaryTerm, message, failure, listener);
|
||||
shardFailed(shardId, allocationId, primaryTerm, message, failure, listener, clusterService.state());
|
||||
}
|
||||
|
||||
/**
|
||||
* Send a shard failed request to the master node to update the cluster state when a shard on the local node failed.
|
||||
*/
|
||||
public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener) {
|
||||
shardFailed(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, failure, listener);
|
||||
localShardFailed(shardRouting, message, failure, listener, clusterService.state());
|
||||
}
|
||||
|
||||
private void shardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message, @Nullable final Exception failure, Listener listener) {
|
||||
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
|
||||
/**
|
||||
* Send a shard failed request to the master node to update the cluster state when a shard on the local node failed.
|
||||
*/
|
||||
public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener,
|
||||
final ClusterState currentState) {
|
||||
shardFailed(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, failure, listener, currentState);
|
||||
}
|
||||
|
||||
private void shardFailed(final ShardId shardId, String allocationId, long primaryTerm, final String message,
|
||||
@Nullable final Exception failure, Listener listener, ClusterState currentState) {
|
||||
ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext());
|
||||
ShardEntry shardEntry = new ShardEntry(shardId, allocationId, primaryTerm, message, failure);
|
||||
sendShardAction(SHARD_FAILED_ACTION_NAME, observer, shardEntry, listener);
|
||||
}
|
||||
|
@@ -180,7 +189,7 @@ public class ShardStateAction extends AbstractComponent {
|
|||
// we wait indefinitely for a new master
|
||||
assert false;
|
||||
}
|
||||
}, MasterNodeChangePredicate.INSTANCE);
|
||||
}, MasterNodeChangePredicate.build(observer.observedState()));
|
||||
}
|
||||
|
||||
private static class ShardFailedTransportHandler implements TransportRequestHandler<ShardEntry> {
|
||||
|
@@ -342,7 +351,10 @@ public class ShardStateAction extends AbstractComponent {
|
|||
}
|
||||
|
||||
public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener) {
|
||||
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext());
|
||||
shardStarted(shardRouting, message, listener, clusterService.state());
|
||||
}
|
||||
public void shardStarted(final ShardRouting shardRouting, final String message, Listener listener, ClusterState currentState) {
|
||||
ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext());
|
||||
ShardEntry shardEntry = new ShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, null);
|
||||
sendShardAction(SHARD_STARTED_ACTION_NAME, observer, shardEntry, listener);
|
||||
}
|
||||
|
|
|
@@ -71,6 +71,7 @@ import org.elasticsearch.index.query.QueryShardContext;
|
|||
import org.elasticsearch.indices.IndexCreationException;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.InvalidIndexNameException;
|
||||
import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.joda.time.DateTime;
|
||||
import org.joda.time.DateTimeZone;
|
||||
|
@@ -223,7 +224,8 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
|||
@Override
|
||||
public ClusterState execute(ClusterState currentState) throws Exception {
|
||||
Index createdIndex = null;
|
||||
String removalReason = null;
|
||||
String removalExtraInfo = null;
|
||||
IndexRemovalReason removalReason = IndexRemovalReason.FAILURE;
|
||||
try {
|
||||
validate(request, currentState);
|
||||
|
||||
|
@@ -356,7 +358,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
|||
try {
|
||||
mapperService.merge(mappings, request.updateAllTypes());
|
||||
} catch (MapperParsingException mpe) {
|
||||
removalReason = "failed on parsing default mapping/mappings on index creation";
|
||||
removalExtraInfo = "failed on parsing default mapping/mappings on index creation";
|
||||
throw mpe;
|
||||
}
|
||||
|
||||
|
@@ -407,7 +409,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
|||
try {
|
||||
indexMetaData = indexMetaDataBuilder.build();
|
||||
} catch (Exception e) {
|
||||
removalReason = "failed to build index metadata";
|
||||
removalExtraInfo = "failed to build index metadata";
|
||||
throw e;
|
||||
}
|
||||
|
||||
|
@@ -440,12 +442,13 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
|||
ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(),
|
||||
"index [" + request.index() + "] created");
|
||||
}
|
||||
removalReason = "cleaning up after validating index on master";
|
||||
removalExtraInfo = "cleaning up after validating index on master";
|
||||
removalReason = IndexRemovalReason.NO_LONGER_ASSIGNED;
|
||||
return updatedState;
|
||||
} finally {
|
||||
if (createdIndex != null) {
|
||||
// Index was already partially created - need to clean up
|
||||
indicesService.removeIndex(createdIndex, removalReason != null ? removalReason : "failed to create index");
|
||||
indicesService.removeIndex(createdIndex, removalReason, removalExtraInfo);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest;
|
||||
|
@@ -50,6 +49,7 @@ import java.util.Set;
|
|||
import java.util.function.Function;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED;
|
||||
|
||||
/**
|
||||
* Service responsible for submitting add and remove aliases requests
|
||||
|
@@ -172,7 +172,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
|
|||
return currentState;
|
||||
} finally {
|
||||
for (Index index : indicesToClose) {
|
||||
indicesService.removeIndex(index, "created for alias processing");
|
||||
indicesService.removeIndex(index, NO_LONGER_ASSIGNED, "created for alias processing");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -19,7 +19,6 @@
|
|||
package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.indices.alias.Alias;
|
||||
import org.elasticsearch.action.support.master.MasterNodeRequest;
|
||||
|
@@ -53,6 +52,8 @@ import java.util.Locale;
|
|||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED;
|
||||
|
||||
/**
|
||||
* Service responsible for submitting index templates updates
|
||||
*/
|
||||
|
@@ -225,7 +226,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
|
|||
|
||||
} finally {
|
||||
if (createdIndex != null) {
|
||||
indicesService.removeIndex(createdIndex, " created for parsing template mapping");
|
||||
indicesService.removeIndex(createdIndex, NO_LONGER_ASSIGNED, " created for parsing template mapping");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -52,6 +52,9 @@ import java.util.Collections;
|
|||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED;
|
||||
|
||||
/**
|
||||
* Service responsible for submitting mapping changes
|
||||
*/
|
||||
|
@@ -158,7 +161,7 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
}
|
||||
} finally {
|
||||
if (removeIndex) {
|
||||
indicesService.removeIndex(index, "created for mapping processing");
|
||||
indicesService.removeIndex(index, NO_LONGER_ASSIGNED, "created for mapping processing");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -71,7 +71,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
IndexScopedSettings indexScopedSettings, IndicesService indicesService) {
|
||||
super(settings);
|
||||
this.clusterService = clusterService;
|
||||
this.clusterService.add(this);
|
||||
this.clusterService.addListener(this);
|
||||
this.allocationService = allocationService;
|
||||
this.indexScopedSettings = indexScopedSettings;
|
||||
this.indicesService = indicesService;
|
||||
|
|
|
@@ -133,7 +133,7 @@ public class DelayedAllocationService extends AbstractLifecycleComponent impleme
|
|||
this.threadPool = threadPool;
|
||||
this.clusterService = clusterService;
|
||||
this.allocationService = allocationService;
|
||||
clusterService.addFirst(this);
|
||||
clusterService.addListener(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -146,7 +146,7 @@ public class DelayedAllocationService extends AbstractLifecycleComponent impleme
|
|||
|
||||
@Override
|
||||
protected void doClose() {
|
||||
clusterService.remove(this);
|
||||
clusterService.removeListener(this);
|
||||
removeTaskAndCancel();
|
||||
}
|
||||
|
||||
|
|
|
@@ -27,6 +27,7 @@ import org.elasticsearch.cluster.ClusterChangedEvent;
|
|||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterState.Builder;
|
||||
import org.elasticsearch.cluster.ClusterStateApplier;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskConfig;
|
||||
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
|
||||
|
@@ -77,6 +78,7 @@ import java.util.Locale;
|
|||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Queue;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
import java.util.concurrent.Executor;
|
||||
import java.util.concurrent.Future;
|
||||
|
@@ -87,6 +89,7 @@ import java.util.concurrent.atomic.AtomicReference;
|
|||
import java.util.function.BiConsumer;
|
||||
import java.util.function.UnaryOperator;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
|
||||
|
||||
|
@@ -113,20 +116,22 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
/**
|
||||
* Those 3 state listeners are changing infrequently - CopyOnWriteArrayList is just fine
|
||||
*/
|
||||
private final Collection<ClusterStateListener> priorityClusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Collection<ClusterStateListener> clusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Collection<ClusterStateListener> lastClusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Collection<ClusterStateApplier> highPriorityStateAppliers = new CopyOnWriteArrayList<>();
|
||||
private final Collection<ClusterStateApplier> normalPriorityStateAppliers = new CopyOnWriteArrayList<>();
|
||||
private final Collection<ClusterStateApplier> lowPriorityStateAppliers = new CopyOnWriteArrayList<>();
|
||||
final Map<ClusterStateTaskExecutor, LinkedHashSet<UpdateTask>> updateTasksPerExecutor = new HashMap<>();
|
||||
// TODO this is rather frequently changing I guess a Synced Set would be better here and a dedicated remove API
|
||||
private final Collection<ClusterStateListener> postAppliedListeners = new CopyOnWriteArrayList<>();
|
||||
private final Iterable<ClusterStateListener> preAppliedListeners = Iterables.concat(priorityClusterStateListeners,
|
||||
clusterStateListeners, lastClusterStateListeners);
|
||||
private final Iterable<ClusterStateApplier> clusterStateAppliers = Iterables.concat(highPriorityStateAppliers,
|
||||
normalPriorityStateAppliers, lowPriorityStateAppliers);
|
||||
|
||||
private final Collection<ClusterStateListener> clusterStateListeners = new CopyOnWriteArrayList<>();
|
||||
private final Collection<TimeoutClusterStateListener> timeoutClusterStateListeners =
|
||||
Collections.newSetFromMap(new ConcurrentHashMap<TimeoutClusterStateListener, Boolean>());
|
||||
|
||||
private final LocalNodeMasterListeners localNodeMasterListeners;
|
||||
|
||||
private final Queue<NotifyTimeout> onGoingTimeouts = ConcurrentCollections.newQueue();
|
||||
|
||||
private final AtomicReference<ClusterServiceState> state;
|
||||
private final AtomicReference<ClusterState> state;
|
||||
|
||||
private final ClusterBlocks.Builder initialBlocks;
|
||||
|
||||
|
@@ -140,7 +145,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
this.clusterSettings = clusterSettings;
|
||||
this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
|
||||
// will be replaced on doStart.
|
||||
this.state = new AtomicReference<>(new ClusterServiceState(ClusterState.builder(clusterName).build(), ClusterStateStatus.UNKNOWN));
|
||||
this.state = new AtomicReference<>(ClusterState.builder(clusterName).build());
|
||||
|
||||
this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
|
||||
this::setSlowTaskLoggingThreshold);
|
||||
|
@@ -161,43 +166,15 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
}
|
||||
|
||||
public synchronized void setLocalNode(DiscoveryNode localNode) {
|
||||
assert clusterServiceState().getClusterState().nodes().getLocalNodeId() == null : "local node is already set";
|
||||
updateState(css -> {
|
||||
ClusterState clusterState = css.getClusterState();
|
||||
assert state().nodes().getLocalNodeId() == null : "local node is already set";
|
||||
updateState(clusterState -> {
|
||||
DiscoveryNodes nodes = DiscoveryNodes.builder(clusterState.nodes()).add(localNode).localNodeId(localNode.getId()).build();
|
||||
return new ClusterServiceState(ClusterState.builder(clusterState).nodes(nodes).build(), css.getClusterStateStatus());
|
||||
return ClusterState.builder(clusterState).nodes(nodes).build();
|
||||
});
|
||||
}
|
||||
|
||||
private void updateState(UnaryOperator<ClusterServiceState> updateFunction) {
|
||||
this.state.getAndUpdate(oldClusterServiceState -> {
|
||||
ClusterServiceState newClusterServiceState = updateFunction.apply(oldClusterServiceState);
|
||||
assert validStateTransition(oldClusterServiceState, newClusterServiceState) :
|
||||
"Invalid cluster service state transition from " + oldClusterServiceState + " to " + newClusterServiceState;
|
||||
return newClusterServiceState;
|
||||
});
|
||||
}
|
||||
|
||||
private static boolean validStateTransition(ClusterServiceState oldClusterServiceState, ClusterServiceState newClusterServiceState) {
|
||||
if (oldClusterServiceState == null || newClusterServiceState == null) {
|
||||
return false;
|
||||
}
|
||||
ClusterStateStatus oldStatus = oldClusterServiceState.getClusterStateStatus();
|
||||
ClusterStateStatus newStatus = newClusterServiceState.getClusterStateStatus();
|
||||
// only go from UNKNOWN to UNKNOWN or BEING_APPLIED
|
||||
if (oldStatus == ClusterStateStatus.UNKNOWN && newStatus == ClusterStateStatus.APPLIED) {
|
||||
return false;
|
||||
}
|
||||
// only go from BEING_APPLIED to APPLIED
|
||||
if (oldStatus == ClusterStateStatus.BEING_APPLIED && newStatus != ClusterStateStatus.APPLIED) {
|
||||
return false;
|
||||
}
|
||||
// only go from APPLIED to BEING_APPLIED
|
||||
if (oldStatus == ClusterStateStatus.APPLIED && newStatus != ClusterStateStatus.BEING_APPLIED) {
|
||||
return false;
|
||||
}
|
||||
boolean identicalClusterState = oldClusterServiceState.getClusterState() == newClusterServiceState.getClusterState();
|
||||
return identicalClusterState == (oldStatus == ClusterStateStatus.BEING_APPLIED && newStatus == ClusterStateStatus.APPLIED);
|
||||
private void updateState(UnaryOperator<ClusterState> updateFunction) {
|
||||
this.state.getAndUpdate(updateFunction);
|
||||
}
|
||||
|
||||
public synchronized void setNodeConnectionsService(NodeConnectionsService nodeConnectionsService) {
|
||||
|
@@ -235,12 +212,10 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
@Override
|
||||
protected synchronized void doStart() {
|
||||
Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
|
||||
Objects.requireNonNull(clusterServiceState().getClusterState().nodes().getLocalNode(), "please set the local node before starting");
|
||||
Objects.requireNonNull(state().nodes().getLocalNode(), "please set the local node before starting");
|
||||
Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
|
||||
add(localNodeMasterListeners);
|
||||
updateState(css -> new ClusterServiceState(
|
||||
ClusterState.builder(css.getClusterState()).blocks(initialBlocks).build(),
|
||||
css.getClusterStateStatus()));
|
||||
addListener(localNodeMasterListeners);
|
||||
updateState(state -> ClusterState.builder(state).blocks(initialBlocks).build());
|
||||
this.threadPoolExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME),
|
||||
threadPool.getThreadContext());
|
||||
}
|
||||
|
@@ -258,12 +233,8 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
}
|
||||
ThreadPool.terminate(threadPoolExecutor, 10, TimeUnit.SECONDS);
|
||||
// close timeout listeners that did not have an ongoing timeout
|
||||
postAppliedListeners
|
||||
.stream()
|
||||
.filter(listener -> listener instanceof TimeoutClusterStateListener)
|
||||
.map(listener -> (TimeoutClusterStateListener) listener)
|
||||
.forEach(TimeoutClusterStateListener::onClose);
|
||||
remove(localNodeMasterListeners);
|
||||
timeoutClusterStateListeners.forEach(TimeoutClusterStateListener::onClose);
|
||||
removeListener(localNodeMasterListeners);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -289,45 +260,59 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
* The current cluster state.
|
||||
*/
|
||||
public ClusterState state() {
|
||||
return clusterServiceState().getClusterState();
|
||||
}
|
||||
|
||||
/**
|
||||
* The current cluster service state comprising cluster state and cluster state status.
|
||||
*/
|
||||
public ClusterServiceState clusterServiceState() {
|
||||
assert assertNotCalledFromClusterStateApplier("the applied cluster state is not yet available");
|
||||
return this.state.get();
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a priority listener for updated cluster states.
|
||||
* Adds a high priority applier of updated cluster states.
|
||||
*/
|
||||
public void addFirst(ClusterStateListener listener) {
|
||||
priorityClusterStateListeners.add(listener);
|
||||
public void addHighPriorityApplier(ClusterStateApplier applier) {
|
||||
highPriorityStateAppliers.add(applier);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds last listener.
|
||||
* Adds an applier which will be called after all high priority and normal appliers have been called.
|
||||
*/
|
||||
public void addLast(ClusterStateListener listener) {
|
||||
lastClusterStateListeners.add(listener);
|
||||
public void addLowPriorityApplier(ClusterStateApplier applier) {
|
||||
lowPriorityStateAppliers.add(applier);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a listener for updated cluster states.
|
||||
* Adds an applier of updated cluster states.
|
||||
*/
|
||||
public void add(ClusterStateListener listener) {
|
||||
public void addStateApplier(ClusterStateApplier applier) {
|
||||
normalPriorityStateAppliers.add(applier);
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes an applier of updated cluster states.
|
||||
*/
|
||||
public void removeApplier(ClusterStateApplier applier) {
|
||||
normalPriorityStateAppliers.remove(applier);
|
||||
highPriorityStateAppliers.remove(applier);
|
||||
lowPriorityStateAppliers.remove(applier);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a listener for updated cluster states.
|
||||
*/
|
||||
public void addListener(ClusterStateListener listener) {
|
||||
clusterStateListeners.add(listener);
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes a listener for updated cluster states.
|
||||
*/
|
||||
public void remove(ClusterStateListener listener) {
|
||||
public void removeListener(ClusterStateListener listener) {
|
||||
clusterStateListeners.remove(listener);
|
||||
priorityClusterStateListeners.remove(listener);
|
||||
lastClusterStateListeners.remove(listener);
|
||||
postAppliedListeners.remove(listener);
|
||||
}
|
||||
|
||||
/**
|
||||
* Removes a timeout listener for updated cluster states.
|
||||
*/
|
||||
public void removeTimeoutListener(TimeoutClusterStateListener listener) {
|
||||
timeoutClusterStateListeners.remove(listener);
|
||||
for (Iterator<NotifyTimeout> it = onGoingTimeouts.iterator(); it.hasNext(); ) {
|
||||
NotifyTimeout timeout = it.next();
|
||||
if (timeout.listener.equals(listener)) {
|
||||
|
@ -340,25 +325,24 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
/**
|
||||
* Add a listener for on/off local node master events
|
||||
*/
|
||||
public void add(LocalNodeMasterListener listener) {
|
||||
public void addLocalNodeMasterListener(LocalNodeMasterListener listener) {
|
||||
localNodeMasterListeners.add(listener);
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove the given listener for on/off local master events
|
||||
*/
|
||||
public void remove(LocalNodeMasterListener listener) {
|
||||
public void removeLocalNodeMasterListener(LocalNodeMasterListener listener) {
|
||||
localNodeMasterListeners.remove(listener);
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds a cluster state listener that will timeout after the provided timeout,
|
||||
* and is executed after the clusterstate has been successfully applied ie. is
|
||||
* in state {@link ClusterStateStatus#APPLIED}
|
||||
* NOTE: a {@code null} timeout means that the listener will never be removed
|
||||
* automatically
|
||||
* Adds a cluster state listener that is expected to be removed during a short period of time.
|
||||
* If provided, the listener will be notified once a specific time has elapsed.
|
||||
*
|
||||
* NOTE: the listener is not removed on timeout. This is the responsibility of the caller.
|
||||
*/
|
||||
public void add(@Nullable final TimeValue timeout, final TimeoutClusterStateListener listener) {
|
||||
public void addTimeoutListener(@Nullable final TimeValue timeout, final TimeoutClusterStateListener listener) {
|
||||
if (lifecycle.stoppedOrClosed()) {
|
||||
listener.onClose();
|
||||
return;
|
||||
|
@ -373,7 +357,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
notifyTimeout.future = threadPool.schedule(timeout, ThreadPool.Names.GENERIC, notifyTimeout);
|
||||
onGoingTimeouts.add(notifyTimeout);
|
||||
}
|
||||
postAppliedListeners.add(listener);
|
||||
timeoutClusterStateListeners.add(listener);
|
||||
listener.postAdded();
|
||||
}
|
||||
});
|
||||
|
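For reference, a minimal usage sketch of the renamed registration methods above. The ClusterService instance, the timeout listener, and the work done inside the callbacks are assumptions for illustration, not part of this diff; imports are omitted.

void registerCallbacks(ClusterService clusterService, TimeoutClusterStateListener timeoutListener) {
    clusterService.addHighPriorityApplier(event -> {
        // appliers run while the new cluster state is being applied, before it is visible via state()
    });
    clusterService.addStateApplier(event -> {
        // normal priority applier
    });
    clusterService.addLowPriorityApplier(event -> {
        // runs after all high priority and normal appliers
    });
    clusterService.addListener(event -> {
        // listeners run after the state has been applied and is visible via clusterService.state()
    });
    // timeout listeners must be removed by the caller; the timeout only triggers a notification
    clusterService.addTimeoutListener(TimeValue.timeValueSeconds(30), timeoutListener);
}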
@ -572,6 +556,19 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
return true;
|
||||
}
|
||||
|
||||
/** asserts that the current stack trace does <b>NOT</b> involve a cluster state applier */
|
||||
private static boolean assertNotCalledFromClusterStateApplier(String reason) {
|
||||
if (Thread.currentThread().getName().contains(UPDATE_THREAD_NAME)) {
|
||||
for (StackTraceElement element: Thread.currentThread().getStackTrace()) {
|
||||
if (element.getClassName().equals(ClusterService.class.getName())
|
||||
&& element.getMethodName().equals("callClusterStateAppliers")) {
|
||||
throw new AssertionError("should not be called by a cluster state applier. reason [" + reason + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
public ClusterName getClusterName() {
|
||||
return clusterName;
|
||||
}
|
||||
|
@ -596,8 +593,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
}
|
||||
|
||||
logger.debug("processing [{}]: execute", taskInputs.summary);
|
||||
ClusterServiceState previousClusterServiceState = clusterServiceState();
|
||||
ClusterState previousClusterState = previousClusterServiceState.getClusterState();
|
||||
ClusterState previousClusterState = state();
|
||||
|
||||
if (!previousClusterState.nodes().isLocalNodeElectedMaster() && taskInputs.runOnlyOnMaster()) {
|
||||
logger.debug("failing [{}]: local node is no longer master", taskInputs.summary);
|
||||
|
@ -606,7 +602,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
}
|
||||
|
||||
long startTimeNS = currentTimeInNanos();
|
||||
TaskOutputs taskOutputs = calculateTaskOutputs(taskInputs, previousClusterServiceState, startTimeNS);
|
||||
TaskOutputs taskOutputs = calculateTaskOutputs(taskInputs, previousClusterState, startTimeNS);
|
||||
taskOutputs.notifyFailedTasks();
|
||||
|
||||
if (taskOutputs.clusterStateUnchanged()) {
|
||||
|
@ -615,7 +611,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
logger.debug("processing [{}]: took [{}] no change in cluster_state", taskInputs.summary, executionTime);
|
||||
warnAboutSlowTaskIfNeeded(executionTime, taskInputs.summary);
|
||||
} else {
|
||||
ClusterState newClusterState = taskOutputs.newClusterServiceState.getClusterState();
|
||||
ClusterState newClusterState = taskOutputs.newClusterState;
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("cluster state updated, source [{}]\n{}", taskInputs.summary, newClusterState);
|
||||
} else if (logger.isDebugEnabled()) {
|
||||
|
@ -646,8 +642,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
}
|
||||
}
|
||||
|
||||
public TaskOutputs calculateTaskOutputs(TaskInputs taskInputs, ClusterServiceState previousClusterServiceState, long startTimeNS) {
|
||||
ClusterState previousClusterState = previousClusterServiceState.getClusterState();
|
||||
public TaskOutputs calculateTaskOutputs(TaskInputs taskInputs, ClusterState previousClusterState, long startTimeNS) {
|
||||
BatchResult<Object> batchResult = executeTasks(taskInputs, startTimeNS, previousClusterState);
|
||||
ClusterState newClusterState = batchResult.resultingState;
|
||||
// extract those that are waiting for results
|
||||
|
@ -662,9 +657,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
}
|
||||
newClusterState = patchVersions(previousClusterState, newClusterState);
|
||||
|
||||
ClusterServiceState newClusterServiceState = new ClusterServiceState(newClusterState, ClusterStateStatus.BEING_APPLIED);
|
||||
|
||||
return new TaskOutputs(taskInputs, previousClusterServiceState, newClusterServiceState, nonFailedTasks,
|
||||
return new TaskOutputs(taskInputs, previousClusterState, newClusterState, nonFailedTasks,
|
||||
batchResult.executionResults);
|
||||
}
|
||||
|
||||
|
@ -728,8 +721,8 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
}
|
||||
|
||||
private void publishAndApplyChanges(TaskInputs taskInputs, TaskOutputs taskOutputs) {
|
||||
ClusterState previousClusterState = taskOutputs.previousClusterServiceState.getClusterState();
|
||||
ClusterState newClusterState = taskOutputs.newClusterServiceState.getClusterState();
|
||||
ClusterState previousClusterState = taskOutputs.previousClusterState;
|
||||
ClusterState newClusterState = taskOutputs.newClusterState;
|
||||
|
||||
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent(taskInputs.summary, newClusterState, previousClusterState);
|
||||
// new cluster state, notify all listeners
|
||||
|
@ -767,9 +760,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
}
|
||||
}
|
||||
|
||||
// update the current cluster state
|
||||
updateState(css -> taskOutputs.newClusterServiceState);
|
||||
logger.debug("set local cluster state to version {}", newClusterState.version());
|
||||
logger.debug("applying cluster state version {}", newClusterState.version());
|
||||
try {
|
||||
// nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency
|
||||
if (clusterChangedEvent.state().blocks().disableStatePersistence() == false && clusterChangedEvent.metaDataChanged()) {
|
||||
|
@ -779,27 +770,22 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
} catch (Exception ex) {
|
||||
logger.warn("failed to apply cluster settings", ex);
|
||||
}
|
||||
for (ClusterStateListener listener : preAppliedListeners) {
|
||||
try {
|
||||
logger.trace("calling [{}] with change to version [{}]", listener, newClusterState.version());
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
|
||||
logger.debug("set local cluster state to version {}", newClusterState.version());
|
||||
callClusterStateAppliers(newClusterState, clusterChangedEvent);
|
||||
|
||||
nodeConnectionsService.disconnectFromNodes(clusterChangedEvent.nodesDelta().removedNodes());
|
||||
|
||||
updateState(css -> new ClusterServiceState(css.getClusterState(), ClusterStateStatus.APPLIED));
|
||||
updateState(css -> newClusterState);
|
||||
|
||||
for (ClusterStateListener listener : postAppliedListeners) {
|
||||
Stream.concat(clusterStateListeners.stream(), timeoutClusterStateListeners.stream()).forEach(listener -> {
|
||||
try {
|
||||
logger.trace("calling [{}] with change to version [{}]", listener, newClusterState.version());
|
||||
listener.clusterChanged(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateListener", ex);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
//manual ack only from the master at the end of the publish
|
||||
if (newClusterState.nodes().isLocalNodeElectedMaster()) {
|
||||
|
@ -826,6 +812,17 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
}
|
||||
}
|
||||
|
||||
private void callClusterStateAppliers(ClusterState newClusterState, ClusterChangedEvent clusterChangedEvent) {
|
||||
for (ClusterStateApplier applier : clusterStateAppliers) {
|
||||
try {
|
||||
logger.trace("calling [{}] with change to version [{}]", applier, newClusterState.version());
|
||||
applier.applyClusterState(clusterChangedEvent);
|
||||
} catch (Exception ex) {
|
||||
logger.warn("failed to notify ClusterStateApplier", ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Represents a set of tasks to be processed together with their executor
|
||||
*/
|
||||
|
@ -854,17 +851,17 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
*/
|
||||
class TaskOutputs {
|
||||
public final TaskInputs taskInputs;
|
||||
public final ClusterServiceState previousClusterServiceState;
|
||||
public final ClusterServiceState newClusterServiceState;
|
||||
public final ClusterState previousClusterState;
|
||||
public final ClusterState newClusterState;
|
||||
public final List<UpdateTask> nonFailedTasks;
|
||||
public final Map<Object, ClusterStateTaskExecutor.TaskResult> executionResults;
|
||||
|
||||
public TaskOutputs(TaskInputs taskInputs, ClusterServiceState previousClusterServiceState,
|
||||
ClusterServiceState newClusterServiceState, List<UpdateTask> nonFailedTasks,
|
||||
public TaskOutputs(TaskInputs taskInputs, ClusterState previousClusterState,
|
||||
ClusterState newClusterState, List<UpdateTask> nonFailedTasks,
|
||||
Map<Object, ClusterStateTaskExecutor.TaskResult> executionResults) {
|
||||
this.taskInputs = taskInputs;
|
||||
this.previousClusterServiceState = previousClusterServiceState;
|
||||
this.newClusterServiceState = newClusterServiceState;
|
||||
this.previousClusterState = previousClusterState;
|
||||
this.newClusterState = newClusterState;
|
||||
this.nonFailedTasks = nonFailedTasks;
|
||||
this.executionResults = executionResults;
|
||||
}
|
||||
|
@ -907,7 +904,7 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
}
|
||||
|
||||
public boolean clusterStateUnchanged() {
|
||||
return previousClusterServiceState.getClusterState() == newClusterServiceState.getClusterState();
|
||||
return previousClusterState == newClusterState;
|
||||
}
|
||||
|
||||
public void notifyFailedTasks() {
|
||||
|
@ -922,13 +919,12 @@ public class ClusterService extends AbstractLifecycleComponent {
|
|||
}
|
||||
|
||||
public void notifySuccessfulTasksOnUnchangedClusterState() {
|
||||
ClusterState clusterState = newClusterServiceState.getClusterState();
|
||||
nonFailedTasks.forEach(task -> {
|
||||
if (task.listener instanceof AckedClusterStateTaskListener) {
|
||||
//no need to wait for ack if nothing changed, the update can be counted as acknowledged
|
||||
((AckedClusterStateTaskListener) task.listener).onAllNodesAcked(null);
|
||||
}
|
||||
task.listener.clusterStateProcessed(task.source, clusterState, clusterState);
|
||||
task.listener.clusterStateProcessed(task.source, newClusterState, newClusterState);
|
||||
});
|
||||
}
|
||||
}
@ -1,49 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster.service;
|
||||
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
|
||||
/**
|
||||
* A simple immutable container class that comprises a cluster state and cluster state status. Used by {@link ClusterService}
|
||||
* to provide a snapshot view on which cluster state is currently being applied / already applied.
|
||||
*/
|
||||
public class ClusterServiceState {
|
||||
private final ClusterState clusterState;
|
||||
private final ClusterStateStatus clusterStateStatus;
|
||||
|
||||
public ClusterServiceState(ClusterState clusterState, ClusterStateStatus clusterStateStatus) {
|
||||
this.clusterState = clusterState;
|
||||
this.clusterStateStatus = clusterStateStatus;
|
||||
}
|
||||
|
||||
public ClusterState getClusterState() {
|
||||
return clusterState;
|
||||
}
|
||||
|
||||
public ClusterStateStatus getClusterStateStatus() {
|
||||
return clusterStateStatus;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "version [" + clusterState.version() + "], status [" + clusterStateStatus + "]";
|
||||
}
|
||||
}
|
|
@ -0,0 +1,132 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectLongHashMap;
|
||||
import com.carrotsearch.hppc.cursors.ObjectLongCursor;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Iterator;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
* A reusable class to encode <tt>field -> memory size</tt> mappings
*/
public final class FieldMemoryStats implements Writeable, Iterable<ObjectLongCursor<String>> {

private final ObjectLongHashMap<String> stats;

/**
* Creates a new FieldMemoryStats instance
*/
public FieldMemoryStats(ObjectLongHashMap<String> stats) {
this.stats = Objects.requireNonNull(stats, "stats must be non-null");
assert !stats.containsKey(null);
}

/**
* Creates a new FieldMemoryStats instance from a stream
*/
public FieldMemoryStats(StreamInput input) throws IOException {
int size = input.readVInt();
stats = new ObjectLongHashMap<>(size);
for (int i = 0; i < size; i++) {
stats.put(input.readString(), input.readVLong());
}
}

/**
* Adds / merges the given field memory stats into this stats instance
*/
public void add(FieldMemoryStats fieldMemoryStats) {
for (ObjectLongCursor<String> entry : fieldMemoryStats.stats) {
stats.addTo(entry.key, entry.value);
}
}

@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVInt(stats.size());
|
||||
for (ObjectLongCursor<String> entry : stats) {
|
||||
out.writeString(entry.key);
|
||||
out.writeVLong(entry.value);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates x-content into the given builder for each of the fields in this stats instance
|
||||
* @param builder the builder to generate on
|
||||
* @param key the top level key for this stats object
|
||||
* @param rawKey the raw byte key for each of the fields byte sizes
|
||||
* @param readableKey the readable key for each of the fields byte sizes
|
||||
*/
|
||||
public void toXContent(XContentBuilder builder, String key, String rawKey, String readableKey) throws IOException {
|
||||
builder.startObject(key);
|
||||
for (ObjectLongCursor<String> entry : stats) {
|
||||
builder.startObject(entry.key);
|
||||
builder.byteSizeField(rawKey, readableKey, entry.value);
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a deep copy of this stats instance
|
||||
*/
|
||||
public FieldMemoryStats copy() {
|
||||
return new FieldMemoryStats(stats.clone());
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
FieldMemoryStats that = (FieldMemoryStats) o;
|
||||
return Objects.equals(stats, that.stats);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(stats);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<ObjectLongCursor<String>> iterator() {
|
||||
return stats.iterator();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the field's value in bytes or <code>0</code> if it's not present in the stats
|
||||
*/
|
||||
public long get(String field) {
|
||||
return stats.get(field);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> iff the given field is in the stats
|
||||
*/
|
||||
public boolean containsField(String field) {
|
||||
return stats.containsKey(field);
|
||||
}
|
||||
}
|
|
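A small, hedged usage sketch of the new class; the field names and sizes are invented for illustration.

import com.carrotsearch.hppc.ObjectLongHashMap;
import org.elasticsearch.common.FieldMemoryStats;

public class FieldMemoryStatsSketch {
    public static void main(String[] args) {
        ObjectLongHashMap<String> perField = new ObjectLongHashMap<>();
        perField.put("title", 1024L);
        perField.put("body", 4096L);

        FieldMemoryStats stats = new FieldMemoryStats(perField);
        // merging sums the per-field values
        stats.add(new FieldMemoryStats(perField.clone()));

        long titleBytes = stats.get("title");              // 2048
        boolean tracksBody = stats.containsField("body");  // true
        System.out.println(titleBytes + " " + tracksBody);
    }
}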
@ -467,16 +467,32 @@ public abstract class StreamOutput extends OutputStream {
|
|||
* @param keyWriter The key writer
|
||||
* @param valueWriter The value writer
|
||||
*/
|
||||
public <K, V> void writeMapOfLists(final Map<K, List<V>> map, final Writer<K> keyWriter, final Writer<V> valueWriter)
|
||||
public final <K, V> void writeMapOfLists(final Map<K, List<V>> map, final Writer<K> keyWriter, final Writer<V> valueWriter)
|
||||
throws IOException {
|
||||
writeVInt(map.size());
|
||||
|
||||
for (final Map.Entry<K, List<V>> entry : map.entrySet()) {
|
||||
keyWriter.write(this, entry.getKey());
|
||||
writeVInt(entry.getValue().size());
|
||||
for (final V value : entry.getValue()) {
|
||||
writeMap(map, keyWriter, (stream, list) -> {
|
||||
writeVInt(list.size());
|
||||
for (final V value : list) {
|
||||
valueWriter.write(this, value);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Write a {@link Map} of {@code K}-type keys to {@code V}-type values.
|
||||
* <pre><code>
|
||||
* Map<String, String> map = ...;
|
||||
* out.writeMap(map, StreamOutput::writeString, StreamOutput::writeString);
|
||||
* </code></pre>
|
||||
*
|
||||
* @param keyWriter The key writer
|
||||
* @param valueWriter The value writer
|
||||
*/
|
||||
public final <K, V> void writeMap(final Map<K, V> map, final Writer<K> keyWriter, final Writer<V> valueWriter)
|
||||
throws IOException {
|
||||
writeVInt(map.size());
|
||||
for (final Map.Entry<K, V> entry : map.entrySet()) {
|
||||
keyWriter.write(this, entry.getKey());
|
||||
valueWriter.write(this, entry.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
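A hedged example of the helper above in use; BytesStreamOutput is used here purely as a convenient in-memory StreamOutput, and the map contents are invented.

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class WriteMapOfListsSketch {
    public static void main(String[] args) throws IOException {
        Map<String, List<String>> nodesByAttribute = new HashMap<>();
        nodesByAttribute.put("zone-a", Arrays.asList("node-1", "node-2"));

        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // keys and values are both strings, so both writers are StreamOutput::writeString
            out.writeMapOfLists(nodesByAttribute, StreamOutput::writeString, StreamOutput::writeString);
        }
    }
}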
@ -390,7 +390,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
|
|||
PluginsService.MANDATORY_SETTING,
|
||||
BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING,
|
||||
BootstrapSettings.MEMORY_LOCK_SETTING,
|
||||
BootstrapSettings.SECCOMP_SETTING,
|
||||
BootstrapSettings.SYSTEM_CALL_FILTER_SETTING,
|
||||
BootstrapSettings.CTRLHANDLER_SETTING,
|
||||
IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING,
|
||||
IndexingMemoryController.MIN_INDEX_BUFFER_SIZE_SETTING,
@ -19,6 +19,7 @@
|
|||
|
||||
package org.elasticsearch.common.xcontent;
|
||||
|
||||
import org.elasticsearch.common.Booleans;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -33,6 +34,27 @@ import java.util.Set;
|
|||
*/
|
||||
public interface XContent {

/*
* NOTE: This comment is only meant for maintainers of the Elasticsearch code base and is intentionally not a Javadoc comment as it
* describes an undocumented system property.
*
* Determines whether the XContent parser will always check for duplicate keys. This behavior is enabled by default but
* can be disabled by setting the otherwise undocumented system property "es.xcontent.strict_duplicate_detection" to "false".
*
* Before we enabled this mode, we had custom duplicate checks in various parts of the code base. As the user can still disable this
* mode and fall back to the legacy duplicate checks, we still need to keep the custom duplicate checks around and we also need to keep
* the tests around.
*
* If this fallback via system property is removed one day in the future, you can remove all tests that call this method and also remove
* the corresponding custom duplicate check code.
*/
static boolean isStrictDuplicateDetectionEnabled() {
// Don't allow duplicate keys in JSON content by default but let the user opt out
return Booleans.parseBooleanExact(System.getProperty("es.xcontent.strict_duplicate_detection", "true"));
}

/**
|
||||
* The type this content handles and produces.
|
||||
*/
|
||||
|
|
|
@ -141,7 +141,12 @@ public class XContentFactory {
|
|||
|
||||
/**
|
||||
* Guesses the content type based on the provided char sequence.
|
||||
*
|
||||
* @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type.
|
||||
* The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed.
|
||||
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
|
||||
*/
|
||||
@Deprecated
|
||||
public static XContentType xContentType(CharSequence content) {
|
||||
int length = content.length() < GUESS_HEADER_LENGTH ? content.length() : GUESS_HEADER_LENGTH;
|
||||
if (length == 0) {
|
||||
|
@ -174,8 +179,13 @@ public class XContentFactory {
|
|||
}
|
||||
|
||||
/**
|
||||
* Guesses the content (type) based on the provided char sequence.
|
||||
* Guesses the content (type) based on the provided char sequence and returns the corresponding {@link XContent}
|
||||
*
|
||||
* @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type.
|
||||
* The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed.
|
||||
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
|
||||
*/
|
||||
@Deprecated
|
||||
public static XContent xContent(CharSequence content) {
|
||||
XContentType type = xContentType(content);
|
||||
if (type == null) {
|
||||
|
@ -185,15 +195,24 @@ public class XContentFactory {
|
|||
}
|
||||
|
||||
/**
|
||||
* Guesses the content type based on the provided bytes.
|
||||
* Guesses the content type based on the provided bytes and returns the corresponding {@link XContent}
|
||||
*
|
||||
* @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type.
|
||||
* The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed.
|
||||
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
|
||||
*/
|
||||
@Deprecated
|
||||
public static XContent xContent(byte[] data) {
|
||||
return xContent(data, 0, data.length);
|
||||
}
|
||||
|
||||
/**
|
||||
* Guesses the content type based on the provided bytes.
|
||||
* Guesses the content type based on the provided bytes and returns the corresponding {@link XContent}
|
||||
*
|
||||
* @deprecated guessing the content type should not be needed ideally. We should rather know the content type upfront or read it
|
||||
* from headers. Until we fix the REST layer to read the Content-Type header, that should be the only place where guessing is needed.
|
||||
*/
|
||||
@Deprecated
|
||||
public static XContent xContent(byte[] data, int offset, int length) {
|
||||
XContentType type = xContentType(data, offset, length);
|
||||
if (type == null) {
|
||||
|
@ -204,14 +223,24 @@ public class XContentFactory {
|
|||
|
||||
/**
|
||||
* Guesses the content type based on the provided bytes.
|
||||
*
|
||||
* @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type.
|
||||
* The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed.
|
||||
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
|
||||
*/
|
||||
@Deprecated
|
||||
public static XContentType xContentType(byte[] data) {
|
||||
return xContentType(data, 0, data.length);
|
||||
}
|
||||
|
||||
/**
|
||||
* Guesses the content type based on the provided input stream without consuming it.
|
||||
*
|
||||
* @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type.
|
||||
* The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed.
|
||||
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
|
||||
*/
|
||||
@Deprecated
|
||||
public static XContentType xContentType(InputStream si) throws IOException {
|
||||
if (si.markSupported() == false) {
|
||||
throw new IllegalArgumentException("Cannot guess the xcontent type without mark/reset support on " + si.getClass());
|
||||
|
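Since xContentType(InputStream) insists on mark/reset support, callers holding a raw stream can wrap it first. A hedged sketch; the file name is invented and the deprecated guessing method is used only because no Content-Type is available.

import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class GuessTypeSketch {
    public static void main(String[] args) throws IOException {
        // FileInputStream does not support mark/reset, so wrap it in a BufferedInputStream first
        try (InputStream in = new BufferedInputStream(new FileInputStream("request-body.bin"))) {
            XContentType type = XContentFactory.xContentType(in); // null if the type cannot be guessed
            System.out.println("guessed content type: " + type);
        }
    }
}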
@ -228,11 +257,24 @@ public class XContentFactory {
|
|||
|
||||
/**
|
||||
* Guesses the content type based on the provided bytes.
|
||||
*
|
||||
* @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type.
|
||||
* The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed.
|
||||
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
|
||||
*/
|
||||
@Deprecated
|
||||
public static XContentType xContentType(byte[] data, int offset, int length) {
|
||||
return xContentType(new BytesArray(data, offset, length));
|
||||
}
|
||||
|
||||
/**
|
||||
* Guesses the content type based on the provided bytes and returns the corresponding {@link XContent}
|
||||
*
|
||||
* @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type.
|
||||
* The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed.
|
||||
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
|
||||
*/
|
||||
@Deprecated
|
||||
public static XContent xContent(BytesReference bytes) {
|
||||
XContentType type = xContentType(bytes);
|
||||
if (type == null) {
|
||||
|
@ -243,7 +285,12 @@ public class XContentFactory {
|
|||
|
||||
/**
|
||||
* Guesses the content type based on the provided bytes.
|
||||
*
|
||||
* @deprecated the content type should not be guessed except for a few cases where we effectively don't know the content type.
|
||||
* The REST layer should move to reading the Content-Type header instead. There are other places where auto-detection may be needed.
|
||||
* This method is deprecated to prevent usages of it from spreading further without specific reasons.
|
||||
*/
|
||||
@Deprecated
|
||||
public static XContentType xContentType(BytesReference bytes) {
|
||||
int length = bytes.length();
|
||||
if (length == 0) {
|
||||
|
@ -21,6 +21,7 @@ package org.elasticsearch.common.xcontent.cbor;
|
|||
|
||||
import com.fasterxml.jackson.core.JsonEncoding;
|
||||
import com.fasterxml.jackson.core.JsonGenerator;
|
||||
import com.fasterxml.jackson.core.JsonParser;
|
||||
import com.fasterxml.jackson.dataformat.cbor.CBORFactory;
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
@ -54,6 +55,7 @@ public class CborXContent implements XContent {
|
|||
cborFactory.configure(CBORFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now...
|
||||
// Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.cbor.CBORGenerator#close() method
|
||||
cborFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);
|
||||
cborFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, XContent.isStrictDuplicateDetectionEnabled());
|
||||
cborXContent = new CborXContent();
|
||||
}
|
||||
|
||||
|
|
|
@ -23,7 +23,6 @@ import com.fasterxml.jackson.core.JsonEncoding;
|
|||
import com.fasterxml.jackson.core.JsonFactory;
|
||||
import com.fasterxml.jackson.core.JsonGenerator;
|
||||
import com.fasterxml.jackson.core.JsonParser;
|
||||
import org.elasticsearch.common.Booleans;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.io.FastStringReader;
|
||||
import org.elasticsearch.common.xcontent.XContent;
|
||||
|
@ -50,27 +49,6 @@ public class JsonXContent implements XContent {
|
|||
|
||||
public static final JsonXContent jsonXContent;
|
||||
|
||||
/*
|
||||
* NOTE: This comment is only meant for maintainers of the Elasticsearch code base and is intentionally not a Javadoc comment as it
|
||||
* describes an undocumented system property.
|
||||
*
|
||||
*
|
||||
* Determines whether the JSON parser will always check for duplicate keys in JSON content. This behavior is enabled by default but
|
||||
* can be disabled by setting the otherwise undocumented system property "es.json.strict_duplicate_detection" to "false".
|
||||
*
|
||||
* Before we've enabled this mode, we had custom duplicate checks in various parts of the code base. As the user can still disable this
|
||||
* mode and fall back to the legacy duplicate checks, we still need to keep the custom duplicate checks around and we also need to keep
|
||||
* the tests around.
|
||||
*
|
||||
* If this fallback via system property is removed one day in the future you can remove all tests that call this method and also remove
|
||||
* the corresponding custom duplicate check code.
|
||||
*
|
||||
*/
|
||||
public static boolean isStrictDuplicateDetectionEnabled() {
|
||||
// Don't allow duplicate keys in JSON content by default but let the user opt out
|
||||
return Booleans.parseBooleanExact(System.getProperty("es.json.strict_duplicate_detection", "true"));
|
||||
}
|
||||
|
||||
static {
|
||||
jsonFactory = new JsonFactory();
|
||||
jsonFactory.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true);
|
||||
|
@ -78,7 +56,7 @@ public class JsonXContent implements XContent {
|
|||
jsonFactory.configure(JsonFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now...
|
||||
// Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.core.json.UTF8JsonGenerator#close() method
|
||||
jsonFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);
|
||||
jsonFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, isStrictDuplicateDetectionEnabled());
|
||||
jsonFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, XContent.isStrictDuplicateDetectionEnabled());
|
||||
jsonXContent = new JsonXContent();
|
||||
}
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.common.xcontent.smile;
|
|||
|
||||
import com.fasterxml.jackson.core.JsonEncoding;
|
||||
import com.fasterxml.jackson.core.JsonGenerator;
|
||||
import com.fasterxml.jackson.core.JsonParser;
|
||||
import com.fasterxml.jackson.dataformat.smile.SmileFactory;
|
||||
import com.fasterxml.jackson.dataformat.smile.SmileGenerator;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
@ -55,6 +56,7 @@ public class SmileXContent implements XContent {
|
|||
smileFactory.configure(SmileFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now...
|
||||
// Do not automatically close unclosed objects/arrays in com.fasterxml.jackson.dataformat.smile.SmileGenerator#close() method
|
||||
smileFactory.configure(JsonGenerator.Feature.AUTO_CLOSE_JSON_CONTENT, false);
|
||||
smileFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, XContent.isStrictDuplicateDetectionEnabled());
|
||||
smileXContent = new SmileXContent();
|
||||
}
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.common.xcontent.yaml;
|
||||
|
||||
import com.fasterxml.jackson.core.JsonEncoding;
|
||||
import com.fasterxml.jackson.core.JsonParser;
|
||||
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
@ -50,6 +51,7 @@ public class YamlXContent implements XContent {
|
|||
|
||||
static {
|
||||
yamlFactory = new YAMLFactory();
|
||||
yamlFactory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, XContent.isStrictDuplicateDetectionEnabled());
|
||||
yamlXContent = new YamlXContent();
|
||||
}
|
||||
|
||||
|
|
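To illustrate what the STRICT_DUPLICATE_DETECTION feature wired into these factories buys (and what setting es.xcontent.strict_duplicate_detection to "false" would give up), here is a plain-Jackson sketch with an invented document; it does not go through the XContent wrappers.

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonParser;

public class DuplicateKeySketch {
    public static void main(String[] args) throws Exception {
        JsonFactory factory = new JsonFactory();
        // the same parser feature the static initializers above enable by default
        factory.configure(JsonParser.Feature.STRICT_DUPLICATE_DETECTION, true);
        try (JsonParser parser = factory.createParser("{\"field\":1,\"field\":2}")) {
            while (parser.nextToken() != null) {
                // advancing past the second "field" key throws a JsonParseException
            }
        } catch (JsonParseException e) {
            System.out.println("rejected duplicate key: " + e.getMessage());
        }
    }
}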
|
@ -29,7 +29,6 @@ import org.elasticsearch.cluster.Diff;
|
|||
import org.elasticsearch.cluster.IncompatibleClusterStateVersionException;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.service.ClusterStateStatus;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.compress.Compressor;
|
||||
|
|
|
@ -20,9 +20,12 @@
|
|||
package org.elasticsearch.gateway;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.metadata.IndexGraveyard;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -48,7 +51,7 @@ import static java.util.Collections.unmodifiableMap;
|
|||
* their state written on disk, but don't exist in the metadata of the cluster), and importing
|
||||
* them into the cluster.
|
||||
*/
|
||||
public class DanglingIndicesState extends AbstractComponent {
|
||||
public class DanglingIndicesState extends AbstractComponent implements ClusterStateListener {
|
||||
|
||||
private final NodeEnvironment nodeEnv;
|
||||
private final MetaStateService metaStateService;
|
||||
|
@ -58,11 +61,12 @@ public class DanglingIndicesState extends AbstractComponent {
|
|||
|
||||
@Inject
|
||||
public DanglingIndicesState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService,
|
||||
LocalAllocateDangledIndices allocateDangledIndices) {
|
||||
LocalAllocateDangledIndices allocateDangledIndices, ClusterService clusterService) {
|
||||
super(settings);
|
||||
this.nodeEnv = nodeEnv;
|
||||
this.metaStateService = metaStateService;
|
||||
this.allocateDangledIndices = allocateDangledIndices;
|
||||
clusterService.addListener(this);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -174,4 +178,11 @@ public class DanglingIndicesState extends AbstractComponent {
|
|||
logger.warn("failed to send allocate dangled", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clusterChanged(ClusterChangedEvent event) {
|
||||
if (event.state().blocks().disableStatePersistence() == false) {
|
||||
processDanglingIndices(event.state().metaData());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,7 +25,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
|
|||
import org.elasticsearch.action.FailedNodeException;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.ClusterStateApplier;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
|
@ -40,7 +40,7 @@ import java.util.Arrays;
|
|||
import java.util.Map;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
public class Gateway extends AbstractComponent implements ClusterStateListener {
|
||||
public class Gateway extends AbstractComponent implements ClusterStateApplier {
|
||||
|
||||
private final ClusterService clusterService;
|
||||
|
||||
|
@ -60,7 +60,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
|
|||
this.metaState = metaState;
|
||||
this.listGatewayMetaState = listGatewayMetaState;
|
||||
this.minimumMasterNodesProvider = discovery::getMinimumMasterNodes;
|
||||
clusterService.addLast(this);
|
||||
clusterService.addLowPriorityApplier(this);
|
||||
}
|
||||
|
||||
public void performStateRecovery(final GatewayStateRecoveredListener listener) throws GatewayException {
|
||||
|
@ -176,10 +176,10 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void clusterChanged(final ClusterChangedEvent event) {
|
||||
public void applyClusterState(final ClusterChangedEvent event) {
|
||||
// order is important, first metaState, and then shardsState
|
||||
// so dangling indices will be recorded
|
||||
metaState.clusterChanged(event);
|
||||
metaState.applyClusterState(event);
|
||||
}
|
||||
|
||||
public interface GatewayStateRecoveredListener {
|
||||
|
|
|
@ -22,8 +22,6 @@ package org.elasticsearch.gateway;
|
|||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.action.support.nodes.BaseNodeResponse;
|
||||
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingNodes;
|
||||
import org.elasticsearch.cluster.routing.RoutingService;
|
||||
|
@ -79,24 +77,21 @@ public class GatewayAllocator extends AbstractComponent {
|
|||
|
||||
public void setReallocation(final ClusterService clusterService, final RoutingService routingService) {
|
||||
this.routingService = routingService;
|
||||
clusterService.add(new ClusterStateListener() {
|
||||
@Override
|
||||
public void clusterChanged(ClusterChangedEvent event) {
|
||||
boolean cleanCache = false;
|
||||
DiscoveryNode localNode = event.state().nodes().getLocalNode();
|
||||
if (localNode != null) {
|
||||
if (localNode.isMasterNode() && event.localNodeMaster() == false) {
|
||||
cleanCache = true;
|
||||
}
|
||||
} else {
|
||||
clusterService.addStateApplier(event -> {
|
||||
boolean cleanCache = false;
|
||||
DiscoveryNode localNode = event.state().nodes().getLocalNode();
|
||||
if (localNode != null) {
|
||||
if (localNode.isMasterNode() && event.localNodeMaster() == false) {
|
||||
cleanCache = true;
|
||||
}
|
||||
if (cleanCache) {
|
||||
Releasables.close(asyncFetchStarted.values());
|
||||
asyncFetchStarted.clear();
|
||||
Releasables.close(asyncFetchStore.values());
|
||||
asyncFetchStore.clear();
|
||||
}
|
||||
} else {
|
||||
cleanCache = true;
|
||||
}
|
||||
if (cleanCache) {
|
||||
Releasables.close(asyncFetchStarted.values());
|
||||
asyncFetchStarted.clear();
|
||||
Releasables.close(asyncFetchStore.values());
|
||||
asyncFetchStore.clear();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
|
|
@ -23,7 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
|
|||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.ClusterStateApplier;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
|
||||
|
@ -54,11 +54,10 @@ import java.util.Set;
|
|||
import static java.util.Collections.emptySet;
|
||||
import static java.util.Collections.unmodifiableSet;
|
||||
|
||||
public class GatewayMetaState extends AbstractComponent implements ClusterStateListener {
|
||||
public class GatewayMetaState extends AbstractComponent implements ClusterStateApplier {
|
||||
|
||||
private final NodeEnvironment nodeEnv;
|
||||
private final MetaStateService metaStateService;
|
||||
private final DanglingIndicesState danglingIndicesState;
|
||||
|
||||
@Nullable
|
||||
private volatile MetaData previousMetaData;
|
||||
|
@ -67,13 +66,12 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
|
||||
@Inject
|
||||
public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService,
|
||||
DanglingIndicesState danglingIndicesState, TransportNodesListGatewayMetaState nodesListGatewayMetaState,
|
||||
TransportNodesListGatewayMetaState nodesListGatewayMetaState,
|
||||
MetaDataIndexUpgradeService metaDataIndexUpgradeService, MetaDataUpgrader metaDataUpgrader)
|
||||
throws Exception {
|
||||
super(settings);
|
||||
this.nodeEnv = nodeEnv;
|
||||
this.metaStateService = metaStateService;
|
||||
this.danglingIndicesState = danglingIndicesState;
|
||||
nodesListGatewayMetaState.init(this);
|
||||
|
||||
if (DiscoveryNode.isDataNode(settings)) {
|
||||
|
@ -117,7 +115,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
}
|
||||
|
||||
@Override
|
||||
public void clusterChanged(ClusterChangedEvent event) {
|
||||
public void applyClusterState(ClusterChangedEvent event) {
|
||||
|
||||
final ClusterState state = event.state();
|
||||
if (state.blocks().disableStatePersistence()) {
|
||||
|
@ -181,7 +179,6 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
}
|
||||
}
|
||||
|
||||
danglingIndicesState.processDanglingIndices(newMetaData);
|
||||
if (success) {
|
||||
previousMetaData = newMetaData;
|
||||
previouslyWrittenIndices = unmodifiableSet(relevantIndices);
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.gateway;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
|
@ -130,12 +129,13 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste
|
|||
|
||||
@Override
|
||||
protected void doStart() {
|
||||
clusterService.addLast(this);
|
||||
// use post applied so that the state will be visible to the background recovery thread we spawn in performStateRecovery
|
||||
clusterService.addListener(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStop() {
|
||||
clusterService.remove(this);
|
||||
clusterService.removeListener(this);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -30,6 +30,7 @@ import org.elasticsearch.index.shard.IndexEventListener;
|
|||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.IndexShardState;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
|
@ -176,48 +177,24 @@ final class CompositeIndexEventListener implements IndexEventListener {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void beforeIndexClosed(IndexService indexService) {
|
||||
public void beforeIndexRemoved(IndexService indexService, IndexRemovalReason reason) {
|
||||
for (IndexEventListener listener : listeners) {
|
||||
try {
|
||||
listener.beforeIndexClosed(indexService);
|
||||
listener.beforeIndexRemoved(indexService, reason);
|
||||
} catch (Exception e) {
|
||||
logger.warn("failed to invoke before index closed callback", e);
|
||||
logger.warn("failed to invoke before index removed callback", e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void beforeIndexDeleted(IndexService indexService) {
|
||||
public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRemovalReason reason) {
|
||||
for (IndexEventListener listener : listeners) {
|
||||
try {
|
||||
listener.beforeIndexDeleted(indexService);
|
||||
listener.afterIndexRemoved(index, indexSettings, reason);
|
||||
} catch (Exception e) {
|
||||
logger.warn("failed to invoke before index deleted callback", e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void afterIndexDeleted(Index index, Settings indexSettings) {
|
||||
for (IndexEventListener listener : listeners) {
|
||||
try {
|
||||
listener.afterIndexDeleted(index, indexSettings);
|
||||
} catch (Exception e) {
|
||||
logger.warn("failed to invoke after index deleted callback", e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void afterIndexClosed(Index index, Settings indexSettings) {
|
||||
for (IndexEventListener listener : listeners) {
|
||||
try {
|
||||
listener.afterIndexClosed(index, indexSettings);
|
||||
} catch (Exception e) {
|
||||
logger.warn("failed to invoke after index closed callback", e);
|
||||
logger.warn("failed to invoke after index removed callback", e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
|
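A hedged fragment showing how a listener might be written against the consolidated removal callbacks above; only the two method signatures visible in this diff are assumed, the listener itself and the work in its bodies are invented, and imports are omitted.

IndexEventListener listener = new IndexEventListener() {
    @Override
    public void beforeIndexRemoved(IndexService indexService, IndexRemovalReason reason) {
        // e.g. flush per-index bookkeeping before the index goes away, whatever the reason
    }

    @Override
    public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRemovalReason reason) {
        // e.g. drop caches keyed by the index once it has been removed
    }
};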
|
@ -471,7 +471,6 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
|
|||
shardId, indexSettings, indexCache.bitsetFilterCache(), indexFieldData, mapperService(),
|
||||
similarityService(), scriptService, queryRegistry,
|
||||
client, indexReader,
|
||||
clusterService.state(),
|
||||
nowInMillis);
|
||||
}
|
||||
|
||||
|
|
|
@ -106,13 +106,6 @@ public class QueryCacheStats implements Streamable, ToXContent {
|
|||
return cacheCount - cacheSize;
|
||||
}
|
||||
|
||||
public static QueryCacheStats readQueryCacheStats(StreamInput in) throws IOException {
|
||||
QueryCacheStats stats = new QueryCacheStats();
|
||||
stats.readFrom(in);
|
||||
return stats;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
ramBytesUsed = in.readLong();
|
||||
|
|
|
@ -49,13 +49,6 @@ public final class CommitStats implements Streamable, ToXContent {
|
|||
}
|
||||
|
||||
private CommitStats() {
|
||||
|
||||
}
|
||||
|
||||
public static CommitStats readCommitStatsFrom(StreamInput in) throws IOException {
|
||||
CommitStats commitStats = new CommitStats();
|
||||
commitStats.readFrom(in);
|
||||
return commitStats;
|
||||
}
|
||||
|
||||
public static CommitStats readOptionalCommitStatsFrom(StreamInput in) throws IOException {
|
||||
|
|
|
@ -78,6 +78,7 @@ import java.util.Arrays;
|
|||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
@ -86,6 +87,7 @@ import java.util.concurrent.locks.Lock;
|
|||
import java.util.concurrent.locks.ReentrantLock;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.LongSupplier;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
public class InternalEngine extends Engine {
|
||||
|
||||
|
@ -175,9 +177,18 @@ public class InternalEngine extends Engine {
|
|||
throw new IllegalArgumentException(openMode.toString());
|
||||
}
|
||||
logger.trace("recovered [{}]", seqNoStats);
|
||||
indexWriter = writer;
|
||||
seqNoService = sequenceNumberService(shardId, engineConfig.getIndexSettings(), seqNoStats);
|
||||
translog = openTranslog(engineConfig, writer, seqNoService::getGlobalCheckpoint);
|
||||
// norelease
|
||||
/*
|
||||
* We have no guarantees that all operations above the local checkpoint are in the Lucene commit or the translog. This means
|
||||
* that there might be operations greater than the local checkpoint that will not be replayed. Here we force the local
|
||||
* checkpoint to the maximum sequence number in the commit (at the potential expense of correctness).
|
||||
*/
|
||||
while (seqNoService().getLocalCheckpoint() < seqNoService().getMaxSeqNo()) {
|
||||
seqNoService().markSeqNoAsCompleted(seqNoService().getLocalCheckpoint() + 1);
|
||||
}
|
||||
indexWriter = writer;
|
||||
translog = openTranslog(engineConfig, writer, () -> seqNoService().getGlobalCheckpoint());
|
||||
assert translog.getGeneration() != null;
|
||||
} catch (IOException | TranslogCorruptedException e) {
|
||||
throw new EngineCreationFailureException(shardId, "failed to create engine", e);
|
||||
|
@ -412,7 +423,7 @@ public class InternalEngine extends Engine {
|
|||
|
||||
@Override
|
||||
public GetResult get(Get get, Function<String, Searcher> searcherFactory) throws EngineException {
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
try (ReleasableLock ignored = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
if (get.realtime()) {
|
||||
VersionValue versionValue = versionMap.getUnderLock(get.uid());
|
||||
|
@ -434,11 +445,28 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
}
|
||||
|
||||
private boolean checkVersionConflict(
|
||||
final Operation op,
|
||||
final long currentVersion,
|
||||
final long expectedVersion,
|
||||
final boolean deleted) {
|
||||
/**
|
||||
* Checks for version conflicts. If a version conflict exists, the optional return value represents the operation result. Otherwise, if
|
||||
* no conflicts are found, the optional return value is not present.
|
||||
*
|
||||
* @param <T> the result type
|
||||
* @param op the operation
|
||||
* @param currentVersion the current version
|
||||
* @param expectedVersion the expected version
|
||||
* @param deleted {@code true} if the current version is not found or represents a delete
|
||||
* @param onSuccess if there is a version conflict that can be ignored, the result of the operation
|
||||
* @param onFailure if there is a version conflict that can not be ignored, the result of the operation
|
||||
* @return if there is a version conflict, the optional value is present and represents the operation result, otherwise the return value
|
||||
* is not present
|
||||
*/
|
||||
private <T extends Result> Optional<T> checkVersionConflict(
|
||||
final Operation op,
|
||||
final long currentVersion,
|
||||
final long expectedVersion,
|
||||
final boolean deleted,
|
||||
final Supplier<T> onSuccess,
|
||||
final Function<VersionConflictEngineException, T> onFailure) {
|
||||
final T result;
|
||||
if (op.versionType() == VersionType.FORCE) {
|
||||
if (engineConfig.getIndexSettings().getIndexVersionCreated().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
// If index was created in 5.0 or later, 'force' is not allowed at all
|
||||
|
@ -452,14 +480,22 @@ public class InternalEngine extends Engine {
|
|||
if (op.versionType().isVersionConflictForWrites(currentVersion, expectedVersion, deleted)) {
|
||||
if (op.origin().isRecovery()) {
|
||||
// version conflict, but okay
|
||||
return true;
|
||||
result = onSuccess.get();
|
||||
} else {
|
||||
// fatal version conflict
|
||||
throw new VersionConflictEngineException(shardId, op.type(), op.id(),
|
||||
final VersionConflictEngineException e =
|
||||
new VersionConflictEngineException(
|
||||
shardId,
|
||||
op.type(),
|
||||
op.id(),
|
||||
op.versionType().explainConflictForWrites(currentVersion, expectedVersion, deleted));
|
||||
result = onFailure.apply(e);
|
||||
}
|
||||
|
||||
return Optional.of(result);
|
||||
} else {
|
||||
return Optional.empty();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private long checkDeletedAndGCed(VersionValue versionValue) {
|
||||
|
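The boolean-plus-exception flow was replaced above by an Optional-returning check. A self-contained sketch of that pattern with simplified, invented types; these are not the engine's real signatures.

import java.util.Optional;
import java.util.function.Function;
import java.util.function.Supplier;

public class ConflictCheckSketch {
    // present = a conflict was handled (either ignored or turned into a failure result),
    // empty = no conflict, the caller proceeds with the write
    static <T> Optional<T> checkConflict(boolean conflict, boolean fromRecovery,
                                         Supplier<T> onIgnorableConflict,
                                         Function<RuntimeException, T> onFatalConflict) {
        if (conflict == false) {
            return Optional.empty();
        }
        if (fromRecovery) {
            // conflicts during recovery are expected and can be skipped
            return Optional.of(onIgnorableConflict.get());
        }
        return Optional.of(onFatalConflict.apply(new RuntimeException("version conflict")));
    }

    public static void main(String[] args) {
        Optional<String> result =
                checkConflict(true, true, () -> "skipped", e -> "failed: " + e.getMessage());
        System.out.println(result.orElse("proceed with write"));
    }
}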
@ -475,7 +511,7 @@ public class InternalEngine extends Engine {
|
|||
@Override
|
||||
public IndexResult index(Index index) {
|
||||
IndexResult result;
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
try (ReleasableLock ignored = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
if (index.origin().isRecovery()) {
|
||||
// Don't throttle recovery operations
|
||||
|
@ -573,7 +609,7 @@ public class InternalEngine extends Engine {
|
|||
assert assertSequenceNumber(index.origin(), index.seqNo());
|
||||
final Translog.Location location;
|
||||
final long updatedVersion;
|
||||
IndexResult indexResult = null;
|
||||
long seqNo = index.seqNo();
|
||||
try (Releasable ignored = acquireLock(index.uid())) {
|
||||
lastWriteNanos = index.startTime();
|
||||
/* if we have an autoGeneratedID that comes into the engine we can potentially optimize
|
||||
|
@ -638,28 +674,33 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
}
|
||||
final long expectedVersion = index.version();
|
||||
if (checkVersionConflict(index, currentVersion, expectedVersion, deleted)) {
|
||||
// skip index operation because of version conflict on recovery
|
||||
indexResult = new IndexResult(expectedVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, false);
|
||||
final Optional<IndexResult> checkVersionConflictResult =
|
||||
checkVersionConflict(
|
||||
index,
|
||||
currentVersion,
|
||||
expectedVersion,
|
||||
deleted,
|
||||
() -> new IndexResult(currentVersion, index.seqNo(), false),
|
||||
e -> new IndexResult(e, currentVersion, index.seqNo()));
|
||||
|
||||
final IndexResult indexResult;
|
||||
if (checkVersionConflictResult.isPresent()) {
|
||||
indexResult = checkVersionConflictResult.get();
|
||||
} else {
|
||||
final long seqNo;
|
||||
// no version conflict
|
||||
if (index.origin() == Operation.Origin.PRIMARY) {
|
||||
seqNo = seqNoService.generateSeqNo();
|
||||
} else {
|
||||
seqNo = index.seqNo();
|
||||
seqNo = seqNoService().generateSeqNo();
|
||||
}
|
||||
|
||||
/**
|
||||
* Update the document's sequence number and primary term; the sequence number here is derived here from either the sequence
|
||||
* number service if this is on the primary, or the existing document's sequence number if this is on the replica. The
|
||||
* primary term here has already been set, see IndexShard#prepareIndex where the Engine$Index operation is created.
|
||||
*/
|
||||
index.parsedDoc().updateSeqID(seqNo, index.primaryTerm());
|
||||
updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion);
|
||||
index.parsedDoc().version().setLongValue(updatedVersion);
|
||||
|
||||
// Update the document's sequence number and primary term, the
|
||||
// sequence number here is derived here from either the sequence
|
||||
// number service if this is on the primary, or the existing
|
||||
// document's sequence number if this is on the replica. The
|
||||
// primary term here has already been set, see
|
||||
// IndexShard.prepareIndex where the Engine.Index operation is
|
||||
// created
|
||||
index.parsedDoc().updateSeqID(seqNo, index.primaryTerm());
|
||||
|
||||
if (currentVersion == Versions.NOT_FOUND && forceUpdateDocument == false) {
|
||||
// document does not exists, we can optimize for create, but double check if assertions are running
|
||||
assert assertDocDoesNotExist(index, canOptimizeAddDocument == false);
|
||||
|
@ -669,8 +710,8 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
indexResult = new IndexResult(updatedVersion, seqNo, deleted);
|
||||
location = index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY
|
||||
? translog.add(new Translog.Index(index, indexResult))
|
||||
: null;
|
||||
? translog.add(new Translog.Index(index, indexResult))
|
||||
: null;
|
||||
versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion));
|
||||
indexResult.setTranslogLocation(location);
|
||||
}
|
||||
|
@ -678,8 +719,8 @@ public class InternalEngine extends Engine {
|
|||
indexResult.freeze();
|
||||
return indexResult;
|
||||
} finally {
|
||||
if (indexResult != null && indexResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
|
||||
seqNoService.markSeqNoAsCompleted(indexResult.getSeqNo());
|
||||
if (seqNo != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
|
||||
seqNoService().markSeqNoAsCompleted(seqNo);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -724,7 +765,7 @@ public class InternalEngine extends Engine {
|
|||
@Override
|
||||
public DeleteResult delete(Delete delete) {
|
||||
DeleteResult result;
|
||||
try (ReleasableLock lock = readLock.acquire()) {
|
||||
try (ReleasableLock ignored = readLock.acquire()) {
|
||||
ensureOpen();
|
||||
// NOTE: we don't throttle this when merges fall behind because delete-by-id does not create new segments:
|
||||
result = innerDelete(delete);
|
||||
|
@ -748,7 +789,7 @@ public class InternalEngine extends Engine {
|
|||
final Translog.Location location;
|
||||
final long updatedVersion;
|
||||
final boolean found;
|
||||
DeleteResult deleteResult = null;
|
||||
long seqNo = delete.seqNo();
|
||||
try (Releasable ignored = acquireLock(delete.uid())) {
|
||||
lastWriteNanos = delete.startTime();
|
||||
final long currentVersion;
|
||||
|
@ -764,32 +805,40 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
|
||||
final long expectedVersion = delete.version();
|
||||
if (checkVersionConflict(delete, currentVersion, expectedVersion, deleted)) {
|
||||
// skip executing delete because of version conflict on recovery
|
||||
deleteResult = new DeleteResult(expectedVersion, SequenceNumbersService.UNASSIGNED_SEQ_NO, true);
|
||||
|
||||
final Optional<DeleteResult> result =
|
||||
checkVersionConflict(
|
||||
delete,
|
||||
currentVersion,
|
||||
expectedVersion,
|
||||
deleted,
|
||||
() -> new DeleteResult(expectedVersion, delete.seqNo(), true),
|
||||
e -> new DeleteResult(e, expectedVersion, delete.seqNo()));
|
||||
|
||||
final DeleteResult deleteResult;
|
||||
if (result.isPresent()) {
|
||||
deleteResult = result.get();
|
||||
} else {
|
||||
final long seqNo;
|
||||
if (delete.origin() == Operation.Origin.PRIMARY) {
|
||||
seqNo = seqNoService.generateSeqNo();
|
||||
} else {
|
||||
seqNo = delete.seqNo();
|
||||
seqNo = seqNoService().generateSeqNo();
|
||||
}
|
||||
|
||||
updatedVersion = delete.versionType().updateVersion(currentVersion, expectedVersion);
|
||||
found = deleteIfFound(delete.uid(), currentVersion, deleted, versionValue);
|
||||
deleteResult = new DeleteResult(updatedVersion, seqNo, found);
|
||||
location = delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY
|
||||
? translog.add(new Translog.Delete(delete, deleteResult))
|
||||
: null;
|
||||
? translog.add(new Translog.Delete(delete, deleteResult))
|
||||
: null;
|
||||
versionMap.putUnderLock(delete.uid().bytes(),
|
||||
new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis()));
|
||||
new DeleteVersionValue(updatedVersion, engineConfig.getThreadPool().estimatedTimeInMillis()));
|
||||
deleteResult.setTranslogLocation(location);
|
||||
}
|
||||
deleteResult.setTook(System.nanoTime() - delete.startTime());
|
||||
deleteResult.freeze();
|
||||
return deleteResult;
|
||||
} finally {
|
||||
if (deleteResult != null && deleteResult.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
|
||||
seqNoService.markSeqNoAsCompleted(deleteResult.getSeqNo());
|
||||
if (seqNo != SequenceNumbersService.UNASSIGNED_SEQ_NO) {
|
||||
seqNoService().markSeqNoAsCompleted(seqNo);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -286,12 +286,6 @@ public class SegmentsStats implements Streamable, ToXContent {
|
|||
return maxUnsafeAutoIdTimestamp;
|
||||
}
|
||||
|
||||
public static SegmentsStats readSegmentsStats(StreamInput in) throws IOException {
|
||||
SegmentsStats stats = new SegmentsStats();
|
||||
stats.readFrom(in);
|
||||
return stats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(Fields.SEGMENTS);
|
||||
|
@ -391,10 +385,9 @@ public class SegmentsStats implements Streamable, ToXContent {
|
|||
out.writeLong(maxUnsafeAutoIdTimestamp);
|
||||
|
||||
out.writeVInt(fileSizes.size());
|
||||
for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) {
|
||||
ObjectObjectCursor<String, Long> entry = it.next();
|
||||
for (ObjectObjectCursor<String, Long> entry : fileSizes) {
|
||||
out.writeString(entry.key);
|
||||
out.writeLong(entry.value);
|
||||
out.writeLong(entry.value.longValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
|
||||
package org.elasticsearch.index.fielddata;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectLongHashMap;
|
||||
import org.elasticsearch.common.FieldMemoryStats;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
@ -29,19 +29,25 @@ import org.elasticsearch.common.xcontent.ToXContent;
|
|||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Objects;
|
||||
|
||||
public class FieldDataStats implements Streamable, ToXContent {
|
||||
|
||||
private static final String FIELDDATA = "fielddata";
|
||||
private static final String MEMORY_SIZE = "memory_size";
|
||||
private static final String MEMORY_SIZE_IN_BYTES = "memory_size_in_bytes";
|
||||
private static final String EVICTIONS = "evictions";
|
||||
private static final String FIELDS = "fields";
|
||||
long memorySize;
|
||||
long evictions;
|
||||
@Nullable
|
||||
ObjectLongHashMap<String> fields;
|
||||
FieldMemoryStats fields;
|
||||
|
||||
public FieldDataStats() {
|
||||
|
||||
}
|
||||
|
||||
public FieldDataStats(long memorySize, long evictions, @Nullable ObjectLongHashMap<String> fields) {
|
||||
public FieldDataStats(long memorySize, long evictions, @Nullable FieldMemoryStats fields) {
|
||||
this.memorySize = memorySize;
|
||||
this.evictions = evictions;
|
||||
this.fields = fields;
|
||||
|
@ -52,16 +58,9 @@ public class FieldDataStats implements Streamable, ToXContent {
|
|||
this.evictions += stats.evictions;
|
||||
if (stats.fields != null) {
|
||||
if (fields == null) {
|
||||
fields = stats.fields.clone();
|
||||
fields = stats.fields.copy();
|
||||
} else {
|
||||
assert !stats.fields.containsKey(null);
|
||||
final Object[] keys = stats.fields.keys;
|
||||
final long[] values = stats.fields.values;
|
||||
for (int i = 0; i < keys.length; i++) {
|
||||
if (keys[i] != null) {
|
||||
fields.addTo((String) keys[i], values[i]);
|
||||
}
|
||||
}
|
||||
fields.add(stats.fields);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -79,78 +78,48 @@ public class FieldDataStats implements Streamable, ToXContent {
|
|||
}
|
||||
|
||||
@Nullable
|
||||
public ObjectLongHashMap<String> getFields() {
|
||||
public FieldMemoryStats getFields() {
|
||||
return fields;
|
||||
}
|
||||
|
||||
public static FieldDataStats readFieldDataStats(StreamInput in) throws IOException {
|
||||
FieldDataStats stats = new FieldDataStats();
|
||||
stats.readFrom(in);
|
||||
return stats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
memorySize = in.readVLong();
|
||||
evictions = in.readVLong();
|
||||
if (in.readBoolean()) {
|
||||
int size = in.readVInt();
|
||||
fields = new ObjectLongHashMap<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
fields.put(in.readString(), in.readVLong());
|
||||
}
|
||||
}
|
||||
fields = in.readOptionalWriteable(FieldMemoryStats::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeVLong(memorySize);
|
||||
out.writeVLong(evictions);
|
||||
if (fields == null) {
|
||||
out.writeBoolean(false);
|
||||
} else {
|
||||
out.writeBoolean(true);
|
||||
out.writeVInt(fields.size());
|
||||
assert !fields.containsKey(null);
|
||||
final Object[] keys = fields.keys;
|
||||
final long[] values = fields.values;
|
||||
for (int i = 0; i < keys.length; i++) {
|
||||
if (keys[i] != null) {
|
||||
out.writeString((String) keys[i]);
|
||||
out.writeVLong(values[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
out.writeOptionalWriteable(fields);
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(Fields.FIELDDATA);
|
||||
builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize);
|
||||
builder.field(Fields.EVICTIONS, getEvictions());
|
||||
builder.startObject(FIELDDATA);
|
||||
builder.byteSizeField(MEMORY_SIZE_IN_BYTES, MEMORY_SIZE, memorySize);
|
||||
builder.field(EVICTIONS, getEvictions());
|
||||
if (fields != null) {
|
||||
builder.startObject(Fields.FIELDS);
|
||||
assert !fields.containsKey(null);
|
||||
final Object[] keys = fields.keys;
|
||||
final long[] values = fields.values;
|
||||
for (int i = 0; i < keys.length; i++) {
|
||||
if (keys[i] != null) {
|
||||
builder.startObject((String) keys[i]);
|
||||
builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, values[i]);
|
||||
builder.endObject();
|
||||
}
|
||||
}
|
||||
builder.endObject();
|
||||
fields.toXContent(builder, FIELDS, MEMORY_SIZE_IN_BYTES, MEMORY_SIZE);
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
static final class Fields {
|
||||
static final String FIELDDATA = "fielddata";
|
||||
static final String MEMORY_SIZE = "memory_size";
|
||||
static final String MEMORY_SIZE_IN_BYTES = "memory_size_in_bytes";
|
||||
static final String EVICTIONS = "evictions";
|
||||
static final String FIELDS = "fields";
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) return true;
|
||||
if (o == null || getClass() != o.getClass()) return false;
|
||||
FieldDataStats that = (FieldDataStats) o;
|
||||
return memorySize == that.memorySize &&
|
||||
evictions == that.evictions &&
|
||||
Objects.equals(fields, that.fields);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(memorySize, evictions, fields);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.index.fielddata;
|
|||
|
||||
import com.carrotsearch.hppc.ObjectLongHashMap;
|
||||
import org.apache.lucene.util.Accountable;
|
||||
import org.elasticsearch.common.FieldMemoryStats;
|
||||
import org.elasticsearch.common.metrics.CounterMetric;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
|
@ -45,7 +46,8 @@ public class ShardFieldData implements IndexFieldDataCache.Listener {
|
|||
}
|
||||
}
|
||||
}
|
||||
return new FieldDataStats(totalMetric.count(), evictionsMetric.count(), fieldTotals);
|
||||
return new FieldDataStats(totalMetric.count(), evictionsMetric.count(), fieldTotals == null ? null :
|
||||
new FieldMemoryStats(fieldTotals));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -81,12 +81,6 @@ public class FlushStats implements Streamable, ToXContent {
|
|||
return new TimeValue(totalTimeInMillis);
|
||||
}
|
||||
|
||||
public static FlushStats readFlushStats(StreamInput in) throws IOException {
|
||||
FlushStats flushStats = new FlushStats();
|
||||
flushStats.readFrom(in);
|
||||
return flushStats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(Fields.FLUSH);
|
||||
|
|
|
@ -134,12 +134,6 @@ public class GetStats implements Streamable, ToXContent {
|
|||
static final String CURRENT = "current";
|
||||
}
|
||||
|
||||
public static GetStats readGetStats(StreamInput in) throws IOException {
|
||||
GetStats stats = new GetStats();
|
||||
stats.readFrom(in);
|
||||
return stats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
existsCount = in.readVLong();
|
||||
|
|
|
@ -34,6 +34,7 @@ import org.elasticsearch.index.query.QueryShardContext;
|
|||
import org.elasticsearch.index.similarity.SimilarityService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -100,7 +101,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
|
|||
|
||||
public static class TypeParser implements MetadataFieldMapper.TypeParser {
|
||||
@Override
|
||||
public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node,
|
||||
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node,
|
||||
ParserContext parserContext) throws MapperParsingException {
|
||||
Builder builder = new Builder(parserContext.mapperService().fullName(NAME));
|
||||
builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer());
|
||||
|
@ -141,8 +142,14 @@ public class AllFieldMapper extends MetadataFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
|
||||
return new AllFieldMapper(indexSettings, fieldType);
|
||||
public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
|
||||
final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
|
||||
if (fieldType != null) {
|
||||
return new AllFieldMapper(indexSettings, fieldType);
|
||||
} else {
|
||||
return parse(NAME, Collections.emptyMap(), context)
|
||||
.build(new BuilderContext(indexSettings, new ContentPath(1)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -179,7 +186,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
|
|||
private EnabledAttributeMapper enabledState;
|
||||
|
||||
private AllFieldMapper(Settings indexSettings, MappedFieldType existing) {
|
||||
this(existing == null ? Defaults.FIELD_TYPE.clone() : existing.clone(), Defaults.ENABLED, indexSettings);
|
||||
this(existing.clone(), Defaults.ENABLED, indexSettings);
|
||||
}
|
||||
|
||||
private AllFieldMapper(MappedFieldType fieldType, EnabledAttributeMapper enabled, Settings indexSettings) {
|
||||
|
|
|
@ -74,7 +74,8 @@ public class DocumentMapper implements ToXContent {
|
|||
final MetadataFieldMapper metadataMapper;
|
||||
if (existingMetadataMapper == null) {
|
||||
final TypeParser parser = entry.getValue();
|
||||
metadataMapper = parser.getDefault(indexSettings, mapperService.fullName(name), builder.name());
|
||||
metadataMapper = parser.getDefault(mapperService.fullName(name),
|
||||
mapperService.documentMapperParser().parserContext(builder.name()));
|
||||
} else {
|
||||
metadataMapper = existingMetadataMapper;
|
||||
}
|
||||
|
|
|
@ -30,6 +30,7 @@ import org.elasticsearch.index.query.QueryShardContext;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -98,7 +99,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
|
|||
|
||||
public static class TypeParser implements MetadataFieldMapper.TypeParser {
|
||||
@Override
|
||||
public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
Builder builder = new Builder(parserContext.mapperService().fullName(NAME));
|
||||
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
|
||||
|
@ -114,8 +115,14 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
|
||||
return new FieldNamesFieldMapper(indexSettings, fieldType);
|
||||
public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
|
||||
final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
|
||||
if (fieldType != null) {
|
||||
return new FieldNamesFieldMapper(indexSettings, fieldType);
|
||||
} else {
|
||||
return parse(NAME, Collections.emptyMap(), context)
|
||||
.build(new BuilderContext(indexSettings, new ContentPath(1)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -183,7 +190,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
|
|||
}
|
||||
|
||||
private FieldNamesFieldMapper(Settings indexSettings, MappedFieldType existing) {
|
||||
this(existing == null ? Defaults.FIELD_TYPE.clone() : existing.clone(), indexSettings);
|
||||
this(existing.clone(), indexSettings);
|
||||
}
|
||||
|
||||
private FieldNamesFieldMapper(MappedFieldType fieldType, Settings indexSettings) {
|
||||
|
|
|
@ -37,6 +37,7 @@ import org.elasticsearch.common.lucene.Lucene;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.iterable.Iterables;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.mapper.Mapper.TypeParser.ParserContext;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -79,7 +80,8 @@ public class IdFieldMapper extends MetadataFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
|
||||
public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
|
||||
final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
|
||||
return new IdFieldMapper(indexSettings, fieldType);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.index.IndexOptions;
|
||||
import org.apache.lucene.index.IndexableField;
|
||||
import org.apache.lucene.search.Query;
|
||||
|
@ -85,7 +84,8 @@ public class IndexFieldMapper extends MetadataFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
|
||||
public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
|
||||
final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
|
||||
return new IndexFieldMapper(indexSettings, fieldType);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -153,7 +153,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
}
|
||||
|
||||
/**
|
||||
* Returns true if the "_all" field is enabled for the type
|
||||
* Returns true if the "_all" field is enabled on any type.
|
||||
*/
|
||||
public boolean allEnabled() {
|
||||
return this.allEnabled;
|
||||
|
@ -377,7 +377,9 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
|
|||
this.hasNested = hasNested;
|
||||
this.fullPathObjectMappers = fullPathObjectMappers;
|
||||
this.parentTypes = parentTypes;
|
||||
this.allEnabled = mapper.allFieldMapper().enabled();
|
||||
// this is only correct because types cannot be removed and we do not
|
||||
// allow to disable an existing _all field
|
||||
this.allEnabled |= mapper.allFieldMapper().enabled();
|
||||
|
||||
assert assertSerialization(newMapper);
|
||||
assert assertMappersShareSameFieldType();
|
||||
|
|
|
@ -39,14 +39,13 @@ public abstract class MetadataFieldMapper extends FieldMapper {
|
|||
* Get the default {@link MetadataFieldMapper} to use, if nothing had to be parsed.
|
||||
* @param fieldType null if this is the first root mapper on this index, the existing
|
||||
* fieldType for this index otherwise
|
||||
* @param indexSettings the index-level settings
|
||||
* @param fieldType the existing field type for this meta mapper on the current index
|
||||
* or null if this is the first type being introduced
|
||||
* @param typeName the name of the type that this mapper will be used on
|
||||
* @param parserContext context that may be useful to build the field like analyzers
|
||||
*/
|
||||
// TODO: remove the fieldType parameter which is only used for bw compat with pre-2.0
|
||||
// since settings could be modified
|
||||
MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName);
|
||||
MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext parserContext);
|
||||
}
|
||||
|
||||
public abstract static class Builder<T extends Builder, Y extends MetadataFieldMapper> extends FieldMapper.Builder<T, Y> {
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
*/
|
||||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.SortedDocValuesField;
|
||||
import org.apache.lucene.index.DocValuesType;
|
||||
import org.apache.lucene.index.IndexOptions;
|
||||
|
@ -131,7 +130,9 @@ public class ParentFieldMapper extends MetadataFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
|
||||
public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
|
||||
final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
|
||||
final String typeName = context.type();
|
||||
KeywordFieldMapper parentJoinField = createParentJoinFieldMapper(typeName, new BuilderContext(indexSettings, new ContentPath(0)));
|
||||
MappedFieldType childJoinFieldType = new ParentFieldType(Defaults.FIELD_TYPE, typeName);
|
||||
childJoinFieldType.setName(ParentFieldMapper.NAME);
|
||||
|
|
|
@ -27,6 +27,7 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -78,7 +79,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
|
|||
|
||||
public static class TypeParser implements MetadataFieldMapper.TypeParser {
|
||||
@Override
|
||||
public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
Builder builder = new Builder(parserContext.mapperService().fullName(NAME));
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
|
||||
Map.Entry<String, Object> entry = iterator.next();
|
||||
|
@ -93,8 +94,14 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
|
||||
return new RoutingFieldMapper(indexSettings, fieldType);
|
||||
public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
|
||||
final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
|
||||
if (fieldType != null) {
|
||||
return new RoutingFieldMapper(indexSettings, fieldType);
|
||||
} else {
|
||||
return parse(NAME, Collections.emptyMap(), context)
|
||||
.build(new BuilderContext(indexSettings, new ContentPath(1)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -121,7 +128,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
|
|||
private boolean required;
|
||||
|
||||
private RoutingFieldMapper(Settings indexSettings, MappedFieldType existing) {
|
||||
this(existing == null ? Defaults.FIELD_TYPE.clone() : existing.clone(), Defaults.REQUIRED, indexSettings);
|
||||
this(existing.clone(), Defaults.REQUIRED, indexSettings);
|
||||
}
|
||||
|
||||
private RoutingFieldMapper(MappedFieldType fieldType, boolean required, Settings indexSettings) {
|
||||
|
|
|
@ -48,6 +48,7 @@ import org.elasticsearch.index.mapper.Mapper;
|
|||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.mapper.MetadataFieldMapper;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.Mapper.TypeParser.ParserContext;
|
||||
import org.elasticsearch.index.mapper.ParseContext.Document;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
import org.elasticsearch.index.query.QueryShardException;
|
||||
|
@ -136,7 +137,8 @@ public class SeqNoFieldMapper extends MetadataFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
|
||||
public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
|
||||
final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
|
||||
return new SeqNoFieldMapper(indexSettings);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.StoredField;
|
||||
import org.apache.lucene.index.IndexOptions;
|
||||
import org.apache.lucene.index.IndexableField;
|
||||
|
@ -109,7 +108,7 @@ public class SourceFieldMapper extends MetadataFieldMapper {
|
|||
|
||||
public static class TypeParser implements MetadataFieldMapper.TypeParser {
|
||||
@Override
|
||||
public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
Builder builder = new Builder();
|
||||
|
||||
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
|
||||
|
@ -144,7 +143,8 @@ public class SourceFieldMapper extends MetadataFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
|
||||
public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
|
||||
final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
|
||||
return new SourceFieldMapper(indexSettings);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -76,12 +76,13 @@ public class TypeFieldMapper extends MetadataFieldMapper {
|
|||
|
||||
public static class TypeParser implements MetadataFieldMapper.TypeParser {
|
||||
@Override
|
||||
public MetadataFieldMapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
public MetadataFieldMapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
|
||||
throw new MapperParsingException(NAME + " is not configurable");
|
||||
}
|
||||
|
||||
@Override
|
||||
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
|
||||
public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
|
||||
final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
|
||||
return new TypeFieldMapper(indexSettings, fieldType);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -69,7 +69,8 @@ public class UidFieldMapper extends MetadataFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
|
||||
public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
|
||||
final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
|
||||
return new UidFieldMapper(indexSettings, fieldType);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -62,7 +62,8 @@ public class VersionFieldMapper extends MetadataFieldMapper {
|
|||
}
|
||||
|
||||
@Override
|
||||
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
|
||||
public MetadataFieldMapper getDefault(MappedFieldType fieldType, ParserContext context) {
|
||||
final Settings indexSettings = context.mapperService().getIndexSettings().getSettings();
|
||||
return new VersionFieldMapper(indexSettings);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -182,12 +182,6 @@ public class MergeStats implements Streamable, ToXContent {
|
|||
return new ByteSizeValue(currentSizeInBytes);
|
||||
}
|
||||
|
||||
public static MergeStats readMergeStats(StreamInput in) throws IOException {
|
||||
MergeStats stats = new MergeStats();
|
||||
stats.readFrom(in);
|
||||
return stats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(Fields.MERGES);
|
||||
|
|
|
@ -19,11 +19,9 @@
|
|||
package org.elasticsearch.index.query;
|
||||
|
||||
import org.apache.lucene.index.IndexReader;
|
||||
import org.apache.lucene.util.SetOnce;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.ParseFieldMatcherSupplier;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
|
@ -33,9 +31,7 @@ import org.elasticsearch.script.ExecutableScript;
|
|||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptContext;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.script.ScriptSettings;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.function.LongSupplier;
|
||||
|
||||
/**
|
||||
|
@ -48,19 +44,17 @@ public class QueryRewriteContext implements ParseFieldMatcherSupplier {
|
|||
protected final IndicesQueriesRegistry indicesQueriesRegistry;
|
||||
protected final Client client;
|
||||
protected final IndexReader reader;
|
||||
protected final ClusterState clusterState;
|
||||
protected final LongSupplier nowInMillis;
|
||||
|
||||
public QueryRewriteContext(IndexSettings indexSettings, MapperService mapperService, ScriptService scriptService,
|
||||
IndicesQueriesRegistry indicesQueriesRegistry, Client client, IndexReader reader,
|
||||
ClusterState clusterState, LongSupplier nowInMillis) {
|
||||
LongSupplier nowInMillis) {
|
||||
this.mapperService = mapperService;
|
||||
this.scriptService = scriptService;
|
||||
this.indexSettings = indexSettings;
|
||||
this.indicesQueriesRegistry = indicesQueriesRegistry;
|
||||
this.client = client;
|
||||
this.reader = reader;
|
||||
this.clusterState = clusterState;
|
||||
this.nowInMillis = nowInMillis;
|
||||
}
|
||||
|
||||
|
@ -98,13 +92,6 @@ public class QueryRewriteContext implements ParseFieldMatcherSupplier {
|
|||
return this.indexSettings.getParseFieldMatcher();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the cluster state as is when the operation started.
|
||||
*/
|
||||
public ClusterState getClusterState() {
|
||||
return clusterState;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a new {@link QueryParseContext} that wraps the provided parser, using the ParseFieldMatcher settings that
|
||||
* are configured in the index settings. The default script language will always default to Painless.
|
||||
|
|
|
@ -29,7 +29,6 @@ import org.apache.lucene.search.similarities.Similarity;
|
|||
import org.apache.lucene.util.SetOnce;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
@ -101,8 +100,8 @@ public class QueryShardContext extends QueryRewriteContext {
|
|||
public QueryShardContext(int shardId, IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache,
|
||||
IndexFieldDataService indexFieldDataService, MapperService mapperService, SimilarityService similarityService,
|
||||
ScriptService scriptService, final IndicesQueriesRegistry indicesQueriesRegistry, Client client,
|
||||
IndexReader reader, ClusterState clusterState, LongSupplier nowInMillis) {
|
||||
super(indexSettings, mapperService, scriptService, indicesQueriesRegistry, client, reader, clusterState, nowInMillis);
|
||||
IndexReader reader, LongSupplier nowInMillis) {
|
||||
super(indexSettings, mapperService, scriptService, indicesQueriesRegistry, client, reader, nowInMillis);
|
||||
this.shardId = shardId;
|
||||
this.indexSettings = indexSettings;
|
||||
this.similarityService = similarityService;
|
||||
|
@ -118,7 +117,7 @@ public class QueryShardContext extends QueryRewriteContext {
|
|||
public QueryShardContext(QueryShardContext source) {
|
||||
this(source.shardId, source.indexSettings, source.bitsetFilterCache, source.indexFieldDataService, source.mapperService,
|
||||
source.similarityService, source.scriptService, source.indicesQueriesRegistry, source.client,
|
||||
source.reader, source.clusterState, source.nowInMillis);
|
||||
source.reader, source.nowInMillis);
|
||||
this.types = source.getTypes();
|
||||
}
|
||||
|
||||
|
|
|
@ -425,6 +425,7 @@ public class FunctionScoreQueryBuilder extends AbstractQueryBuilder<FunctionScor
|
|||
newQueryBuilder.scoreMode = scoreMode;
|
||||
newQueryBuilder.minScore = minScore;
|
||||
newQueryBuilder.maxBoost = maxBoost;
|
||||
newQueryBuilder.boostMode = boostMode;
|
||||
return newQueryBuilder;
|
||||
}
|
||||
return this;
|
||||
|
|
|
@ -81,12 +81,6 @@ public class RefreshStats implements Streamable, ToXContent {
|
|||
return new TimeValue(totalTimeInMillis);
|
||||
}
|
||||
|
||||
public static RefreshStats readRefreshStats(StreamInput in) throws IOException {
|
||||
RefreshStats refreshStats = new RefreshStats();
|
||||
refreshStats.readFrom(in);
|
||||
return refreshStats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(Fields.REFRESH);
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.elasticsearch.index.search.stats;
|
||||
|
||||
import org.elasticsearch.action.support.ToXContentToBytes;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
@ -32,7 +33,7 @@ import java.io.IOException;
|
|||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
public class SearchStats implements Streamable, ToXContent {
|
||||
public class SearchStats extends ToXContentToBytes implements Streamable {
|
||||
|
||||
public static class Stats implements Streamable, ToXContent {
|
||||
|
||||
|
@ -338,22 +339,12 @@ public class SearchStats implements Streamable, ToXContent {
|
|||
static final String SUGGEST_CURRENT = "suggest_current";
|
||||
}
|
||||
|
||||
public static SearchStats readSearchStats(StreamInput in) throws IOException {
|
||||
SearchStats searchStats = new SearchStats();
|
||||
searchStats.readFrom(in);
|
||||
return searchStats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
totalStats = Stats.readStats(in);
|
||||
openContexts = in.readVLong();
|
||||
if (in.readBoolean()) {
|
||||
int size = in.readVInt();
|
||||
groupStats = new HashMap<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
groupStats.put(in.readString(), Stats.readStats(in));
|
||||
}
|
||||
groupStats = in.readMap(StreamInput::readString, Stats::readStats);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -365,24 +356,7 @@ public class SearchStats implements Streamable, ToXContent {
|
|||
out.writeBoolean(false);
|
||||
} else {
|
||||
out.writeBoolean(true);
|
||||
out.writeVInt(groupStats.size());
|
||||
for (Map.Entry<String, Stats> entry : groupStats.entrySet()) {
|
||||
out.writeString(entry.getKey());
|
||||
entry.getValue().writeTo(out);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
try {
|
||||
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
|
||||
builder.startObject();
|
||||
toXContent(builder, EMPTY_PARAMS);
|
||||
builder.endObject();
|
||||
return builder.string();
|
||||
} catch (IOException e) {
|
||||
return "{ \"error\" : \"" + e.getMessage() + "\"}";
|
||||
out.writeMap(groupStats, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -149,12 +149,14 @@ public class GlobalCheckpointService extends AbstractIndexShardComponent {
|
|||
* updates the global checkpoint on a replica shard (after it has been updated by the primary).
|
||||
*/
|
||||
synchronized void updateCheckpointOnReplica(long globalCheckpoint) {
|
||||
/*
|
||||
* The global checkpoint here is a local knowledge which is updated under the mandate of the primary. It can happen that the primary
|
||||
* information is lagging compared to a replica (e.g., if a replica is promoted to primary but has stale info relative to other
|
||||
* replica shards). In these cases, the local knowledge of the global checkpoint could be higher than sync from the lagging primary.
|
||||
*/
|
||||
if (this.globalCheckpoint <= globalCheckpoint) {
|
||||
this.globalCheckpoint = globalCheckpoint;
|
||||
logger.trace("global checkpoint updated from primary to [{}]", globalCheckpoint);
|
||||
} else {
|
||||
throw new IllegalArgumentException("global checkpoint from primary should never decrease. current [" +
|
||||
this.globalCheckpoint + "], got [" + globalCheckpoint + "]");
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -57,12 +57,6 @@ public class DocsStats implements Streamable, ToXContent {
|
|||
return this.deleted;
|
||||
}
|
||||
|
||||
public static DocsStats readDocStats(StreamInput in) throws IOException {
|
||||
DocsStats docsStats = new DocsStats();
|
||||
docsStats.readFrom(in);
|
||||
return docsStats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
count = in.readVLong();
|
||||
|
|
|
@ -23,6 +23,8 @@ import org.elasticsearch.common.Nullable;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason;
|
||||
|
||||
/**
|
||||
* An index event listener is the primary extension point for plugins and build-in services
|
||||
|
@ -103,31 +105,32 @@ public interface IndexEventListener {
|
|||
|
||||
}
|
||||
|
||||
/**
|
||||
* Called before the index shard gets created.
|
||||
*/
|
||||
default void beforeIndexShardCreated(ShardId shardId, Settings indexSettings) {
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Called before the index get closed.
|
||||
*
|
||||
* @param indexService The index service
|
||||
* @param reason the reason for index removal
|
||||
*/
|
||||
default void beforeIndexClosed(IndexService indexService) {
|
||||
default void beforeIndexRemoved(IndexService indexService, IndexRemovalReason reason) {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Called after the index has been closed.
|
||||
* Called after the index has been removed.
|
||||
*
|
||||
* @param index The index
|
||||
* @param reason the reason for index removal
|
||||
*/
|
||||
default void afterIndexClosed(Index index, Settings indexSettings) {
|
||||
default void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRemovalReason reason) {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Called before the index shard gets created.
|
||||
*/
|
||||
default void beforeIndexShardCreated(ShardId shardId, Settings indexSettings) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Called before the index shard gets deleted from disk
|
||||
* Note: this method is only executed on the first attempt of deleting the shard. Retries are will not invoke
|
||||
|
@ -149,28 +152,6 @@ public interface IndexEventListener {
|
|||
default void afterIndexShardDeleted(ShardId shardId, Settings indexSettings) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Called after the index has been deleted.
|
||||
* This listener method is invoked after {@link #afterIndexClosed(org.elasticsearch.index.Index, org.elasticsearch.common.settings.Settings)}
|
||||
* when an index is deleted
|
||||
*
|
||||
* @param index The index
|
||||
*/
|
||||
default void afterIndexDeleted(Index index, Settings indexSettings) {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Called before the index gets deleted.
|
||||
* This listener method is invoked after
|
||||
* {@link #beforeIndexClosed(org.elasticsearch.index.IndexService)} when an index is deleted
|
||||
*
|
||||
* @param indexService The index service
|
||||
*/
|
||||
default void beforeIndexDeleted(IndexService indexService) {
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Called on the Master node only before the {@link IndexService} instances is created to simulate an index creation.
|
||||
* This happens right before the index and it's metadata is registered in the cluster state
|
||||
|
|
|
@ -1612,7 +1612,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
}
|
||||
});
|
||||
} else {
|
||||
final Exception e;
|
||||
final RuntimeException e;
|
||||
if (numShards == -1) {
|
||||
e = new IndexNotFoundException(mergeSourceIndex);
|
||||
} else {
|
||||
|
@ -1620,7 +1620,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
+ " are started yet, expected " + numShards + " found " + startedShards.size() + " can't recover shard "
|
||||
+ shardId());
|
||||
}
|
||||
recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true);
|
||||
throw e;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -143,11 +143,7 @@ public class IndexingStats implements Streamable, ToXContent {
|
|||
indexCount = in.readVLong();
|
||||
indexTimeInMillis = in.readVLong();
|
||||
indexCurrent = in.readVLong();
|
||||
|
||||
if(in.getVersion().onOrAfter(Version.V_2_1_0)){
|
||||
indexFailedCount = in.readVLong();
|
||||
}
|
||||
|
||||
indexFailedCount = in.readVLong();
|
||||
deleteCount = in.readVLong();
|
||||
deleteTimeInMillis = in.readVLong();
|
||||
deleteCurrent = in.readVLong();
|
||||
|
@ -161,11 +157,7 @@ public class IndexingStats implements Streamable, ToXContent {
|
|||
out.writeVLong(indexCount);
|
||||
out.writeVLong(indexTimeInMillis);
|
||||
out.writeVLong(indexCurrent);
|
||||
|
||||
if(out.getVersion().onOrAfter(Version.V_2_1_0)) {
|
||||
out.writeVLong(indexFailedCount);
|
||||
}
|
||||
|
||||
out.writeVLong(indexFailedCount);
|
||||
out.writeVLong(deleteCount);
|
||||
out.writeVLong(deleteTimeInMillis);
|
||||
out.writeVLong(deleteCurrent);
|
||||
|
@ -283,21 +275,11 @@ public class IndexingStats implements Streamable, ToXContent {
|
|||
static final String THROTTLED_TIME = "throttle_time";
|
||||
}
|
||||
|
||||
public static IndexingStats readIndexingStats(StreamInput in) throws IOException {
|
||||
IndexingStats indexingStats = new IndexingStats();
|
||||
indexingStats.readFrom(in);
|
||||
return indexingStats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
totalStats = Stats.readStats(in);
|
||||
if (in.readBoolean()) {
|
||||
int size = in.readVInt();
|
||||
typeStats = new HashMap<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
typeStats.put(in.readString(), Stats.readStats(in));
|
||||
}
|
||||
typeStats = in.readMap(StreamInput::readString, Stats::readStats);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -308,11 +290,7 @@ public class IndexingStats implements Streamable, ToXContent {
|
|||
out.writeBoolean(false);
|
||||
} else {
|
||||
out.writeBoolean(true);
|
||||
out.writeVInt(typeStats.size());
|
||||
for (Map.Entry<String, Stats> entry : typeStats.entrySet()) {
|
||||
out.writeString(entry.getKey());
|
||||
entry.getValue().writeTo(out);
|
||||
}
|
||||
out.writeMap(typeStats, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -65,12 +65,6 @@ public class StoreStats implements Streamable, ToXContent {
|
|||
return size();
|
||||
}
|
||||
|
||||
public static StoreStats readStoreStats(StreamInput in) throws IOException {
|
||||
StoreStats store = new StoreStats();
|
||||
store.readFrom(in);
|
||||
return store;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
sizeInBytes = in.readVLong();
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue