Merge branch 'master' into rankeval

commit 35688f6441
@@ -141,7 +141,7 @@ Please follow these formatting guidelines:
 * Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks that is fine to do.
 * Wildcard imports (`import foo.bar.baz.*`) are forbidden and will cause the build to fail. Please attempt to tame your IDE so it doesn't make them and please send a PR against this document with instructions for your IDE if it doesn't contain them.
 * Eclipse: `Preferences->Java->Code Style->Organize Imports`. There are two boxes labeled "`Number of (static )? imports needed for .*`". Set their values to 99999 or some other absurdly high value.
-* IntelliJ: `Preferences->Editor->Code Style->Java->Imports`. There are two configuration options: `Class count to use import with '*'` and `Names count to use static import with '*'`. Set their values to 99999 or some other absurdly high value.
+* IntelliJ: `Preferences/Settings->Editor->Code Style->Java->Imports`. There are two configuration options: `Class count to use import with '*'` and `Names count to use static import with '*'`. Set their values to 99999 or some other absurdly high value.
 * Don't worry too much about import order. Try not to change it but don't worry about fighting your IDE to stop it from doing so.

 To create a distribution from the source, simply run:
@@ -1,5 +1,5 @@
 elasticsearch = 7.0.0-alpha1
-lucene = 7.1.0
+lucene = 7.2.0-snapshot-8c94404

 # optional dependencies
 spatial4j = 0.6
@@ -38,6 +38,8 @@ public abstract class Command implements Closeable {
     /** A description of the command, used in the help output. */
     protected final String description;

+    private final Runnable beforeMain;
+
     /** The option parser for this command. */
     protected final OptionParser parser = new OptionParser();

@@ -46,8 +48,15 @@ public abstract class Command implements Closeable {
    private final OptionSpec<Void> verboseOption =
        parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output").availableUnless(silentOption);

-    public Command(String description) {
+    /**
+     * Construct the command with the specified command description and runnable to execute before main is invoked.
+     *
+     * @param description the command description
+     * @param beforeMain the before-main runnable
+     */
+    public Command(final String description, final Runnable beforeMain) {
        this.description = description;
+        this.beforeMain = beforeMain;
    }

    private Thread shutdownHookThread;
@@ -75,7 +84,7 @@ public abstract class Command implements Closeable {
            Runtime.getRuntime().addShutdownHook(shutdownHookThread);
        }

-        beforeExecute();
+        beforeMain.run();

        try {
            mainWithoutErrorHandling(args, terminal);
@@ -93,12 +102,6 @@ public abstract class Command implements Closeable {
        return ExitCodes.OK;
    }

-    /**
-     * Setup method to be executed before parsing or execution of the command being run. Any exceptions thrown by the
-     * method will not be cleanly caught by the parser.
-     */
-    protected void beforeExecute() {}
-
    /**
     * Executes the command, but all errors are thrown.
     */
@@ -35,8 +35,14 @@ public class MultiCommand extends Command {

    private final NonOptionArgumentSpec<String> arguments = parser.nonOptions("command");

-    public MultiCommand(String description) {
-        super(description);
+    /**
+     * Construct the multi-command with the specified command description and runnable to execute before main is invoked.
+     *
+     * @param description the multi-command description
+     * @param beforeMain the before-main runnable
+     */
+    public MultiCommand(final String description, final Runnable beforeMain) {
+        super(description, beforeMain);
        parser.posixlyCorrect(true);
    }

@@ -1 +0,0 @@
-a508bf6b580471ee568dab7d2acfedfa5aadce70
@@ -0,0 +1 @@
+4c515e5152e6938129a5e97c5afb5b3b360faed3
@@ -1 +0,0 @@
-804a7ce82bba3d085733486bfde4846ecb77ce01
@@ -0,0 +1 @@
+406c6cc0f8c2a47d42a1e343eaf2ad939fee905c
@@ -1 +0,0 @@
-dd291b7ebf4845483895724d2562214dc7f40049
@@ -0,0 +1 @@
+4c93f7fbc6e0caf87f7948b8481d80e0167133bf
@@ -1 +0,0 @@
-0732d16c16421fca058a2a07ca4081ec7696365b
@@ -0,0 +1 @@
+b078ca50c6d579085c7755b4fd8de60711964dcc
@@ -1 +0,0 @@
-596550daabae765ad685112e0fe7c4f0fdfccb3f
@@ -0,0 +1 @@
+fc5e61c8879f22b65ee053f1665bc9f13af79c1d
@@ -1 +0,0 @@
-5f26dd64c195258a81175772ef7fe105e7d60a26
@@ -0,0 +1 @@
+9a10839d3dfe7b369f0af8a78a630ee4d82e678e
@@ -1 +0,0 @@
-3ef64c58d0c09ca40d848efa96b585b7476271f2
@@ -0,0 +1 @@
+d45f2f51cf6f47a66ecafddecb83c1e08eb4061f
@@ -1 +0,0 @@
-1496ee5fa62206ee5ddf51042a340d6a9ee3b5de
@@ -0,0 +1 @@
+19cb7362be57104ad891259060af80fb4679e92c
@@ -1 +0,0 @@
-1554920ab207a3245fa408d022a5c90ad3a1fea3
@@ -0,0 +1 @@
+ae24737048d95f56d0099fea77498324412eef50
@@ -1 +0,0 @@
-5767c15c5ee97926829fd8a4337e434fa95f3c08
@@ -0,0 +1 @@
+a9d3422c9a72106026c19a8f76a4f4e62159ff5c
@@ -1 +0,0 @@
-691f7b9ac05f3ad2ac7e80733ef70247904bd3ae
@@ -0,0 +1 @@
+66433006587ede3e01899fd6f5e55c8378032c2f
@@ -1 +0,0 @@
-6c64c04d802badb800516a8a574cb993929c3805
@@ -0,0 +1 @@
+b6b3082ba845f7bd41641b015624f46d4f20afb6
@@ -1 +0,0 @@
-3f1bc1aada8f06b176b782da24b9d7ad9641c41a
@@ -0,0 +1 @@
+7757cac49cb3e9f1a219346ce95fb80f61f7090e
@@ -1 +0,0 @@
-8ded650aed23efb775f17be496e3e3870214e23b
@@ -0,0 +1 @@
+92991fdcd185883050d9530ccc0d863b7a08e99c
@@ -1 +0,0 @@
-8d0ed1589ebdccf34e888c6efc0134a13a238c85
@@ -0,0 +1 @@
+fb6a94b833a23a17e3721ea2f9679ad770dec48b
@@ -93,6 +93,15 @@ public final class MinDocQuery extends Query {
                final DocIdSetIterator disi = new MinDocIterator(segmentMinDoc, maxDoc);
                return new ConstantScoreScorer(this, score(), disi);
            }

+            @Override
+            public boolean isCacheable(LeafReaderContext ctx) {
+                // Let's not cache this query, the cached iterator would use more memory
+                // and be slower anyway.
+                // Also, matches in a given segment depend on the other segments, which
+                // makes it a bad candidate for per-segment caching.
+                return false;
+            }
+
        };
    }
@@ -35,9 +35,7 @@ import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.Weight;

 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.List;
 import java.util.Objects;

 /**
@@ -90,6 +88,14 @@ public class SearchAfterSortedDocQuery extends Query {
                final DocIdSetIterator disi = new MinDocQuery.MinDocIterator(firstDoc, maxDoc);
                return new ConstantScoreScorer(this, score(), disi);
            }

+            @Override
+            public boolean isCacheable(LeafReaderContext ctx) {
+                // If the sort order includes _doc, then the matches in a segment
+                // may depend on other segments, which makes this query a bad
+                // candidate for caching
+                return false;
+            }
+
        };
    }
@@ -137,7 +137,7 @@ public class Version implements Comparable<Version> {
    public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
    public static final int V_7_0_0_alpha1_ID = 7000001;
    public static final Version V_7_0_0_alpha1 =
-        new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
+        new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_2_0);
    public static final Version CURRENT = V_7_0_0_alpha1;

    static {
@@ -33,8 +33,10 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.get.GetResult;
+import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.internal.AliasFilter;
@@ -86,6 +88,19 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
        }
    }

+    @Override
+    protected void asyncShardOperation(ExplainRequest request, ShardId shardId, ActionListener<ExplainResponse> listener) throws IOException {
+        IndexService indexService = searchService.getIndicesService().indexServiceSafe(shardId.getIndex());
+        IndexShard indexShard = indexService.getShard(shardId.id());
+        indexShard.awaitShardSearchActive(b -> {
+            try {
+                super.asyncShardOperation(request, shardId, listener);
+            } catch (Exception ex) {
+                listener.onFailure(ex);
+            }
+        });
+    }
+
    @Override
    protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) throws IOException {
        ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
@@ -19,13 +19,13 @@

 package org.elasticsearch.action.get;

+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.RoutingMissingException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.routing.Preference;
 import org.elasticsearch.cluster.routing.ShardIterator;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
@@ -38,6 +38,8 @@ import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

+import java.io.IOException;
+
 /**
  * Performs the get operation.
  */
@@ -76,6 +78,23 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
        }
    }

+    @Override
+    protected void asyncShardOperation(GetRequest request, ShardId shardId, ActionListener<GetResponse> listener) throws IOException {
+        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
+        IndexShard indexShard = indexService.getShard(shardId.id());
+        if (request.realtime()) { // we are not tied to a refresh cycle here anyway
+            listener.onResponse(shardOperation(request, shardId));
+        } else {
+            indexShard.awaitShardSearchActive(b -> {
+                try {
+                    super.asyncShardOperation(request, shardId, listener);
+                } catch (Exception ex) {
+                    listener.onFailure(ex);
+                }
+            });
+        }
+    }
+
    @Override
    protected GetResponse shardOperation(GetRequest request, ShardId shardId) {
        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
@@ -38,6 +38,7 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.logging.LoggerMessageFormat;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportChannel;
@@ -47,6 +48,8 @@ import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;

 import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.concurrent.Executor;
 import java.util.function.Supplier;

 import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException;
@@ -78,7 +81,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
        if (!isSubAction()) {
            transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new TransportHandler());
        }
-        transportService.registerRequestHandler(transportShardAction, request, executor, new ShardTransportHandler());
+        transportService.registerRequestHandler(transportShardAction, request, ThreadPool.Names.SAME, new ShardTransportHandler());
    }

    /**
@@ -97,6 +100,19 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ

    protected abstract Response shardOperation(Request request, ShardId shardId) throws IOException;

+    protected void asyncShardOperation(Request request, ShardId shardId, ActionListener<Response> listener) throws IOException {
+        threadPool.executor(this.executor).execute(new AbstractRunnable() {
+            @Override
+            public void onFailure(Exception e) {
+                listener.onFailure(e);
+            }
+
+            @Override
+            protected void doRun() throws Exception {
+                listener.onResponse(shardOperation(request, shardId));
+            }
+        });
+    }
    protected abstract Response newResponse();

    protected abstract boolean resolveIndex(Request request);
@@ -291,11 +307,27 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
            if (logger.isTraceEnabled()) {
                logger.trace("executing [{}] on shard [{}]", request, request.internalShardId);
            }
-            Response response = shardOperation(request, request.internalShardId);
-            channel.sendResponse(response);
+            asyncShardOperation(request, request.internalShardId, new ActionListener<Response>() {
+                @Override
+                public void onResponse(Response response) {
+                    try {
+                        channel.sendResponse(response);
+                    } catch (IOException e) {
+                        onFailure(e);
+                    }
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    try {
+                        channel.sendResponse(e);
+                    } catch (IOException e1) {
+                        throw new UncheckedIOException(e1);
+                    }
+                }
+            });
        }
    }

    /**
     * Internal request class that gets built on each node. Holds the original request plus additional info.
     */
@@ -19,6 +19,7 @@

 package org.elasticsearch.action.termvectors;

+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.RoutingMissingException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
|
@ -37,6 +38,8 @@ import org.elasticsearch.indices.IndicesService;
|
||||||
import org.elasticsearch.threadpool.ThreadPool;
|
import org.elasticsearch.threadpool.ThreadPool;
|
||||||
import org.elasticsearch.transport.TransportService;
|
import org.elasticsearch.transport.TransportService;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Performs the get operation.
|
* Performs the get operation.
|
||||||
*/
|
*/
|
||||||
|
@ -82,6 +85,23 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
protected void asyncShardOperation(TermVectorsRequest request, ShardId shardId, ActionListener<TermVectorsResponse> listener) throws IOException {
|
||||||
|
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
|
||||||
|
IndexShard indexShard = indexService.getShard(shardId.id());
|
||||||
|
if (request.realtime()) { // it's a realtime request which is not subject to refresh cycles
|
||||||
|
listener.onResponse(shardOperation(request, shardId));
|
||||||
|
} else {
|
||||||
|
indexShard.awaitShardSearchActive(b -> {
|
||||||
|
try {
|
||||||
|
super.asyncShardOperation(request, shardId, listener);
|
||||||
|
} catch (Exception ex) {
|
||||||
|
listener.onFailure(ex);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
protected TermVectorsResponse shardOperation(TermVectorsRequest request, ShardId shardId) {
|
protected TermVectorsResponse shardOperation(TermVectorsRequest request, ShardId shardId) {
|
||||||
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
|
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
|
||||||
|
|
|
@@ -38,6 +38,7 @@ import java.io.BufferedReader;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.security.AllPermission;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -210,6 +211,7 @@ final class BootstrapChecks {
        checks.add(new OnOutOfMemoryErrorCheck());
        checks.add(new EarlyAccessCheck());
        checks.add(new G1GCCheck());
+        checks.add(new AllPermissionCheck());
        return Collections.unmodifiableList(checks);
    }

@@ -692,4 +694,27 @@ final class BootstrapChecks {

    }

+    static class AllPermissionCheck implements BootstrapCheck {
+
+        @Override
+        public final BootstrapCheckResult check(BootstrapContext context) {
+            if (isAllPermissionGranted()) {
+                return BootstrapCheck.BootstrapCheckResult.failure("granting the all permission effectively disables security");
+            }
+            return BootstrapCheckResult.success();
+        }
+
+        boolean isAllPermissionGranted() {
+            final SecurityManager sm = System.getSecurityManager();
+            assert sm != null;
+            try {
+                sm.checkPermission(new AllPermission());
+            } catch (final SecurityException e) {
+                return false;
+            }
+            return true;
+        }
+
+    }
+
 }
@@ -51,7 +51,7 @@ class Elasticsearch extends EnvironmentAwareCommand {

    // visible for testing
    Elasticsearch() {
-        super("starts elasticsearch");
+        super("starts elasticsearch", () -> {}); // we configure logging later so we override the base class from configuring logging
        versionOption = parser.acceptsAll(Arrays.asList("V", "version"),
            "Prints elasticsearch version information and exits");
        daemonizeOption = parser.acceptsAll(Arrays.asList("d", "daemonize"),
@@ -92,15 +92,6 @@ class Elasticsearch extends EnvironmentAwareCommand {
        return elasticsearch.main(args, terminal);
    }

-    @Override
-    protected boolean shouldConfigureLoggingWithoutConfig() {
-        /*
-         * If we allow logging to be configured without a config before we are ready to read the log4j2.properties file, then we will fail
-         * to detect uses of logging before it is properly configured.
-         */
-        return false;
-    }
-
    @Override
    protected void execute(Terminal terminal, OptionSet options, Environment env) throws UserException {
        if (options.nonOptionArguments().isEmpty() == false) {
@@ -65,12 +65,10 @@ class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionH
        }
    }

-    // visible for testing
    static boolean isFatalUncaught(Throwable e) {
        return e instanceof Error;
    }

-    // visible for testing
    void onFatalUncaught(final String threadName, final Throwable t) {
        final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
        logger.error(
@@ -78,24 +76,32 @@ class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionH
            () -> new ParameterizedMessage("fatal error in thread [{}], exiting", threadName), t);
    }

-    // visible for testing
    void onNonFatalUncaught(final String threadName, final Throwable t) {
        final Logger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
        logger.warn((org.apache.logging.log4j.util.Supplier<?>)
            () -> new ParameterizedMessage("uncaught exception in thread [{}]", threadName), t);
    }

-    // visible for testing
    void halt(int status) {
-        AccessController.doPrivileged(new PrivilegedAction<Void>() {
-            @SuppressForbidden(reason = "halt")
-            @Override
-            public Void run() {
-                // we halt to prevent shutdown hooks from running
-                Runtime.getRuntime().halt(status);
-                return null;
-            }
-        });
+        AccessController.doPrivileged(new PrivilegedHaltAction(status));
    }

+    static class PrivilegedHaltAction implements PrivilegedAction<Void> {
+
+        private final int status;
+
+        private PrivilegedHaltAction(final int status) {
+            this.status = status;
+        }
+
+        @SuppressForbidden(reason = "halt")
+        @Override
+        public Void run() {
+            // we halt to prevent shutdown hooks from running
+            Runtime.getRuntime().halt(status);
+            return null;
+        }
+
+    }
+
 }
@@ -119,7 +119,11 @@ final class Security {
        Policy.setPolicy(new ESPolicy(createPermissions(environment), getPluginPermissions(environment), filterBadDefaults));

        // enable security manager
-        final String[] classesThatCanExit = new String[]{ElasticsearchUncaughtExceptionHandler.class.getName(), Command.class.getName()};
+        final String[] classesThatCanExit =
+            new String[]{
+                // SecureSM matches class names as regular expressions so we escape the $ that arises from the nested class name
+                ElasticsearchUncaughtExceptionHandler.PrivilegedHaltAction.class.getName().replace("$", "\\$"),
+                Command.class.getName()};
        System.setSecurityManager(new SecureSM(classesThatCanExit));

        // do some basic tests
@@ -0,0 +1,43 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cli;
+
+import org.apache.logging.log4j.Level;
+import org.elasticsearch.common.logging.LogConfigurator;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * Holder class for method to configure logging without Elasticsearch configuration files for use in CLI tools that will not read such
+ * files.
+ */
+final class CommandLoggingConfigurator {
+
+    /**
+     * Configures logging without Elasticsearch configuration files based on the system property "es.logger.level" only. As such, any
+     * logging will be written to the console.
+     */
+    static void configureLoggingWithoutConfig() {
+        // initialize default for es.logger.level because we will not read the log4j2.properties
+        final String loggerLevel = System.getProperty("es.logger.level", Level.INFO.name());
+        final Settings settings = Settings.builder().put("logger.level", loggerLevel).build();
+        LogConfigurator.configureWithoutConfig(settings);
+    }
+
+}
@@ -22,9 +22,7 @@ package org.elasticsearch.cli;
 import joptsimple.OptionSet;
 import joptsimple.OptionSpec;
 import joptsimple.util.KeyValuePair;
-import org.apache.logging.log4j.Level;
 import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.logging.LogConfigurator;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.node.InternalSettingsPreparer;
@@ -40,8 +38,25 @@ public abstract class EnvironmentAwareCommand extends Command {

    private final OptionSpec<KeyValuePair> settingOption;

-    public EnvironmentAwareCommand(String description) {
-        super(description);
+    /**
+     * Construct the command with the specified command description. This command will have logging configured without reading Elasticsearch
+     * configuration files.
+     *
+     * @param description the command description
+     */
+    public EnvironmentAwareCommand(final String description) {
+        this(description, CommandLoggingConfigurator::configureLoggingWithoutConfig);
+    }
+
+    /**
+     * Construct the command with the specified command description and runnable to execute before main is invoked. Commands constructed
+     * with this constructor must take ownership of configuring logging.
+     *
+     * @param description the command description
+     * @param beforeMain the before-main runnable
+     */
+    public EnvironmentAwareCommand(final String description, final Runnable beforeMain) {
+        super(description, beforeMain);
        this.settingOption = parser.accepts("E", "Configure a setting").withRequiredArg().ofType(KeyValuePair.class);
    }

@@ -104,26 +119,6 @@ public abstract class EnvironmentAwareCommand extends Command {
        }
    }

-    @Override
-    protected final void beforeExecute() {
-        if (shouldConfigureLoggingWithoutConfig()) {
-            // initialize default for es.logger.level because we will not read the log4j2.properties
-            final String loggerLevel = System.getProperty("es.logger.level", Level.INFO.name());
-            final Settings settings = Settings.builder().put("logger.level", loggerLevel).build();
-            LogConfigurator.configureWithoutConfig(settings);
-        }
-    }
-
-    /**
-     * Indicate whether or not logging should be configured without reading a log4j2.properties. Most commands should do this because we do
-     * not configure logging for CLI tools. Only commands that configure logging on their own should not do this.
-     *
-     * @return true if logging should be configured without reading a log4j2.properties file
-     */
-    protected boolean shouldConfigureLoggingWithoutConfig() {
-        return true;
-    }
-
    /** Execute the command with the initialized {@link Environment}. */
    protected abstract void execute(Terminal terminal, OptionSet options, Environment env) throws Exception;

@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cli;
+
+/**
+ * A command that is aware of logging. This class should be preferred over the base {@link Command} class for any CLI tools that depend on
+ * core Elasticsearch as they could directly or indirectly touch classes that touch logging and as such logging needs to be configured.
+ */
+public abstract class LoggingAwareCommand extends Command {
+
+    /**
+     * Construct the command with the specified command description. This command will have logging configured without reading Elasticsearch
+     * configuration files.
+     *
+     * @param description the command description
+     */
+    public LoggingAwareCommand(final String description) {
+        super(description, CommandLoggingConfigurator::configureLoggingWithoutConfig);
+    }
+
+}
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cli;
+
+/**
+ * A multi-command that is aware of logging. This class should be preferred over the base {@link MultiCommand} class for any CLI tools that
+ * depend on core Elasticsearch as they could directly or indirectly touch classes that touch logging and as such logging needs to be
+ * configured.
+ */
+public abstract class LoggingAwareMultiCommand extends MultiCommand {
+
+    /**
+     * Construct the command with the specified command description. This command will have logging configured without reading Elasticsearch
+     * configuration files.
+     *
+     * @param description the command description
+     */
+    public LoggingAwareMultiCommand(final String description) {
+        super(description, CommandLoggingConfigurator::configureLoggingWithoutConfig);
+    }
+
+}
@@ -30,16 +30,19 @@ import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.document.LatLonDocValuesField;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FilterLeafReader;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexFormatTooNewException;
 import org.apache.lucene.index.IndexFormatTooOldException;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NoMergePolicy;
 import org.apache.lucene.index.SegmentCommitInfo;
 import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.SegmentReader;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.FieldDoc;
@@ -650,6 +653,21 @@ public class Lucene {
        return LenientParser.parse(toParse, defaultValue);
    }

+    /**
+     * Tries to extract a segment reader from the given index reader.
+     * If no SegmentReader can be extracted an {@link IllegalStateException} is thrown.
+     */
+    public static SegmentReader segmentReader(LeafReader reader) {
+        if (reader instanceof SegmentReader) {
+            return (SegmentReader) reader;
+        } else if (reader instanceof FilterLeafReader) {
+            final FilterLeafReader fReader = (FilterLeafReader) reader;
+            return segmentReader(FilterLeafReader.unwrap(fReader));
+        }
+        // hard fail - we can't get a SegmentReader
+        throw new IllegalStateException("Can not extract segment reader from given index reader [" + reader + "]");
+    }
+
    @SuppressForbidden(reason = "Version#parseLeniently() used in a central place")
    private static final class LenientParser {
        public static Version parse(String toParse, Version defaultValue) {
@@ -675,10 +693,6 @@ public class Lucene {
                throw new IllegalStateException(message);
            }
            @Override
-            public int freq() throws IOException {
-                throw new IllegalStateException(message);
-            }
-            @Override
            public int docID() {
                throw new IllegalStateException(message);
            }
@@ -333,6 +333,13 @@ public class FunctionScoreQuery extends Query {
            }
            return expl;
        }
+
+        @Override
+        public boolean isCacheable(LeafReaderContext ctx) {
+            // If minScore is not null, then matches depend on statistics of the
+            // top-level reader.
+            return minScore == null;
+        }
    }

    static class FunctionFactorScorer extends FilterScorer {
@@ -59,11 +59,6 @@ final class MinScoreScorer extends Scorer {
        return in.score();
    }

-    @Override
-    public int freq() throws IOException {
-        return in.freq();
-    }
-
    @Override
    public DocIdSetIterator iterator() {
        return TwoPhaseIterator.asDocIdSetIterator(twoPhaseIterator());
@@ -50,11 +50,6 @@ public class ScriptScoreFunction extends ScoreFunction {
            return score;
        }

-        @Override
-        public int freq() throws IOException {
-            throw new UnsupportedOperationException();
-        }
-
        @Override
        public DocIdSetIterator iterator() {
            throw new UnsupportedOperationException();
@@ -36,7 +36,6 @@ import org.elasticsearch.index.engine.EngineConfig;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.seqno.LocalCheckpointTracker;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.index.store.FsDirectoryService;
 import org.elasticsearch.index.store.Store;
@@ -135,6 +134,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
        IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING,
        IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING,
        IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING,
+        IndexSettings.INDEX_SEARCH_IDLE_AFTER,
        IndexFieldDataService.INDEX_FIELDDATA_CACHE_KEY,
        FieldMapper.IGNORE_MALFORMED_SETTING,
        FieldMapper.COERCE_SETTING,
@@ -19,13 +19,14 @@

 package org.elasticsearch.common.settings;

+import org.elasticsearch.cli.LoggingAwareMultiCommand;
 import org.elasticsearch.cli.MultiCommand;
 import org.elasticsearch.cli.Terminal;

 /**
  * A cli tool for managing secrets in the elasticsearch keystore.
  */
-public class KeyStoreCli extends MultiCommand {
+public class KeyStoreCli extends LoggingAwareMultiCommand {

    private KeyStoreCli() {
        super("A tool for managing settings stored in the elasticsearch keystore");
@@ -39,4 +40,5 @@ public class KeyStoreCli extends MultiCommand {
    public static void main(String[] args) throws Exception {
        exit(new KeyStoreCli().main(args, Terminal.DEFAULT));
    }
+
 }
@@ -37,6 +37,7 @@ import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.FutureUtils;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.env.NodeEnvironment;
@@ -624,6 +625,27 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
            }
        }
        if (refreshTask.getInterval().equals(indexSettings.getRefreshInterval()) == false) {
+            // once we change the refresh interval we schedule yet another refresh
+            // to ensure we are in a clean and predictable state.
+            // it doesn't matter if we move from or to <code>-1</code> in both cases we want
+            // docs to become visible immediately. This also flushes all pending indexing / search reqeusts
+            // that are waiting for a refresh.
+            threadPool.executor(ThreadPool.Names.REFRESH).execute(new AbstractRunnable() {
+                @Override
+                public void onFailure(Exception e) {
+                    logger.warn("forced refresh failed after interval change", e);
+                }
+
+                @Override
+                protected void doRun() throws Exception {
+                    maybeRefreshEngine(true);
+                }
+
+                @Override
+                public boolean isForceExecution() {
+                    return true;
+                }
+            });
            rescheduleRefreshTasks();
        }
        final Translog.Durability durability = indexSettings.getTranslogDurability();
@@ -686,17 +708,13 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust
        }
    }

-    private void maybeRefreshEngine() {
-        if (indexSettings.getRefreshInterval().millis() > 0) {
+    private void maybeRefreshEngine(boolean force) {
+        if (indexSettings.getRefreshInterval().millis() > 0 || force) {
            for (IndexShard shard : this.shards.values()) {
-                if (shard.isReadAllowed()) {
-                    try {
-                        if (shard.isRefreshNeeded()) {
-                            shard.refresh("schedule");
-                        }
-                    } catch (IndexShardClosedException | AlreadyClosedException ex) {
-                        // fine - continue;
-                    }
+                try {
+                    shard.scheduledRefresh();
+                } catch (IndexShardClosedException | AlreadyClosedException ex) {
+                    // fine - continue;
                }
            }
        }
@@ -896,7 +914,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust

        @Override
        protected void runInternal() {
-            indexService.maybeRefreshEngine();
+            indexService.maybeRefreshEngine(false);
        }

        @Override
@@ -62,6 +62,9 @@ public final class IndexSettings {
    public static final Setting<TimeValue> INDEX_TRANSLOG_SYNC_INTERVAL_SETTING =
        Setting.timeSetting("index.translog.sync_interval", TimeValue.timeValueSeconds(5), TimeValue.timeValueMillis(100),
            Property.IndexScope);
+    public static final Setting<TimeValue> INDEX_SEARCH_IDLE_AFTER =
+        Setting.timeSetting("index.search.idle.after", TimeValue.timeValueSeconds(30),
+            TimeValue.timeValueMinutes(0), Property.IndexScope, Property.Dynamic);
    public static final Setting<Translog.Durability> INDEX_TRANSLOG_DURABILITY_SETTING =
        new Setting<>("index.translog.durability", Translog.Durability.REQUEST.name(),
            (value) -> Translog.Durability.valueOf(value.toUpperCase(Locale.ROOT)), Property.Dynamic, Property.IndexScope);
@@ -262,6 +265,8 @@ public final class IndexSettings {
    private volatile int maxNgramDiff;
    private volatile int maxShingleDiff;
    private volatile boolean TTLPurgeDisabled;
+    private volatile TimeValue searchIdleAfter;
+
    /**
     * The maximum number of refresh listeners allows on this shard.
     */
@@ -371,6 +376,7 @@ public final class IndexSettings {
        maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL);
        this.mergePolicyConfig = new MergePolicyConfig(logger, this);
        this.indexSortConfig = new IndexSortConfig(this);
+        searchIdleAfter = scopedSettings.get(INDEX_SEARCH_IDLE_AFTER);
        singleType = INDEX_MAPPING_SINGLE_TYPE_SETTING.get(indexMetaData.getSettings()); // get this from metadata - it's not registered
        if ((singleType || version.before(Version.V_6_0_0_alpha1)) == false) {
            throw new AssertionError(index.toString() + "multiple types are only allowed on pre 6.x indices but version is: ["
@@ -411,8 +417,11 @@ public final class IndexSettings {
        scopedSettings.addSettingsUpdateConsumer(MAX_REFRESH_LISTENERS_PER_SHARD, this::setMaxRefreshListeners);
        scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll);
        scopedSettings.addSettingsUpdateConsumer(DEFAULT_FIELD_SETTING, this::setDefaultFields);
+        scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_IDLE_AFTER, this::setSearchIdleAfter);
    }

+    private void setSearchIdleAfter(TimeValue searchIdleAfter) { this.searchIdleAfter = searchIdleAfter; }
+
    private void setTranslogFlushThresholdSize(ByteSizeValue byteSizeValue) {
        this.flushThresholdSize = byteSizeValue;
    }
@ -752,4 +761,16 @@ public final class IndexSettings {
|
||||||
}
|
}
|
||||||
|
|
||||||
public IndexScopedSettings getScopedSettings() { return scopedSettings;}
|
public IndexScopedSettings getScopedSettings() { return scopedSettings;}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns true iff the refresh setting exists or in other words is explicitly set.
|
||||||
|
*/
|
||||||
|
public boolean isExplicitRefresh() {
|
||||||
|
return INDEX_REFRESH_INTERVAL_SETTING.exists(settings);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the time that an index shard becomes search idle unless it's accessed in between
|
||||||
|
*/
|
||||||
|
public TimeValue getSearchIdleAfter() { return searchIdleAfter; }
|
||||||
}
|
}
|
||||||
|
|
|
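For orientation, a minimal sketch (not part of this change) of how the new `index.search.idle.after` setting introduced above could be supplied through the standard `Settings` builder; the ten-minute value is an arbitrary example, the default per the diff is 30s:

```java
// Illustrative only: building index settings that raise the search-idle threshold.
import org.elasticsearch.common.settings.Settings;

public class SearchIdleSettingExample {
    public static Settings searchIdleAfterTenMinutes() {
        return Settings.builder()
            .put("index.search.idle.after", "10m") // dynamic, index-scoped
            .build();
    }
}
```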
@@ -71,18 +71,12 @@ class CombinedDeletionPolicy extends IndexDeletionPolicy {
     }

     private void setLastCommittedTranslogGeneration(List<? extends IndexCommit> commits) throws IOException {
-        // We need to keep translog since the smallest translog generation of un-deleted commits.
-        // However, there are commits that are not deleted just because they are being snapshotted (rather than being kept by the policy).
-        // TODO: We need to distinguish those commits and skip them in calculating the minimum required translog generation.
-        long minRequiredGen = Long.MAX_VALUE;
-        for (IndexCommit indexCommit : commits) {
-            if (indexCommit.isDeleted() == false) {
-                long translogGen = Long.parseLong(indexCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY));
-                minRequiredGen = Math.min(translogGen, minRequiredGen);
-            }
-        }
-        assert minRequiredGen != Long.MAX_VALUE : "All commits are deleted";
-        translogDeletionPolicy.setMinTranslogGenerationForRecovery(minRequiredGen);
+        // when opening an existing lucene index, we currently always open the last commit.
+        // we therefore use the translog gen as the one that will be required for recovery
+        final IndexCommit indexCommit = commits.get(commits.size() - 1);
+        assert indexCommit.isDeleted() == false : "last commit is deleted";
+        long minGen = Long.parseLong(indexCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY));
+        translogDeletionPolicy.setMinTranslogGenerationForRecovery(minGen);
     }

     public SnapshotDeletionPolicy getIndexDeletionPolicy() {
@@ -23,7 +23,6 @@ import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.FilterLeafReader;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexFileNames;
 import org.apache.lucene.index.IndexReader;
@@ -79,10 +78,12 @@ import java.util.Arrays;
 import java.util.Base64;
 import java.util.Comparator;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -91,6 +92,7 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.BiFunction;
+import java.util.stream.Collectors;

 public abstract class Engine implements Closeable {

@@ -143,27 +145,12 @@ public abstract class Engine implements Closeable {
         return a.ramBytesUsed();
     }

-    /**
-     * Tries to extract a segment reader from the given index reader.
-     * If no SegmentReader can be extracted an {@link IllegalStateException} is thrown.
-     */
-    protected static SegmentReader segmentReader(LeafReader reader) {
-        if (reader instanceof SegmentReader) {
-            return (SegmentReader) reader;
-        } else if (reader instanceof FilterLeafReader) {
-            final FilterLeafReader fReader = (FilterLeafReader) reader;
-            return segmentReader(FilterLeafReader.unwrap(fReader));
-        }
-        // hard fail - we can't get a SegmentReader
-        throw new IllegalStateException("Can not extract segment reader from given index reader [" + reader + "]");
-    }
-
     /**
      * Returns whether a leaf reader comes from a merge (versus flush or addIndexes).
      */
     protected static boolean isMergedSegment(LeafReader reader) {
         // We expect leaves to be segment readers
-        final Map<String, String> diagnostics = segmentReader(reader).getSegmentInfo().info.getDiagnostics();
+        final Map<String, String> diagnostics = Lucene.segmentReader(reader).getSegmentInfo().info.getDiagnostics();
         final String source = diagnostics.get(IndexWriter.SOURCE);
         assert Arrays.asList(IndexWriter.SOURCE_ADDINDEXES_READERS, IndexWriter.SOURCE_FLUSH,
             IndexWriter.SOURCE_MERGE).contains(source) : "Unknown source " + source;
@@ -608,25 +595,40 @@
      */
     public final SegmentsStats segmentsStats(boolean includeSegmentFileSizes) {
         ensureOpen();
-        try (Searcher searcher = acquireSearcher("segments_stats")) {
-            SegmentsStats stats = new SegmentsStats();
-            for (LeafReaderContext reader : searcher.reader().leaves()) {
-                final SegmentReader segmentReader = segmentReader(reader.reader());
-                stats.add(1, segmentReader.ramBytesUsed());
-                stats.addTermsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPostingsReader()));
-                stats.addStoredFieldsMemoryInBytes(guardedRamBytesUsed(segmentReader.getFieldsReader()));
-                stats.addTermVectorsMemoryInBytes(guardedRamBytesUsed(segmentReader.getTermVectorsReader()));
-                stats.addNormsMemoryInBytes(guardedRamBytesUsed(segmentReader.getNormsReader()));
-                stats.addPointsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPointsReader()));
-                stats.addDocValuesMemoryInBytes(guardedRamBytesUsed(segmentReader.getDocValuesReader()));
-
-                if (includeSegmentFileSizes) {
-                    // TODO: consider moving this to StoreStats
-                    stats.addFileSizes(getSegmentFileSizes(segmentReader));
-                }
-            }
-            writerSegmentStats(stats);
-            return stats;
-        }
-    }
+        Set<String> segmentName = new HashSet<>();
+        SegmentsStats stats = new SegmentsStats();
+        try (Searcher searcher = acquireSearcher("segments_stats", SearcherScope.INTERNAL)) {
+            for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
+                SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
+                fillSegmentStats(segmentReader, includeSegmentFileSizes, stats);
+                segmentName.add(segmentReader.getSegmentName());
+            }
+        }
+
+        try (Searcher searcher = acquireSearcher("segments_stats", SearcherScope.EXTERNAL)) {
+            for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
+                SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
+                if (segmentName.contains(segmentReader.getSegmentName()) == false) {
+                    fillSegmentStats(segmentReader, includeSegmentFileSizes, stats);
+                }
+            }
+        }
+        writerSegmentStats(stats);
+        return stats;
+    }
+
+    private void fillSegmentStats(SegmentReader segmentReader, boolean includeSegmentFileSizes, SegmentsStats stats) {
+        stats.add(1, segmentReader.ramBytesUsed());
+        stats.addTermsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPostingsReader()));
+        stats.addStoredFieldsMemoryInBytes(guardedRamBytesUsed(segmentReader.getFieldsReader()));
+        stats.addTermVectorsMemoryInBytes(guardedRamBytesUsed(segmentReader.getTermVectorsReader()));
+        stats.addNormsMemoryInBytes(guardedRamBytesUsed(segmentReader.getNormsReader()));
+        stats.addPointsMemoryInBytes(guardedRamBytesUsed(segmentReader.getPointsReader()));
+        stats.addDocValuesMemoryInBytes(guardedRamBytesUsed(segmentReader.getDocValuesReader()));
+
+        if (includeSegmentFileSizes) {
+            // TODO: consider moving this to StoreStats
+            stats.addFileSizes(getSegmentFileSizes(segmentReader));
+        }
+    }
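The stats are now gathered from the internal searcher first and only topped up from the external one, keyed by segment name so no segment is counted twice. A minimal sketch of that de-duplication pattern, reduced to plain collections (the method and collection names below are hypothetical, not from the diff):

```java
// Illustrative only: count each segment once across two reader views.
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class SegmentDedupExample {
    static long countOnce(List<String> internalSegments, List<String> externalSegments) {
        Set<String> seen = new HashSet<>();
        long counted = 0;
        for (String name : internalSegments) {
            if (seen.add(name)) {
                counted++;
            }
        }
        for (String name : externalSegments) {
            if (seen.contains(name) == false) {
                counted++; // only segments not already visible to the internal reader
            }
        }
        return counted;
    }
}
```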
@@ -716,30 +718,18 @@
         ensureOpen();
         Map<String, Segment> segments = new HashMap<>();
         // first, go over and compute the search ones...
-        try (Searcher searcher = acquireSearcher("segments")){
-            for (LeafReaderContext reader : searcher.reader().leaves()) {
-                final SegmentReader segmentReader = segmentReader(reader.reader());
-                SegmentCommitInfo info = segmentReader.getSegmentInfo();
-                assert !segments.containsKey(info.info.name);
-                Segment segment = new Segment(info.info.name);
-                segment.search = true;
-                segment.docCount = reader.reader().numDocs();
-                segment.delDocCount = reader.reader().numDeletedDocs();
-                segment.version = info.info.getVersion();
-                segment.compound = info.info.getUseCompoundFile();
-                try {
-                    segment.sizeInBytes = info.sizeInBytes();
-                } catch (IOException e) {
-                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
-                }
-                segment.memoryInBytes = segmentReader.ramBytesUsed();
-                segment.segmentSort = info.info.getIndexSort();
-                if (verbose) {
-                    segment.ramTree = Accountables.namedAccountable("root", segmentReader);
-                }
-                segment.attributes = info.info.getAttributes();
-                // TODO: add more fine grained mem stats values to per segment info here
-                segments.put(info.info.name, segment);
+        try (Searcher searcher = acquireSearcher("segments", SearcherScope.EXTERNAL)){
+            for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
+                fillSegmentInfo(Lucene.segmentReader(ctx.reader()), verbose, true, segments);
+            }
+        }
+
+        try (Searcher searcher = acquireSearcher("segments", SearcherScope.INTERNAL)){
+            for (LeafReaderContext ctx : searcher.reader().getContext().leaves()) {
+                SegmentReader segmentReader = Lucene.segmentReader(ctx.reader());
+                if (segments.containsKey(segmentReader.getSegmentName()) == false) {
+                    fillSegmentInfo(segmentReader, verbose, false, segments);
+                }
             }
         }
@@ -769,16 +759,34 @@
         }

         Segment[] segmentsArr = segments.values().toArray(new Segment[segments.values().size()]);
-        Arrays.sort(segmentsArr, new Comparator<Segment>() {
-            @Override
-            public int compare(Segment o1, Segment o2) {
-                return (int) (o1.getGeneration() - o2.getGeneration());
-            }
-        });
-
+        Arrays.sort(segmentsArr, Comparator.comparingLong(Segment::getGeneration));
         return segmentsArr;
     }

+    private void fillSegmentInfo(SegmentReader segmentReader, boolean verbose, boolean search, Map<String, Segment> segments) {
+        SegmentCommitInfo info = segmentReader.getSegmentInfo();
+        assert segments.containsKey(info.info.name) == false;
+        Segment segment = new Segment(info.info.name);
+        segment.search = search;
+        segment.docCount = segmentReader.numDocs();
+        segment.delDocCount = segmentReader.numDeletedDocs();
+        segment.version = info.info.getVersion();
+        segment.compound = info.info.getUseCompoundFile();
+        try {
+            segment.sizeInBytes = info.sizeInBytes();
+        } catch (IOException e) {
+            logger.trace((Supplier<?>) () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
+        }
+        segment.memoryInBytes = segmentReader.ramBytesUsed();
+        segment.segmentSort = info.info.getIndexSort();
+        if (verbose) {
+            segment.ramTree = Accountables.namedAccountable("root", segmentReader);
+        }
+        segment.attributes = info.info.getAttributes();
+        // TODO: add more fine grained mem stats values to per segment info here
+        segments.put(info.info.name, segment);
+    }
+
     /**
      * The list of segments in the engine.
      */
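The anonymous comparator that cast a `long` difference to `int` is replaced by `Comparator.comparingLong`, which cannot overflow. A small standalone sketch (example values only) of why that matters:

```java
// Illustrative only: (int) (a - b) can overflow for large generation numbers,
// while Comparator.comparingLong compares the longs directly.
import java.util.Arrays;
import java.util.Comparator;

public class GenerationSortExample {
    public static void main(String[] args) {
        Long[] generations = {5L, 1L, 3_000_000_000L};
        Arrays.sort(generations, Comparator.comparingLong(Long::longValue));
        System.out.println(Arrays.toString(generations)); // [1, 5, 3000000000]
    }
}
```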
@@ -29,7 +29,7 @@ import java.io.IOException;
 /**
  * A {@link SortedSetDocValues} implementation that returns ordinals that are global.
  */
-public class GlobalOrdinalMapping extends SortedSetDocValues {
+final class GlobalOrdinalMapping extends SortedSetDocValues {

     private final SortedSetDocValues values;
     private final OrdinalMap ordinalMap;
@@ -49,7 +49,7 @@ public class GlobalOrdinalMapping extends SortedSetDocValues {
         return ordinalMap.getValueCount();
     }

-    public final long getGlobalOrd(long segmentOrd) {
+    public long getGlobalOrd(long segmentOrd) {
         return mapping.get(segmentOrd);
     }

@@ -153,23 +153,17 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>

         @Override
         public boolean equals(Object obj) {
-            // TODO: Do this if/when we can assume scripts are pure functions
-            // and they have a reliable equals impl
-            /*if (this == obj)
-                return true;
-            if (sameClassAs(obj) == false)
+            if (sameClassAs(obj) == false)
                 return false;
             ScriptQuery other = (ScriptQuery) obj;
-            return Objects.equals(script, other.script);*/
-            return this == obj;
+            return Objects.equals(script, other.script);
         }

         @Override
         public int hashCode() {
-            // TODO: Do this if/when we can assume scripts are pure functions
-            // and they have a reliable equals impl
-            // return Objects.hash(classHash(), script);
-            return System.identityHashCode(this);
+            int h = classHash();
+            h = 31 * h + script.hashCode();
+            return h;
         }

         @Override
@@ -196,6 +190,14 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>
                 };
                 return new ConstantScoreScorer(this, score(), twoPhase);
             }
+
+            @Override
+            public boolean isCacheable(LeafReaderContext ctx) {
+                // TODO: Change this to true when we can assume that scripts are pure functions
+                // ie. the return value is always the same given the same conditions and may not
+                // depend on the current timestamp, other documents, etc.
+                return false;
+            }
         };
     }
 }
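The rewritten `equals()`/`hashCode()` follow Lucene's `sameClassAs()`/`classHash()` convention for queries instead of identity comparison. A self-contained sketch of that convention (the `QueryLike`/`EquatableQuery` classes below are hypothetical stand-ins, not types from the diff):

```java
// Illustrative only: equality based on class identity plus the wrapped script.
import java.util.Objects;

abstract class QueryLike {
    protected final int classHash() { return getClass().hashCode(); }
    protected final boolean sameClassAs(Object other) {
        return other != null && getClass() == other.getClass();
    }
}

final class EquatableQuery extends QueryLike {
    private final String script;
    EquatableQuery(String script) { this.script = script; }

    @Override
    public boolean equals(Object obj) {
        if (sameClassAs(obj) == false) {
            return false;
        }
        return Objects.equals(script, ((EquatableQuery) obj).script);
    }

    @Override
    public int hashCode() {
        int h = classHash();
        h = 31 * h + script.hashCode();
        return h;
    }
}
```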
@@ -26,6 +26,7 @@ import org.apache.lucene.index.Term;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.CoveringQuery;
 import org.apache.lucene.search.DoubleValues;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.LongValues;
 import org.apache.lucene.search.LongValuesSource;
 import org.apache.lucene.search.Query;
@@ -290,13 +291,18 @@ public final class TermsSetQueryBuilder extends AbstractQueryBuilder<TermsSetQue

         @Override
         public int hashCode() {
-            // CoveringQuery with this field value source cannot be cachable
-            return System.identityHashCode(this);
+            int h = getClass().hashCode();
+            h = 31 * h + script.hashCode();
+            return h;
         }

         @Override
         public boolean equals(Object obj) {
-            return this == obj;
+            if (obj == null || getClass() != obj.getClass()) {
+                return false;
+            }
+            ScriptLongValueSource that = (ScriptLongValueSource) obj;
+            return Objects.equals(script, that.script);
         }

         @Override
@@ -304,6 +310,19 @@ public final class TermsSetQueryBuilder extends AbstractQueryBuilder<TermsSetQue
             return "script(" + script.toString() + ")";
         }

+        @Override
+        public boolean isCacheable(LeafReaderContext ctx) {
+            // TODO: Change this to true when we can assume that scripts are pure functions
+            // ie. the return value is always the same given the same conditions and may not
+            // depend on the current timestamp, other documents, etc.
+            return false;
+        }
+
+        @Override
+        public LongValuesSource rewrite(IndexSearcher searcher) throws IOException {
+            return this;
+        }
+
     }

     // Forked from LongValuesSource.FieldValuesSource and changed getValues() method to always use sorted numeric
@@ -364,6 +383,16 @@ public final class TermsSetQueryBuilder extends AbstractQueryBuilder<TermsSetQue
         public boolean needsScores() {
             return false;
         }
+
+        @Override
+        public boolean isCacheable(LeafReaderContext ctx) {
+            return true;
+        }
+
+        @Override
+        public LongValuesSource rewrite(IndexSearcher searcher) throws IOException {
+            return this;
+        }
     }

 }
@@ -21,10 +21,14 @@ package org.elasticsearch.index.shard;

 import com.carrotsearch.hppc.ObjectLongMap;
 import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.lucene.index.CheckIndex;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.SegmentCommitInfo;
 import org.apache.lucene.index.SegmentInfos;
+import org.apache.lucene.index.SegmentReader;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.QueryCachingPolicy;
 import org.apache.lucene.search.ReferenceManager;
@@ -58,11 +62,11 @@ import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.metrics.MeanMetric;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.AsyncIOProcessor;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexModule;
@@ -234,6 +238,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
      */
     private final RefreshListeners refreshListeners;

+    private final AtomicLong lastSearcherAccess = new AtomicLong();
+    private final AtomicReference<Translog.Location> pendingRefreshLocation = new AtomicReference<>();
+
     public IndexShard(
             ShardRouting shardRouting,
             IndexSettings indexSettings,
@@ -298,6 +305,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         searcherWrapper = indexSearcherWrapper;
         primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id());
         refreshListeners = buildRefreshListeners();
+        lastSearcherAccess.set(threadPool.relativeTimeInMillis());
         persistMetadata(path, indexSettings, shardRouting, null, logger);
     }

@@ -856,15 +864,30 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }

     public DocsStats docStats() {
+        // we calculate the doc stats based on the internal reader that is more up-to-date and not subject
+        // to external refreshes. For instance we don't refresh an external reader if we flush and indices with
+        // index.refresh_interval=-1 won't see any doc stats updates at all. This change will give more accurate statistics
+        // when indexing but not refreshing in general. Yet, if a refresh happens the internal reader is refresh as well so we are
+        // safe here.
         long numDocs = 0;
         long numDeletedDocs = 0;
         long sizeInBytes = 0;
-        List<Segment> segments = segments(false);
-        for (Segment segment : segments) {
-            if (segment.search) {
-                numDocs += segment.getNumDocs();
-                numDeletedDocs += segment.getDeletedDocs();
-                sizeInBytes += segment.getSizeInBytes();
+        try (Engine.Searcher searcher = acquireSearcher("docStats", Engine.SearcherScope.INTERNAL)) {
+            // we don't wait for a pending refreshes here since it's a stats call instead we mark it as accesssed only which will cause
+            // the next scheduled refresh to go through and refresh the stats as well
+            markSearcherAccessed();
+            for (LeafReaderContext reader : searcher.reader().leaves()) {
+                // we go on the segment level here to get accurate numbers
+                final SegmentReader segmentReader = Lucene.segmentReader(reader.reader());
+                SegmentCommitInfo info = segmentReader.getSegmentInfo();
+                numDocs += reader.reader().numDocs();
+                numDeletedDocs += reader.reader().numDeletedDocs();
+                try {
+                    sizeInBytes += info.sizeInBytes();
+                } catch (IOException e) {
+                    logger.trace((org.apache.logging.log4j.util.Supplier<?>)
+                        () -> new ParameterizedMessage("failed to get size for [{}]", info.info.name), e);
+                }
             }
         }
         return new DocsStats(numDocs, numDeletedDocs, sizeInBytes);
@@ -949,6 +972,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     public CompletionStats completionStats(String... fields) {
         CompletionStats completionStats = new CompletionStats();
         try (Engine.Searcher currentSearcher = acquireSearcher("completion_stats")) {
+            // we don't wait for a pending refreshes here since it's a stats call instead we mark it as accesssed only which will cause
+            // the next scheduled refresh to go through and refresh the stats as well
+            markSearcherAccessed();
             completionStats.add(CompletionFieldStats.completionStats(currentSearcher.reader(), fields));
         }
         return completionStats;
@@ -1118,6 +1144,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         return acquireSearcher(source, Engine.SearcherScope.EXTERNAL);
     }

+    private void markSearcherAccessed() {
+        lastSearcherAccess.lazySet(threadPool.relativeTimeInMillis());
+    }
+
     private Engine.Searcher acquireSearcher(String source, Engine.SearcherScope scope) {
         readAllowed();
         final Engine engine = getEngine();
@@ -2393,7 +2423,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
             indexSettings::getMaxRefreshListeners,
             () -> refresh("too_many_listeners"),
             threadPool.executor(ThreadPool.Names.LISTENER)::execute,
-            logger);
+            logger, threadPool.getThreadContext());
     }

     /**
@@ -2419,14 +2449,74 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }

     /**
-     * Returns <code>true</code> iff one or more changes to the engine are not visible to via the current searcher *or* there are pending
-     * refresh listeners.
-     * Otherwise <code>false</code>.
+     * Executes a scheduled refresh if necessary.
      *
-     * @throws AlreadyClosedException if the engine or internal indexwriter in the engine is already closed
+     * @return <code>true</code> iff the engine got refreshed otherwise <code>false</code>
      */
-    public boolean isRefreshNeeded() {
-        return getEngine().refreshNeeded() || (refreshListeners != null && refreshListeners.refreshNeeded());
+    public boolean scheduledRefresh() {
+        boolean listenerNeedsRefresh = refreshListeners.refreshNeeded();
+        if (isReadAllowed() && (listenerNeedsRefresh || getEngine().refreshNeeded())) {
+            if (listenerNeedsRefresh == false // if we have a listener that is waiting for a refresh we need to force it
+                && isSearchIdle() && indexSettings.isExplicitRefresh() == false) {
+                // lets skip this refresh since we are search idle and
+                // don't necessarily need to refresh. the next searcher access will register a refreshListener and that will
+                // cause the next schedule to refresh.
+                setRefreshPending();
+                return false;
+            } else {
+                refresh("schedule");
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Returns true if this shards is search idle
+     */
+    final boolean isSearchIdle() {
+        return (threadPool.relativeTimeInMillis() - lastSearcherAccess.get()) >= indexSettings.getSearchIdleAfter().getMillis();
+    }
+
+    /**
+     * Returns the last timestamp the searcher was accessed. This is a relative timestamp in milliseconds.
+     */
+    final long getLastSearcherAccess() {
+        return lastSearcherAccess.get();
+    }
+
+    private void setRefreshPending() {
+        Engine engine = getEngine();
+        Translog.Location lastWriteLocation = engine.getTranslog().getLastWriteLocation();
+        Translog.Location location;
+        do {
+            location = this.pendingRefreshLocation.get();
+            if (location != null && lastWriteLocation.compareTo(location) <= 0) {
+                break;
+            }
+        } while (pendingRefreshLocation.compareAndSet(location, lastWriteLocation) == false);
+    }
+
+    /**
+     * Registers the given listener and invokes it once the shard is active again and all
+     * pending refresh translog location has been refreshed. If there is no pending refresh location registered the listener will be
+     * invoked immediately.
+     * @param listener the listener to invoke once the pending refresh location is visible. The listener will be called with
+     *                 <code>true</code> if the listener was registered to wait for a refresh.
+     */
+    public final void awaitShardSearchActive(Consumer<Boolean> listener) {
+        if (isSearchIdle()) {
+            markSearcherAccessed(); // move the shard into non-search idle
+        }
+        final Translog.Location location = pendingRefreshLocation.get();
+        if (location != null) {
+            addRefreshListener(location, (b) -> {
+                pendingRefreshLocation.compareAndSet(location, null);
+                listener.accept(true);
+            });
+        } else {
+            listener.accept(false);
+        }
     }

     /**
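The core of `scheduledRefresh()` is a three-way decision: refresh, defer because the shard is search idle, or do nothing. A minimal sketch of that decision logic with plain booleans (the helper class and method names are illustrative, not part of the change):

```java
// Illustrative only: a scheduled refresh is skipped when the shard is search-idle,
// no refresh listener is waiting, and no explicit refresh interval was configured.
public class ScheduledRefreshDecisionExample {
    static boolean shouldRefresh(boolean readAllowed, boolean listenerNeedsRefresh,
                                 boolean engineNeedsRefresh, boolean searchIdle, boolean explicitRefresh) {
        if (readAllowed && (listenerNeedsRefresh || engineNeedsRefresh)) {
            if (listenerNeedsRefresh == false && searchIdle && explicitRefresh == false) {
                return false; // defer: record a pending refresh location instead
            }
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        // idle shard, default refresh interval, no waiting listener -> refresh is deferred
        System.out.println(shouldRefresh(true, false, true, true, false)); // false
        // a waiting refresh listener always forces the refresh through
        System.out.println(shouldRefresh(true, true, true, true, false)); // true
    }
}
```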
@@ -22,6 +22,7 @@ package org.elasticsearch.index.shard;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.search.ReferenceManager;
 import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.index.translog.Translog;

 import java.io.Closeable;
@@ -45,6 +46,7 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener,
     private final Runnable forceRefresh;
     private final Executor listenerExecutor;
     private final Logger logger;
+    private final ThreadContext threadContext;

     /**
      * Is this closed? If true then we won't add more listeners and have flushed all pending listeners.
@@ -63,11 +65,13 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener,
      */
     private volatile Translog.Location lastRefreshedLocation;

-    public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Executor listenerExecutor, Logger logger) {
+    public RefreshListeners(IntSupplier getMaxRefreshListeners, Runnable forceRefresh, Executor listenerExecutor, Logger logger,
+                            ThreadContext threadContext) {
         this.getMaxRefreshListeners = getMaxRefreshListeners;
         this.forceRefresh = forceRefresh;
         this.listenerExecutor = listenerExecutor;
         this.logger = logger;
+        this.threadContext = threadContext;
     }

     /**
@@ -98,8 +102,15 @@ public final class RefreshListeners implements ReferenceManager.RefreshListener,
             refreshListeners = listeners;
         }
         if (listeners.size() < getMaxRefreshListeners.getAsInt()) {
+            ThreadContext.StoredContext storedContext = threadContext.newStoredContext(true);
+            Consumer<Boolean> contextPreservingListener = forced -> {
+                try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
+                    storedContext.restore();
+                    listener.accept(forced);
+                }
+            };
             // We have a free slot so register the listener
-            listeners.add(new Tuple<>(location, listener));
+            listeners.add(new Tuple<>(location, contextPreservingListener));
             return false;
         }
     }
@@ -157,6 +157,13 @@ final class ShardSplittingQuery extends Query {

                 return new ConstantScoreScorer(this, score(), new BitSetIterator(bitSet, bitSet.length()));
             }
+
+            @Override
+            public boolean isCacheable(LeafReaderContext ctx) {
+                // This is not a regular query, let's not cache it. It wouldn't help
+                // anyway.
+                return false;
+            }
         };
     }

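The registered refresh listener now captures the caller's thread context and restores it when the deferred callback finally runs. A self-contained sketch of that capture-and-restore pattern, with a `ThreadLocal` standing in for Elasticsearch's `ThreadContext` (everything below is illustrative, not code from the diff):

```java
// Illustrative only: capture context at registration time, restore it around the callback.
import java.util.function.Consumer;

public class ContextPreservingListenerExample {
    static final ThreadLocal<String> CONTEXT = ThreadLocal.withInitial(() -> "empty");

    static Consumer<Boolean> preserving(Consumer<Boolean> listener) {
        final String captured = CONTEXT.get();           // capture at registration time
        return forced -> {
            String previous = CONTEXT.get();
            CONTEXT.set(captured);                        // restore before invoking
            try {
                listener.accept(forced);
            } finally {
                CONTEXT.set(previous);                    // put the caller's context back
            }
        };
    }

    public static void main(String[] args) {
        CONTEXT.set("request-A");
        Consumer<Boolean> listener = preserving(forced -> System.out.println(CONTEXT.get()));
        CONTEXT.set("some-other-request");
        listener.accept(true); // prints "request-A"
    }
}
```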
@@ -19,6 +19,12 @@

 package org.elasticsearch.index.translog;

+import com.carrotsearch.hppc.LongHashSet;
+import com.carrotsearch.hppc.LongObjectHashMap;
+import com.carrotsearch.hppc.LongSet;
+import org.apache.lucene.util.FixedBitSet;
+import org.elasticsearch.index.seqno.SequenceNumbers;
+
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.Arrays;
@@ -30,32 +36,44 @@ final class MultiSnapshot implements Translog.Snapshot {

     private final TranslogSnapshot[] translogs;
     private final int totalOperations;
+    private int overriddenOperations;
     private final Closeable onClose;
     private int index;
+    private final SeqNoSet seenSeqNo;

     /**
      * Creates a new point in time snapshot of the given snapshots. Those snapshots are always iterated in-order.
      */
     MultiSnapshot(TranslogSnapshot[] translogs, Closeable onClose) {
         this.translogs = translogs;
-        totalOperations = Arrays.stream(translogs).mapToInt(TranslogSnapshot::totalOperations).sum();
+        this.totalOperations = Arrays.stream(translogs).mapToInt(TranslogSnapshot::totalOperations).sum();
+        this.overriddenOperations = 0;
         this.onClose = onClose;
-        index = 0;
+        this.seenSeqNo = new SeqNoSet();
+        this.index = translogs.length - 1;
     }


     @Override
     public int totalOperations() {
         return totalOperations;
     }

+    @Override
+    public int overriddenOperations() {
+        return overriddenOperations;
+    }
+
     @Override
     public Translog.Operation next() throws IOException {
-        for (; index < translogs.length; index++) {
+        for (; index >= 0; index--) {
             final TranslogSnapshot current = translogs[index];
-            Translog.Operation op = current.next();
-            if (op != null) { // if we are null we move to the next snapshot
-                return op;
+            Translog.Operation op;
+            while ((op = current.next()) != null) {
+                if (op.seqNo() == SequenceNumbers.UNASSIGNED_SEQ_NO || seenSeqNo.getAndSet(op.seqNo()) == false) {
+                    return op;
+                } else {
+                    overriddenOperations++;
+                }
             }
         }
         return null;
@@ -65,4 +83,76 @@ final class MultiSnapshot implements Translog.Snapshot {
     public void close() throws IOException {
         onClose.close();
     }
+
+    /**
+     * A wrapper of {@link FixedBitSet} but allows to check if all bits are set in O(1).
+     */
+    private static final class CountedBitSet {
+        private short onBits;
+        private final FixedBitSet bitset;
+
+        CountedBitSet(short numBits) {
+            assert numBits > 0;
+            this.onBits = 0;
+            this.bitset = new FixedBitSet(numBits);
+        }
+
+        boolean getAndSet(int index) {
+            assert index >= 0;
+            boolean wasOn = bitset.getAndSet(index);
+            if (wasOn == false) {
+                onBits++;
+            }
+            return wasOn;
+        }
+
+        boolean hasAllBitsOn() {
+            return onBits == bitset.length();
+        }
+    }
+
+    /**
+     * Sequence numbers from translog are likely to form contiguous ranges,
+     * thus collapsing a completed bitset into a single entry will reduce memory usage.
+     */
+    static final class SeqNoSet {
+        static final short BIT_SET_SIZE = 1024;
+        private final LongSet completedSets = new LongHashSet();
+        private final LongObjectHashMap<CountedBitSet> ongoingSets = new LongObjectHashMap<>();
+
+        /**
+         * Marks this sequence number and returns <tt>true</tt> if it is seen before.
+         */
+        boolean getAndSet(long value) {
+            assert value >= 0;
+            final long key = value / BIT_SET_SIZE;
+
+            if (completedSets.contains(key)) {
+                return true;
+            }
+
+            CountedBitSet bitset = ongoingSets.get(key);
+            if (bitset == null) {
+                bitset = new CountedBitSet(BIT_SET_SIZE);
+                ongoingSets.put(key, bitset);
+            }
+
+            final boolean wasOn = bitset.getAndSet(Math.toIntExact(value % BIT_SET_SIZE));
+            if (bitset.hasAllBitsOn()) {
+                ongoingSets.remove(key);
+                completedSets.add(key);
+            }
+            return wasOn;
+        }
+
+        // For testing
+        long completeSetsSize() {
+            return completedSets.size();
+        }
+
+        // For testing
+        long ongoingSetsSize() {
+            return ongoingSets.size();
+        }
+    }
 }
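`SeqNoSet` buckets sequence numbers by `value / BIT_SET_SIZE` and, once a bucket is completely filled, collapses it into a single entry in the completed set. A minimal standalone sketch of the same bucketing idea using `java.util.BitSet` instead of `CountedBitSet` and the HPPC collections (the example omits the collapse-on-completion optimisation):

```java
// Illustrative only: mark a sequence number and report whether it was already seen.
import java.util.BitSet;
import java.util.HashMap;
import java.util.Map;

public class SeqNoDedupExample {
    static final int BUCKET_SIZE = 1024;
    private final Map<Long, BitSet> buckets = new HashMap<>();

    /** Marks the sequence number and returns true if it was already seen. */
    boolean getAndSet(long seqNo) {
        long key = seqNo / BUCKET_SIZE;
        int bit = (int) (seqNo % BUCKET_SIZE);
        BitSet bucket = buckets.computeIfAbsent(key, k -> new BitSet(BUCKET_SIZE));
        boolean wasOn = bucket.get(bit);
        bucket.set(bit);
        return wasOn;
    }

    public static void main(String[] args) {
        SeqNoDedupExample seen = new SeqNoDedupExample();
        System.out.println(seen.getAndSet(7));  // false: first time
        System.out.println(seen.getAndSet(7));  // true: duplicate, would count as overridden
    }
}
```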
@@ -831,10 +831,19 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     public interface Snapshot extends Closeable {

         /**
-         * The total number of operations in the translog.
+         * The total estimated number of operations in the snapshot.
         */
         int totalOperations();

+        /**
+         * The number of operations have been overridden (eg. superseded) in the snapshot so far.
+         * If two operations have the same sequence number, the operation with a lower term will be overridden by the operation
+         * with a higher term. Unlike {@link #totalOperations()}, this value is updated each time after {@link #next()}) is called.
+         */
+        default int overriddenOperations() {
+            return 0;
+        }
+
         /**
          * Returns the next operation in the snapshot or <code>null</code> if we reached the end.
          */
@@ -19,13 +19,14 @@

 package org.elasticsearch.index.translog;

+import org.elasticsearch.cli.LoggingAwareMultiCommand;
 import org.elasticsearch.cli.MultiCommand;
 import org.elasticsearch.cli.Terminal;

 /**
  * Class encapsulating and dispatching commands from the {@code elasticsearch-translog} command line tool
  */
-public class TranslogToolCli extends MultiCommand {
+public class TranslogToolCli extends LoggingAwareMultiCommand {

     private TranslogToolCli() {
         super("A CLI tool for various Elasticsearch translog actions");
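Because overridden operations never come back from `next()`, a consumer that checks it replayed everything has to add `overriddenOperations()` to the count of operations it actually saw, which is exactly what the recovery assertion further down does. A rough sketch of that accounting, using a small stand-in interface that mirrors the methods shown above:

```java
// Illustrative only: drain a snapshot and reconcile the counts.
import java.io.IOException;

public class SnapshotAccountingExample {
    interface Snapshot {
        int totalOperations();
        default int overriddenOperations() { return 0; }
        Object next() throws IOException;
    }

    static int drain(Snapshot snapshot) throws IOException {
        int sent = 0;
        while (snapshot.next() != null) {
            sent++;
        }
        // operations superseded by a higher term are skipped by next(),
        // so they must be added back when comparing against totalOperations()
        assert snapshot.totalOperations() == sent + snapshot.overriddenOperations();
        return sent;
    }
}
```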
@@ -157,6 +157,11 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
             shardKeyMap.add(context.reader());
             return in.bulkScorer(context);
         }
+
+        @Override
+        public boolean isCacheable(LeafReaderContext ctx) {
+            return in.isCacheable(ctx);
+        }
     }

     /** Clear all entries that belong to the given index. */
@@ -66,6 +66,7 @@ import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.List;
+import java.util.Locale;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Function;
 import java.util.function.Supplier;
@@ -567,8 +568,9 @@ public class RecoverySourceHandler {
             cancellableThreads.executeIO(sendBatch);
         }

-        assert expectedTotalOps == skippedOps + totalSentOps
-            : "expected total [" + expectedTotalOps + "], skipped [" + skippedOps + "], total sent [" + totalSentOps + "]";
+        assert expectedTotalOps == snapshot.overriddenOperations() + skippedOps + totalSentOps
+            : String.format(Locale.ROOT, "expected total [%d], overridden [%d], skipped [%d], total sent [%d]",
+                expectedTotalOps, snapshot.overriddenOperations(), skippedOps, totalSentOps);

         logger.trace("sent final batch of [{}][{}] (total: [{}]) translog operations", ops, new ByteSizeValue(size), expectedTotalOps);

@@ -616,7 +616,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
             BytesStreamOutput out = new BytesStreamOutput();
             Streams.copy(blob, out);
             // EMPTY is safe here because RepositoryData#fromXContent calls namedObject
-            try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes())) {
+            try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes(), XContentType.JSON)) {
                 repositoryData = RepositoryData.snapshotsFromXContent(parser, indexGen);
             } catch (NotXContentException e) {
                 logger.warn("[{}] index blob is not valid x-content [{} bytes]", snapshotsIndexBlobName, out.bytes().length());
@@ -628,7 +628,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         try (InputStream blob = snapshotsBlobContainer.readBlob(INCOMPATIBLE_SNAPSHOTS_BLOB)) {
             BytesStreamOutput out = new BytesStreamOutput();
             Streams.copy(blob, out);
-            try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes())) {
+            try (XContentParser parser = XContentHelper.createParser(NamedXContentRegistry.EMPTY, out.bytes(), XContentType.JSON)) {
                 repositoryData = repositoryData.incompatibleSnapshotsFromXContent(parser);
             }
         } catch (NoSuchFileException e) {
@@ -328,6 +328,14 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<D
     }

     public void shard(SearchShardTarget target) {
+        if (innerHits != null) {
+            for (SearchHits innerHits : innerHits.values()) {
+                for (SearchHit innerHit : innerHits) {
+                    innerHit.shard(target);
+                }
+            }
+        }
+
         this.shard = target;
         if (target != null) {
             this.index = target.getIndex();
@@ -414,18 +422,17 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<D
             builder.field(Fields._SHARD, shard.getShardId());
             builder.field(Fields._NODE, shard.getNodeIdText());
         }
+        if (index != null) {
+            builder.field(Fields._INDEX, RemoteClusterAware.buildRemoteIndexName(clusterAlias, index));
+        }
+        if (type != null) {
+            builder.field(Fields._TYPE, type);
+        }
+        if (id != null) {
+            builder.field(Fields._ID, id);
+        }
         if (nestedIdentity != null) {
             nestedIdentity.toXContent(builder, params);
-        } else {
-            if (index != null) {
-                builder.field(Fields._INDEX, RemoteClusterAware.buildRemoteIndexName(clusterAlias, index));
-            }
-            if (type != null) {
-                builder.field(Fields._TYPE, type);
-            }
-            if (id != null) {
-                builder.field(Fields._ID, id);
-            }
         }
         if (version != -1) {
             builder.field(Fields._VERSION, version);
@@ -840,9 +847,9 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<D
         private static final String FIELD = "field";
         private static final String OFFSET = "offset";

-        private Text field;
-        private int offset;
-        private NestedIdentity child;
+        private final Text field;
+        private final int offset;
+        private final NestedIdentity child;

         public NestedIdentity(String field, int offset, NestedIdentity child) {
             this.field = new Text(field);
@@ -582,6 +582,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
             throws IOException {
         return createSearchContext(request, timeout, true);
     }
+
     private DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout,
                                                      boolean assertAsyncActions)
             throws IOException {
@@ -979,22 +980,31 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
      * The action listener is guaranteed to be executed on the search thread-pool
      */
     private void rewriteShardRequest(ShardSearchRequest request, ActionListener<ShardSearchRequest> listener) {
+        ActionListener<Rewriteable> actionListener = ActionListener.wrap(r ->
+            threadPool.executor(Names.SEARCH).execute(new AbstractRunnable() {
+                @Override
+                public void onFailure(Exception e) {
+                    listener.onFailure(e);
+                }
+
+                @Override
+                protected void doRun() throws Exception {
+                    listener.onResponse(request);
+                }
+            }), listener::onFailure);
+        IndexShard shardOrNull = indicesService.getShardOrNull(request.shardId());
+        if (shardOrNull != null) {
+            // now we need to check if there is a pending refresh and register
+            ActionListener<Rewriteable> finalListener = actionListener;
+            actionListener = ActionListener.wrap(r ->
+                shardOrNull.awaitShardSearchActive(b -> finalListener.onResponse(r)), finalListener::onFailure);
+        }
         // we also do rewrite on the coordinating node (TransportSearchService) but we also need to do it here for BWC as well as
         // AliasFilters that might need to be rewritten. These are edge-cases but we are every efficient doing the rewrite here so it's not
         // adding a lot of overhead
-        Rewriteable.rewriteAndFetch(request.getRewriteable(), indicesService.getRewriteContext(request::nowInMillis),
-            ActionListener.wrap(r ->
-                threadPool.executor(Names.SEARCH).execute(new AbstractRunnable() {
-                    @Override
-                    public void onFailure(Exception e) {
-                        listener.onFailure(e);
-                    }
-
-                    @Override
-                    protected void doRun() throws Exception {
-                        listener.onResponse(request);
-                    }
-                }), listener::onFailure));
+        Rewriteable.rewriteAndFetch(request.getRewriteable(), indicesService.getRewriteContext(request::nowInMillis), actionListener);
     }

     /**
@@ -1003,4 +1013,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
     public QueryRewriteContext getRewriteContext(LongSupplier nowInMillis) {
         return indicesService.getRewriteContext(nowInMillis);
     }
+
+    public IndicesService getIndicesService() {
+        return indicesService;
+    }
 }
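When the target shard is present locally, the rewrite listener is wrapped once more so the response is only delivered after `awaitShardSearchActive` has fired, i.e. after any pending refresh on a search-idle shard becomes visible. A rough sketch of that wrapping idea with a tiny hand-rolled listener type instead of Elasticsearch's `ActionListener` (all names below are illustrative):

```java
// Illustrative only: defer onResponse until the shard reports it is search-active again.
import java.util.function.Consumer;

public class ListenerChainingExample {
    interface Listener<T> {
        void onResponse(T value);
        void onFailure(Exception e);
    }

    static <T> Listener<T> waitForShardActive(Listener<T> delegate, Consumer<Runnable> awaitShardSearchActive) {
        return new Listener<T>() {
            @Override
            public void onResponse(T value) {
                // register a callback that fires once any pending refresh is visible
                awaitShardSearchActive.accept(() -> delegate.onResponse(value));
            }

            @Override
            public void onFailure(Exception e) {
                delegate.onFailure(e);
            }
        };
    }
}
```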
@ -25,7 +25,6 @@ import org.apache.lucene.search.LeafCollector;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
 import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
-import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalMapping;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.sort.SortOrder;

@ -179,16 +178,10 @@ abstract class CompositeValuesSource<VS extends ValuesSource, T extends Comparab
                 if (lookup == null) {
                     lookup = dvs;
                     if (topValue != null && topValueLong == null) {
-                        if (lookup instanceof GlobalOrdinalMapping) {
-                            // Find the global ordinal (or the insertion point) for the provided top value.
-                            topValueLong = lookupGlobalOrdinals((GlobalOrdinalMapping) lookup, topValue);
-                        } else {
-                            // Global ordinals are not needed, switch back to ordinals (single segment case).
-                            topValueLong = lookup.lookupTerm(topValue);
-                            if (topValueLong < 0) {
-                                // convert negative insert position
-                                topValueLong = -topValueLong - 2;
-                            }
+                        topValueLong = lookup.lookupTerm(topValue);
+                        if (topValueLong < 0) {
+                            // convert negative insert position
+                            topValueLong = -topValueLong - 2;
                         }
                     }
                 }
@ -202,25 +195,6 @@ abstract class CompositeValuesSource<VS extends ValuesSource, T extends Comparab
             }
         };
     }
-
-    private static long lookupGlobalOrdinals(GlobalOrdinalMapping mapping, BytesRef key) throws IOException {
-        long low = 0;
-        long high = mapping.getValueCount();
-
-        while (low <= high) {
-            long mid = (low + high) >>> 1;
-            BytesRef midVal = mapping.lookupOrd(mid);
-            int cmp = midVal.compareTo(key);
-            if (cmp < 0) {
-                low = mid + 1;
-            } else if (cmp > 0) {
-                high = mid - 1;
-            } else {
-                return mid;
-            }
-        }
-        return low-1;
-    }
 }

 /**

@ -259,11 +259,6 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector impleme
             return currentScore;
         }

-        @Override
-        public int freq() throws IOException {
-            throw new ElasticsearchException("This caching scorer implementation only implements score() and docID()");
-        }
-
         @Override
         public int docID() {
             return currentDocId;

@ -33,7 +33,6 @@ import org.elasticsearch.common.util.IntArray;
 import org.elasticsearch.common.util.LongHash;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.fielddata.AbstractSortedSetDocValues;
-import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalMapping;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
@ -50,6 +49,7 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.function.LongUnaryOperator;

 import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;

@ -295,9 +295,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
     */
    static class LowCardinality extends GlobalOrdinalsStringTermsAggregator {

+        private LongUnaryOperator mapping;
         private IntArray segmentDocCounts;
-        private SortedSetDocValues globalOrds;
-        private SortedSetDocValues segmentOrds;

         LowCardinality(String name,
                        AggregatorFactories factories,
@ -321,14 +320,14 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
         @Override
         public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
                                                     final LeafBucketCollector sub) throws IOException {
-            if (segmentOrds != null) {
-                mapSegmentCountsToGlobalCounts();
+            if (mapping != null) {
+                mapSegmentCountsToGlobalCounts(mapping);
             }
-            globalOrds = valuesSource.globalOrdinalsValues(ctx);
-            segmentOrds = valuesSource.ordinalsValues(ctx);
+            final SortedSetDocValues segmentOrds = valuesSource.ordinalsValues(ctx);
             segmentDocCounts = context.bigArrays().grow(segmentDocCounts, 1 + segmentOrds.getValueCount());
             assert sub == LeafBucketCollector.NO_OP_COLLECTOR;
             final SortedDocValues singleValues = DocValues.unwrapSingleton(segmentOrds);
+            mapping = valuesSource.globalOrdinalsMapping(ctx);
             if (singleValues != null) {
                 return new LeafBucketCollectorBase(sub, segmentOrds) {
                     @Override
@ -356,9 +355,10 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
         }

         @Override
-        protected void doPostCollection() {
-            if (segmentOrds != null) {
-                mapSegmentCountsToGlobalCounts();
+        protected void doPostCollection() throws IOException {
+            if (mapping != null) {
+                mapSegmentCountsToGlobalCounts(mapping);
+                mapping = null;
             }
         }

@ -367,16 +367,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
             Releasables.close(segmentDocCounts);
         }

-        private void mapSegmentCountsToGlobalCounts() {
-            // There is no public method in Ordinals.Docs that allows for this mapping...
-            // This is the cleanest way I can think of so far
-
-            GlobalOrdinalMapping mapping;
-            if (globalOrds.getValueCount() == segmentOrds.getValueCount()) {
-                mapping = null;
-            } else {
-                mapping = (GlobalOrdinalMapping) globalOrds;
-            }
+        private void mapSegmentCountsToGlobalCounts(LongUnaryOperator mapping) throws IOException {
             for (long i = 1; i < segmentDocCounts.size(); i++) {
                 // We use set(...) here, because we need to reset the slow to 0.
                 // segmentDocCounts get reused over the segments and otherwise counts would be too high.
@ -385,7 +376,7 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
                     continue;
                 }
                 final long ord = i - 1; // remember we do +1 when counting
-                final long globalOrd = mapping == null ? ord : mapping.getGlobalOrd(ord);
+                final long globalOrd = mapping.applyAsLong(ord);
                 long bucketOrd = getBucketOrd(globalOrd);
                 incrementBucketDocCount(bucketOrd, inc);
             }

@ -49,6 +49,8 @@ import java.util.Map;
 public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<ValuesSource, TermsAggregatorFactory> {
     private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TermsAggregatorFactory.class));

+    static Boolean REMAP_GLOBAL_ORDS, COLLECT_SEGMENT_ORDS;
+
     private final BucketOrder order;
     private final IncludeExclude includeExclude;
     private final String executionHint;
@ -258,10 +260,12 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values
             final long maxOrd = getMaxOrd(valuesSource, context.searcher());
             assert maxOrd != -1;
             final double ratio = maxOrd / ((double) context.searcher().getIndexReader().numDocs());
+
             if (factories == AggregatorFactories.EMPTY &&
                     includeExclude == null &&
                     Aggregator.descendsFromBucketAggregator(parent) == false &&
-                    ratio <= 0.5 && maxOrd <= 2048) {
+                    // we use the static COLLECT_SEGMENT_ORDS to allow tests to force specific optimizations
+                    (COLLECT_SEGMENT_ORDS!= null ? COLLECT_SEGMENT_ORDS.booleanValue() : ratio <= 0.5 && maxOrd <= 2048)) {
                 /**
                  * We can use the low cardinality execution mode iff this aggregator:
                  * - has no sub-aggregator AND
@ -276,18 +280,24 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory<Values

             }
             final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter(format);
-            boolean remapGlobalOrds = true;
-            if (includeExclude == null &&
-                    Aggregator.descendsFromBucketAggregator(parent) == false &&
-                    (factories == AggregatorFactories.EMPTY ||
-                        (isAggregationSort(order) == false && subAggCollectMode == SubAggCollectionMode.BREADTH_FIRST))) {
-                /**
-                 * We don't need to remap global ords iff this aggregator:
-                 * - has no include/exclude rules AND
-                 * - is not a child of a bucket aggregator AND
-                 * - has no sub-aggregator or only sub-aggregator that can be deferred ({@link SubAggCollectionMode#BREADTH_FIRST}).
-                 **/
-                remapGlobalOrds = false;
+            boolean remapGlobalOrds;
+            if (REMAP_GLOBAL_ORDS != null) {
+                // We use REMAP_GLOBAL_ORDS to allow tests to force specific optimizations
+                remapGlobalOrds = REMAP_GLOBAL_ORDS.booleanValue();
+            } else {
+                remapGlobalOrds = true;
+                if (includeExclude == null &&
+                        Aggregator.descendsFromBucketAggregator(parent) == false &&
+                        (factories == AggregatorFactories.EMPTY ||
+                            (isAggregationSort(order) == false && subAggCollectMode == SubAggCollectionMode.BREADTH_FIRST))) {
+                    /**
+                     * We don't need to remap global ords iff this aggregator:
+                     * - has no include/exclude rules AND
+                     * - is not a child of a bucket aggregator AND
+                     * - has no sub-aggregator or only sub-aggregator that can be deferred ({@link SubAggCollectionMode#BREADTH_FIRST}).
+                     **/
+                    remapGlobalOrds = false;
+                }
             }
             return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals) valuesSource, order,
                 format, bucketCountThresholds, filter, context, parent, remapGlobalOrds, subAggCollectMode, showTermDocCountError,

@ -31,6 +31,7 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
 import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;

 import java.io.IOException;
+import java.util.function.LongUnaryOperator;

 /**
  * Utility class that allows to return views of {@link ValuesSource}s that
@ -201,6 +202,13 @@ public enum MissingValues {
                 SortedSetDocValues values = valuesSource.globalOrdinalsValues(context);
                 return replaceMissing(values, missing);
             }
+
+            @Override
+            public LongUnaryOperator globalOrdinalsMapping(LeafReaderContext context) throws IOException {
+                return getGlobalMapping(valuesSource.ordinalsValues(context),
+                        valuesSource.globalOrdinalsValues(context),
+                        valuesSource.globalOrdinalsMapping(context), missing);
+            }
         };
     }

@ -311,6 +319,43 @@ public enum MissingValues {
         };
     }

+    static LongUnaryOperator getGlobalMapping(SortedSetDocValues values, SortedSetDocValues globalValues,
+            LongUnaryOperator segmentToGlobalOrd, BytesRef missing) throws IOException {
+        final long missingGlobalOrd = globalValues.lookupTerm(missing);
+        final long missingSegmentOrd = values.lookupTerm(missing);
+
+        if (missingSegmentOrd >= 0) {
+            // the missing value exists in the segment, nothing to do
+            return segmentToGlobalOrd;
+        } else if (missingGlobalOrd >= 0) {
+            // the missing value exists in another segment, but not the current one
+            final long insertedSegmentOrd = -1L - missingSegmentOrd;
+            final long insertedGlobalOrd = missingGlobalOrd;
+            return segmentOrd -> {
+                if (insertedSegmentOrd == segmentOrd) {
+                    return insertedGlobalOrd;
+                } else if (insertedSegmentOrd > segmentOrd) {
+                    return segmentToGlobalOrd.applyAsLong(segmentOrd);
+                } else {
+                    return segmentToGlobalOrd.applyAsLong(segmentOrd - 1);
+                }
+            };
+        } else {
+            // the missing value exists neither in this segment nor in another segment
+            final long insertedSegmentOrd = -1L - missingSegmentOrd;
+            final long insertedGlobalOrd = -1L - missingGlobalOrd;
+            return segmentOrd -> {
+                if (insertedSegmentOrd == segmentOrd) {
+                    return insertedGlobalOrd;
+                } else if (insertedSegmentOrd > segmentOrd) {
+                    return segmentToGlobalOrd.applyAsLong(segmentOrd);
+                } else {
+                    return 1 + segmentToGlobalOrd.applyAsLong(segmentOrd - 1);
+                }
+            };
+        }
+    }
+
     public static ValuesSource.GeoPoint replaceMissing(final ValuesSource.GeoPoint valuesSource, final GeoPoint missing) {
         return new ValuesSource.GeoPoint() {

@ -22,6 +22,7 @@ import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.DocValues;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.OrdinalMap;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.SortedSetDocValues;
 import org.apache.lucene.search.IndexSearcher;
@ -47,6 +48,7 @@ import org.elasticsearch.search.aggregations.support.values.ScriptDoubleValues;
 import org.elasticsearch.search.aggregations.support.values.ScriptLongValues;

 import java.io.IOException;
+import java.util.function.LongUnaryOperator;

 public abstract class ValuesSource {

@ -90,6 +92,11 @@ public abstract class ValuesSource {
                 return org.elasticsearch.index.fielddata.FieldData.emptySortedBinary();
             }

+            @Override
+            public LongUnaryOperator globalOrdinalsMapping(LeafReaderContext context) throws IOException {
+                return LongUnaryOperator.identity();
+            }
+
         };

         @Override
@ -105,6 +112,10 @@ public abstract class ValuesSource {
         public abstract SortedSetDocValues globalOrdinalsValues(LeafReaderContext context)
             throws IOException;

+        /** Returns a mapping from segment ordinals to global ordinals. */
+        public abstract LongUnaryOperator globalOrdinalsMapping(LeafReaderContext context)
+            throws IOException;
+
         public long globalMaxOrd(IndexSearcher indexSearcher) throws IOException {
             IndexReader indexReader = indexSearcher.getIndexReader();
             if (indexReader.leaves().isEmpty()) {
@ -142,6 +153,18 @@ public abstract class ValuesSource {
                 final AtomicOrdinalsFieldData atomicFieldData = global.load(context);
                 return atomicFieldData.getOrdinalsValues();
             }
+
+            @Override
+            public LongUnaryOperator globalOrdinalsMapping(LeafReaderContext context) throws IOException {
+                final IndexOrdinalsFieldData global = indexFieldData.loadGlobal((DirectoryReader)context.parent.reader());
+                final OrdinalMap map = global.getOrdinalMap();
+                if (map == null) {
+                    // segments and global ordinals are the same
+                    return LongUnaryOperator.identity();
+                }
+                final org.apache.lucene.util.LongValues segmentToGlobalOrd = map.getGlobalOrds(context.ord);
+                return segmentToGlobalOrd::get;
+            }
         }
     }

@ -20,21 +20,16 @@
 package org.elasticsearch.search.fetch.subphase;

 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.fetch.FetchSubPhase;
 import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.lookup.SourceLookup;

 import java.io.IOException;
-import java.io.UncheckedIOException;
 import java.util.Map;

-import static org.elasticsearch.common.xcontent.XContentFactory.contentBuilder;
-
 public final class FetchSourceSubPhase implements FetchSubPhase {

     @Override
@ -65,7 +60,17 @@ public final class FetchSourceSubPhase implements FetchSubPhase {
             final int initialCapacity = nestedHit ? 1024 : Math.min(1024, source.internalSourceRef().length());
             BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity);
             XContentBuilder builder = new XContentBuilder(source.sourceContentType().xContent(), streamOutput);
-            builder.value(value);
+            if (value != null) {
+                builder.value(value);
+            } else {
+                // This happens if the source filtering could not find the specified in the _source.
+                // Just doing `builder.value(null)` is valid, but the xcontent validation can't detect what format
+                // it is. In certain cases, for example response serialization we fail if no xcontent type can't be
+                // detected. So instead we just return an empty top level object. Also this is in inline with what was
+                // being return in this situation in 5.x and earlier.
+                builder.startObject();
+                builder.endObject();
+            }
             hitContext.hit().sourceRef(builder.bytes());
         } catch (IOException e) {
             throw new ElasticsearchException("Error filtering source", e);

@ -170,6 +170,11 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
             throw new UnsupportedOperationException();
         }

+        @Override
+        public boolean isCacheable(LeafReaderContext ctx) {
+            throw new UnsupportedOperationException();
+        }
+
         @Override
         public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
             BulkScorer in = weight.bulkScorer(context);

@ -63,11 +63,6 @@ final class ProfileScorer extends Scorer {
         }
     }

-    @Override
-    public int freq() throws IOException {
-        return scorer.freq();
-    }
-
     @Override
     public Weight getWeight() {
         return profileWeight;

@ -118,4 +118,9 @@ public final class ProfileWeight extends Weight {
         subQueryWeight.extractTerms(set);
     }

+    @Override
+    public boolean isCacheable(LeafReaderContext ctx) {
+        return subQueryWeight.isCacheable(ctx);
+    }
+
 }

@ -77,6 +77,11 @@ public final class DocValuesSliceQuery extends SliceQuery {
                 return new ConstantScoreScorer(this, score(), twoPhase);
             }

+            @Override
+            public boolean isCacheable(LeafReaderContext ctx) {
+                return DocValues.isCacheable(ctx, getField());
+            }
+
         };
     }
 }

@ -63,6 +63,11 @@ public final class TermsSliceQuery extends SliceQuery {
                 final DocIdSetIterator leafIt = disi.iterator();
                 return new ConstantScoreScorer(this, score(), leafIt);
             }
+
+            @Override
+            public boolean isCacheable(LeafReaderContext ctx) {
+                return true;
+            }
         };
     }

@ -43,7 +43,7 @@ import java.util.zip.DeflaterOutputStream;
 * {@link CompressibleBytesOutputStream#close()} should be called when the bytes are no longer needed and
 * can be safely released.
 */
-final class CompressibleBytesOutputStream extends StreamOutput implements Releasable {
+final class CompressibleBytesOutputStream extends StreamOutput {

     private final StreamOutput stream;
     private final BytesStream bytesStreamOutput;
@ -92,18 +92,18 @@ final class CompressibleBytesOutputStream extends StreamOutput implements Releas
     }

     @Override
-    public void close() {
+    public void close() throws IOException {
         if (stream == bytesStreamOutput) {
             assert shouldCompress == false : "If the streams are the same we should not be compressing";
-            IOUtils.closeWhileHandlingException(stream);
+            IOUtils.close(stream);
         } else {
             assert shouldCompress : "If the streams are different we should be compressing";
-            IOUtils.closeWhileHandlingException(stream, bytesStreamOutput);
+            IOUtils.close(stream, bytesStreamOutput);
         }
     }

     @Override
     public void reset() throws IOException {
-        stream.reset();
+        throw new UnsupportedOperationException();
     }
 }

@ -49,7 +49,6 @@ import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.metrics.CounterMetric;
 import org.elasticsearch.common.metrics.MeanMetric;
 import org.elasticsearch.common.network.NetworkAddress;
@ -73,8 +72,10 @@ import org.elasticsearch.monitor.jvm.JvmInfo;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.threadpool.ThreadPool;

+import java.io.Closeable;
 import java.io.IOException;
 import java.io.StreamCorruptedException;
+import java.io.UncheckedIOException;
 import java.net.BindException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@ -1704,29 +1705,36 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements

     private final class SendListener extends SendMetricListener {
         private final TcpChannel channel;
-        private final Releasable optionalReleasable;
+        private final Closeable optionalCloseable;
         private final Runnable transportAdaptorCallback;

-        private SendListener(TcpChannel channel, Releasable optionalReleasable, Runnable transportAdaptorCallback, long messageLength) {
+        private SendListener(TcpChannel channel, Closeable optionalCloseable, Runnable transportAdaptorCallback, long messageLength) {
             super(messageLength);
             this.channel = channel;
-            this.optionalReleasable = optionalReleasable;
+            this.optionalCloseable = optionalCloseable;
             this.transportAdaptorCallback = transportAdaptorCallback;
         }

         @Override
         protected void innerInnerOnResponse(Void v) {
-            release();
+            closeAndCallback(null);
         }

         @Override
         protected void innerOnFailure(Exception e) {
             logger.warn(() -> new ParameterizedMessage("send message failed [channel: {}]", channel), e);
-            release();
+            closeAndCallback(e);
         }

-        private void release() {
-            Releasables.close(optionalReleasable, transportAdaptorCallback::run);
+        private void closeAndCallback(final Exception e) {
+            try {
+                IOUtils.close(optionalCloseable, transportAdaptorCallback::run);
+            } catch (final IOException inner) {
+                if (e != null) {
+                    inner.addSuppressed(e);
+                }
+                throw new UncheckedIOException(inner);
+            }
         }
     }

@ -45,7 +45,6 @@ import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.Matchers.hasToString;
-import static org.mockito.Matchers.eq;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoMoreInteractions;
@ -690,6 +689,26 @@ public class BootstrapChecksTests extends ESTestCase {
         BootstrapChecks.check(defaultContext, true, Collections.singletonList(nonJava8Check), "testG1GCCheck");
     }

+    public void testAllPermissionCheck() throws NodeValidationException {
+        final AtomicBoolean isAllPermissionGranted = new AtomicBoolean(true);
+        final BootstrapChecks.AllPermissionCheck allPermissionCheck = new BootstrapChecks.AllPermissionCheck() {
+            @Override
+            boolean isAllPermissionGranted() {
+                return isAllPermissionGranted.get();
+            }
+        };
+
+        final List<BootstrapCheck> checks = Collections.singletonList(allPermissionCheck);
+        final NodeValidationException e = expectThrows(
+                NodeValidationException.class,
+                () -> BootstrapChecks.check(defaultContext, true, checks, "testIsAllPermissionCheck"));
+        assertThat(e, hasToString(containsString("granting the all permission effectively disables security")));
+
+        // if all permissions are not granted, nothing should happen
+        isAllPermissionGranted.set(false);
+        BootstrapChecks.check(defaultContext, true, checks, "testIsAllPermissionCheck");
+    }
+
     public void testAlwaysEnforcedChecks() {
         final BootstrapCheck check = new BootstrapCheck() {
             @Override

@ -28,7 +28,7 @@ public class CommandTests extends ESTestCase {
     static class UserErrorCommand extends Command {

         UserErrorCommand() {
-            super("Throws a user error");
+            super("Throws a user error", () -> {});
         }

         @Override
@ -46,7 +46,7 @@ public class CommandTests extends ESTestCase {
     static class UsageErrorCommand extends Command {

         UsageErrorCommand() {
-            super("Throws a usage error");
+            super("Throws a usage error", () -> {});
         }

         @Override
@ -66,7 +66,7 @@ public class CommandTests extends ESTestCase {
         boolean executed = false;

         NoopCommand() {
-            super("Does nothing");
+            super("Does nothing", () -> {});
         }

         @Override

@ -26,13 +26,13 @@ public class MultiCommandTests extends CommandTestCase {

     static class DummyMultiCommand extends MultiCommand {
         DummyMultiCommand() {
-            super("A dummy multi command");
+            super("A dummy multi command", () -> {});
         }
     }

     static class DummySubCommand extends Command {
         DummySubCommand() {
-            super("A dummy subcommand");
+            super("A dummy subcommand", () -> {});
         }
         @Override
         protected void execute(Terminal terminal, OptionSet options) throws Exception {

@ -103,11 +103,6 @@ public class MinScoreScorerTests extends LuceneTestCase {
                 final int idx = Arrays.binarySearch(docs, docID());
                 return scores[idx];
             }
-
-            @Override
-            public int freq() throws IOException {
-                return 1;
-            }
         };
     }

@ -60,23 +60,20 @@ public class CombinedDeletionPolicyTests extends ESTestCase {
             EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG);
         List<IndexCommit> commitList = new ArrayList<>();
         long count = randomIntBetween(10, 20);
-        long minGen = Long.MAX_VALUE;
+        long lastGen = 0;
         for (int i = 0; i < count; i++) {
-            long lastGen = randomIntBetween(10, 20000);
-            minGen = Math.min(minGen, lastGen);
+            lastGen += randomIntBetween(10, 20000);
             commitList.add(mockIndexCommitWithTranslogGen(lastGen));
         }
         combinedDeletionPolicy.onInit(commitList);
-        verify(translogDeletionPolicy, times(1)).setMinTranslogGenerationForRecovery(minGen);
+        verify(translogDeletionPolicy, times(1)).setMinTranslogGenerationForRecovery(lastGen);
         commitList.clear();
-        minGen = Long.MAX_VALUE;
         for (int i = 0; i < count; i++) {
-            long lastGen = randomIntBetween(10, 20000);
-            minGen = Math.min(minGen, lastGen);
+            lastGen += randomIntBetween(10, 20000);
             commitList.add(mockIndexCommitWithTranslogGen(lastGen));
         }
         combinedDeletionPolicy.onCommit(commitList);
-        verify(translogDeletionPolicy, times(1)).setMinTranslogGenerationForRecovery(minGen);
+        verify(translogDeletionPolicy, times(1)).setMinTranslogGenerationForRecovery(lastGen);
     }

     IndexCommit mockIndexCommitWithTranslogGen(long gen) throws IOException {

@ -174,7 +174,7 @@ public class InternalEngineTests extends EngineTestCase {

     public void testSegments() throws Exception {
         try (Store store = createStore();
-             Engine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) {
+             InternalEngine engine = createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE)) {
             List<Segment> segments = engine.segments(false);
             assertThat(segments.isEmpty(), equalTo(true));
             assertThat(engine.segmentsStats(false).getCount(), equalTo(0L));
@ -290,6 +290,69 @@ public class InternalEngineTests extends EngineTestCase {
             assertThat(segments.get(2).getNumDocs(), equalTo(1));
             assertThat(segments.get(2).getDeletedDocs(), equalTo(0));
             assertThat(segments.get(2).isCompound(), equalTo(true));
+
+            // internal refresh - lets make sure we see those segments in the stats
+            ParsedDocument doc5 = testParsedDocument("5", null, testDocumentWithTextField(), B_3, null);
+            engine.index(indexForDoc(doc5));
+            engine.refresh("test", Engine.SearcherScope.INTERNAL);
+
+            segments = engine.segments(false);
+            assertThat(segments.size(), equalTo(4));
+            assertThat(engine.segmentsStats(false).getCount(), equalTo(4L));
+            assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+            assertThat(segments.get(0).isCommitted(), equalTo(true));
+            assertThat(segments.get(0).isSearch(), equalTo(true));
+            assertThat(segments.get(0).getNumDocs(), equalTo(1));
+            assertThat(segments.get(0).getDeletedDocs(), equalTo(1));
+            assertThat(segments.get(0).isCompound(), equalTo(true));
+
+            assertThat(segments.get(1).isCommitted(), equalTo(false));
+            assertThat(segments.get(1).isSearch(), equalTo(true));
+            assertThat(segments.get(1).getNumDocs(), equalTo(1));
+            assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+            assertThat(segments.get(1).isCompound(), equalTo(true));
+
+            assertThat(segments.get(2).isCommitted(), equalTo(false));
+            assertThat(segments.get(2).isSearch(), equalTo(true));
+            assertThat(segments.get(2).getNumDocs(), equalTo(1));
+            assertThat(segments.get(2).getDeletedDocs(), equalTo(0));
+            assertThat(segments.get(2).isCompound(), equalTo(true));
+
+            assertThat(segments.get(3).isCommitted(), equalTo(false));
+            assertThat(segments.get(3).isSearch(), equalTo(false));
+            assertThat(segments.get(3).getNumDocs(), equalTo(1));
+            assertThat(segments.get(3).getDeletedDocs(), equalTo(0));
+            assertThat(segments.get(3).isCompound(), equalTo(true));
+
+            // now refresh the external searcher and make sure it has the new segment
+            engine.refresh("test");
+            segments = engine.segments(false);
+            assertThat(segments.size(), equalTo(4));
+            assertThat(engine.segmentsStats(false).getCount(), equalTo(4L));
+            assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true));
+            assertThat(segments.get(0).isCommitted(), equalTo(true));
+            assertThat(segments.get(0).isSearch(), equalTo(true));
+            assertThat(segments.get(0).getNumDocs(), equalTo(1));
+            assertThat(segments.get(0).getDeletedDocs(), equalTo(1));
+            assertThat(segments.get(0).isCompound(), equalTo(true));
+
+            assertThat(segments.get(1).isCommitted(), equalTo(false));
+            assertThat(segments.get(1).isSearch(), equalTo(true));
+            assertThat(segments.get(1).getNumDocs(), equalTo(1));
+            assertThat(segments.get(1).getDeletedDocs(), equalTo(0));
+            assertThat(segments.get(1).isCompound(), equalTo(true));
+
+            assertThat(segments.get(2).isCommitted(), equalTo(false));
+            assertThat(segments.get(2).isSearch(), equalTo(true));
+            assertThat(segments.get(2).getNumDocs(), equalTo(1));
+            assertThat(segments.get(2).getDeletedDocs(), equalTo(0));
+            assertThat(segments.get(2).isCompound(), equalTo(true));
+
+            assertThat(segments.get(3).isCommitted(), equalTo(false));
+            assertThat(segments.get(3).isSearch(), equalTo(true));
+            assertThat(segments.get(3).getNumDocs(), equalTo(1));
+            assertThat(segments.get(3).getDeletedDocs(), equalTo(0));
+            assertThat(segments.get(3).isCompound(), equalTo(true));
         }
     }

@ -50,11 +50,6 @@ public class ScriptQueryBuilderTests extends AbstractQueryTestCase<ScriptQueryBu
     @Override
     protected void doAssertLuceneQuery(ScriptQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException {
         assertThat(query, instanceOf(ScriptQueryBuilder.ScriptQuery.class));
-        // make sure the query would not get cached
-        ScriptQuery sQuery = (ScriptQuery) query;
-        ScriptQuery clone = new ScriptQuery(sQuery.script, sQuery.filterScript);
-        assertFalse(sQuery.equals(clone));
-        assertFalse(sQuery.hashCode() == clone.hashCode());
     }

     public void testIllegalConstructorArg() {

@ -46,16 +46,23 @@ import org.elasticsearch.indices.recovery.RecoveryTarget;
 import org.hamcrest.Matcher;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Future;

+import static org.elasticsearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.instanceOf;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.core.Is.is;

 public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase {

@ -299,6 +306,68 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
         }
     }

+    public void testSeqNoCollision() throws Exception {
+        try (ReplicationGroup shards = createGroup(2)) {
+            shards.startAll();
+            int initDocs = shards.indexDocs(randomInt(10));
+            List<IndexShard> replicas = shards.getReplicas();
+            IndexShard replica1 = replicas.get(0);
+            IndexShard replica2 = replicas.get(1);
+            shards.syncGlobalCheckpoint();
+
+            logger.info("--> Isolate replica1");
+            IndexRequest indexDoc1 = new IndexRequest(index.getName(), "type", "d1").source("{}", XContentType.JSON);
+            BulkShardRequest replicationRequest = indexOnPrimary(indexDoc1, shards.getPrimary());
+            indexOnReplica(replicationRequest, replica2);
+
+            final Translog.Operation op1;
+            final List<Translog.Operation> initOperations = new ArrayList<>(initDocs);
+            try (Translog.Snapshot snapshot = replica2.getTranslog().newSnapshot()) {
+                assertThat(snapshot.totalOperations(), equalTo(initDocs + 1));
+                for (int i = 0; i < initDocs; i++) {
+                    Translog.Operation op = snapshot.next();
+                    assertThat(op, is(notNullValue()));
+                    initOperations.add(op);
+                }
+                op1 = snapshot.next();
+                assertThat(op1, notNullValue());
+                assertThat(snapshot.next(), nullValue());
+                assertThat(snapshot.overriddenOperations(), equalTo(0));
+            }
+            // Make sure that replica2 receives translog ops (eg. op2) from replica1 and overwrites its stale operation (op1).
+            logger.info("--> Promote replica1 as the primary");
+            shards.promoteReplicaToPrimary(replica1).get(); // wait until resync completed.
+            shards.index(new IndexRequest(index.getName(), "type", "d2").source("{}", XContentType.JSON));
+            final Translog.Operation op2;
+            try (Translog.Snapshot snapshot = replica2.getTranslog().newSnapshot()) {
+                assertThat(snapshot.totalOperations(), equalTo(initDocs + 2));
+                op2 = snapshot.next();
+                assertThat(op2.seqNo(), equalTo(op1.seqNo()));
+                assertThat(op2.primaryTerm(), greaterThan(op1.primaryTerm()));
+                assertThat("Remaining of snapshot should contain init operations", snapshot, containsOperationsInAnyOrder(initOperations));
+                assertThat(snapshot.overriddenOperations(), equalTo(1));
+            }
+
+            // Make sure that peer-recovery transfers all but non-overridden operations.
+            IndexShard replica3 = shards.addReplica();
+            logger.info("--> Promote replica2 as the primary");
+            shards.promoteReplicaToPrimary(replica2);
+            logger.info("--> Recover replica3 from replica2");
+            recoverReplica(replica3, replica2);
+            try (Translog.Snapshot snapshot = replica3.getTranslog().newSnapshot()) {
+                assertThat(snapshot.totalOperations(), equalTo(initDocs + 1));
+                assertThat(snapshot.next(), equalTo(op2));
+                assertThat("Remaining of snapshot should contain init operations", snapshot, containsOperationsInAnyOrder(initOperations));
+                assertThat("Peer-recovery should not send overridden operations", snapshot.overriddenOperations(), equalTo(0));
+            }
+            // TODO: We should assert the content of shards in the ReplicationGroup.
+            // Without rollback replicas(current implementation), we don't have the same content across shards:
+            // - replica1 has {doc1}
+            // - replica2 has {doc1, doc2}
+            // - replica3 can have either {doc2} only if operation-based recovery or {doc1, doc2} if file-based recovery
+        }
+    }
+
     /** Throws <code>documentFailure</code> on every indexing operation */
     static class ThrowingDocumentFailureEngineFactory implements EngineFactory {
         final String documentFailureMessage;

@ -18,14 +18,14 @@
  */
 package org.elasticsearch.index.shard;

-import org.apache.lucene.document.Field;
-import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.stats.IndexStats;
 import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.cluster.ClusterInfoService;
@ -41,11 +41,11 @@ import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.CheckedRunnable;
 import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
@ -56,11 +56,6 @@ import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.flush.FlushStats;
-import org.elasticsearch.index.mapper.IdFieldMapper;
-import org.elasticsearch.index.mapper.Mapping;
-import org.elasticsearch.index.mapper.ParseContext;
-import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.index.mapper.SeqNoFieldMapper;
 import org.elasticsearch.index.mapper.SourceToParse;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.IndicesService;
@ -82,8 +77,10 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Predicate;

@ -97,6 +94,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoSearchHits;
 import static org.hamcrest.Matchers.equalTo;

 public class IndexShardIT extends ESSingleNodeTestCase {
@ -106,21 +104,6 @@ public class IndexShardIT extends ESSingleNodeTestCase {
         return pluginList(InternalSettingsPlugin.class);
     }

-    private ParsedDocument testParsedDocument(String id, String type, String routing, long seqNo,
-                                              ParseContext.Document document, BytesReference source, XContentType xContentType,
-                                              Mapping mappingUpdate) {
-        Field uidField = new Field("_id", id, IdFieldMapper.Defaults.FIELD_TYPE);
-        Field versionField = new NumericDocValuesField("_version", 0);
-        SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
-        document.add(uidField);
-        document.add(versionField);
-        document.add(seqID.seqNo);
-        document.add(seqID.seqNoDocValue);
-        document.add(seqID.primaryTerm);
-        return new ParsedDocument(versionField, seqID, id, type, routing,
-            Collections.singletonList(document), source, xContentType, mappingUpdate);
-    }
-
     public void testLockTryingToDelete() throws Exception {
         createIndex("test");
         ensureGreen();
@ -550,4 +533,96 @@ public class IndexShardIT extends ESSingleNodeTestCase {
             RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE);
         return shardRouting;
     }

+    public void testAutomaticRefresh() throws InterruptedException {
+        TimeValue randomTimeValue = randomFrom(random(), null, TimeValue.ZERO, TimeValue.timeValueMillis(randomIntBetween(0, 1000)));
+        Settings.Builder builder = Settings.builder();
+        if (randomTimeValue != null) {
+            builder.put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), randomTimeValue);
+        }
+        IndexService indexService = createIndex("test", builder.build());
+        assertFalse(indexService.getIndexSettings().isExplicitRefresh());
+        ensureGreen();
+        AtomicInteger totalNumDocs = new AtomicInteger(Integer.MAX_VALUE);
+        CountDownLatch started = new CountDownLatch(1);
+        Thread t = new Thread(() -> {
+            SearchResponse searchResponse;
+            started.countDown();
+            do {
+                searchResponse = client().prepareSearch().get();
+            } while (searchResponse.getHits().totalHits != totalNumDocs.get());
+        });
+        t.start();
+        started.await();
+        assertNoSearchHits(client().prepareSearch().get());
+        int numDocs = scaledRandomIntBetween(25, 100);
+        totalNumDocs.set(numDocs);
+        CountDownLatch indexingDone = new CountDownLatch(numDocs);
+        client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
+        indexingDone.countDown(); // one doc is indexed above blocking
+        IndexShard shard = indexService.getShard(0);
+        boolean hasRefreshed = shard.scheduledRefresh();
+        if (randomTimeValue == TimeValue.ZERO) {
+            // with ZERO we are guaranteed to see the doc since we will wait for a refresh in the background
+            assertFalse(hasRefreshed);
+            assertTrue(shard.isSearchIdle());
+        } else if (randomTimeValue == null){
+            // with null we are guaranteed to see the doc since do execute the refresh.
+            // we can't assert on hasRefreshed since it might have been refreshed in the background on the shard concurrently
+            assertFalse(shard.isSearchIdle());
+        }
+        assertHitCount(client().prepareSearch().get(), 1);
+        for (int i = 1; i < numDocs; i++) {
|
||||||
|
client().prepareIndex("test", "test", "" + i).setSource("{\"foo\" : \"bar\"}", XContentType.JSON)
|
||||||
|
.execute(new ActionListener<IndexResponse>() {
|
||||||
|
@Override
|
||||||
|
public void onResponse(IndexResponse indexResponse) {
|
||||||
|
indexingDone.countDown();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void onFailure(Exception e) {
|
||||||
|
indexingDone.countDown();
|
||||||
|
throw new AssertionError(e);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
indexingDone.await();
|
||||||
|
t.join();
|
||||||
|
}
|
||||||
|
|
||||||
|
public void testPendingRefreshWithIntervalChange() throws InterruptedException {
|
||||||
|
Settings.Builder builder = Settings.builder();
|
||||||
|
builder.put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO);
|
||||||
|
IndexService indexService = createIndex("test", builder.build());
|
||||||
|
assertFalse(indexService.getIndexSettings().isExplicitRefresh());
|
||||||
|
ensureGreen();
|
||||||
|
assertNoSearchHits(client().prepareSearch().get());
|
||||||
|
client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
|
||||||
|
IndexShard shard = indexService.getShard(0);
|
||||||
|
assertFalse(shard.scheduledRefresh());
|
||||||
|
assertTrue(shard.isSearchIdle());
|
||||||
|
CountDownLatch refreshLatch = new CountDownLatch(1);
|
||||||
|
client().admin().indices().prepareRefresh()
|
||||||
|
.execute(ActionListener.wrap(refreshLatch::countDown));// async on purpose to make sure it happens concurrently
|
||||||
|
assertHitCount(client().prepareSearch().get(), 1);
|
||||||
|
client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
|
||||||
|
assertFalse(shard.scheduledRefresh());
|
||||||
|
|
||||||
|
// now disable background refresh and make sure the refresh happens
|
||||||
|
CountDownLatch updateSettingsLatch = new CountDownLatch(1);
|
||||||
|
client().admin().indices()
|
||||||
|
.prepareUpdateSettings("test")
|
||||||
|
.setSettings(Settings.builder().put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1).build())
|
||||||
|
.execute(ActionListener.wrap(updateSettingsLatch::countDown));
|
||||||
|
assertHitCount(client().prepareSearch().get(), 2);
|
||||||
|
// wait for both to ensure we don't have in-flight operations
|
||||||
|
updateSettingsLatch.await();
|
||||||
|
refreshLatch.await();
|
||||||
|
|
||||||
|
client().prepareIndex("test", "test", "2").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get();
|
||||||
|
assertTrue(shard.scheduledRefresh());
|
||||||
|
assertTrue(shard.isSearchIdle());
|
||||||
|
assertHitCount(client().prepareSearch().get(), 3);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
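The two tests above exercise the new search-idle behaviour: a shard that has received no searches for the configured timeout (IndexSettings.INDEX_SEARCH_IDLE_AFTER) skips scheduled refreshes until a search arrives, while setting the refresh interval to -1 disables periodic refresh entirely. The following is a minimal sketch, not part of the change, that only assembles the two settings toggled above; the setting constants and getKey() calls are taken from the tests, the class and method names are illustrative.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexSettings;

// Minimal sketch (not part of the diff): builds the two index settings the new tests toggle.
final class SearchIdleSettingsSketch {
    static Settings build(TimeValue searchIdleAfter, TimeValue refreshInterval) {
        return Settings.builder()
            // after this much time without a search the shard is considered search-idle
            // and scheduledRefresh() stops refreshing until a search shows up
            .put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), searchIdleAfter)
            // the periodic refresh cadence; the tests above set -1 to disable it
            .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), refreshInterval)
            .build();
    }
}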
@@ -62,7 +62,9 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@@ -70,6 +72,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineException;
@@ -2269,11 +2272,17 @@ public class IndexShardTests extends IndexShardTestCase {
            final String id = Integer.toString(i);
            indexDoc(indexShard, "test", id);
        }
        if (randomBoolean()) {
            indexShard.refresh("test");
        } else {
            indexShard.flush(new FlushRequest());
        }
        {
            final DocsStats docsStats = indexShard.docStats();
            assertThat(docsStats.getCount(), equalTo(numDocs));
            try (Engine.Searcher searcher = indexShard.acquireSearcher("test")) {
                assertTrue(searcher.reader().numDocs() <= docsStats.getCount());
            }
            assertThat(docsStats.getDeleted(), equalTo(0L));
            assertThat(docsStats.getAverageSizeInBytes(), greaterThan(0L));
        }
@@ -2293,9 +2302,14 @@ public class IndexShardTests extends IndexShardTestCase {
        flushRequest.waitIfOngoing(false);
        indexShard.flush(flushRequest);

        indexShard.refresh("test");
        if (randomBoolean()) {
            indexShard.refresh("test");
        }
        {
            final DocsStats docStats = indexShard.docStats();
            try (Engine.Searcher searcher = indexShard.acquireSearcher("test")) {
                assertTrue(searcher.reader().numDocs() <= docStats.getCount());
            }
            assertThat(docStats.getCount(), equalTo(numDocs));
            // Lucene will delete a segment if all docs are deleted from it; this means that we lose the deletes when deleting all docs
            assertThat(docStats.getDeleted(), equalTo(numDocsToDelete == numDocs ? 0 : numDocsToDelete));
@@ -2307,7 +2321,11 @@ public class IndexShardTests extends IndexShardTestCase {
        forceMergeRequest.maxNumSegments(1);
        indexShard.forceMerge(forceMergeRequest);

        indexShard.refresh("test");
        if (randomBoolean()) {
            indexShard.refresh("test");
        } else {
            indexShard.flush(new FlushRequest());
        }
        {
            final DocsStats docStats = indexShard.docStats();
            assertThat(docStats.getCount(), equalTo(numDocs));
@@ -2338,8 +2356,11 @@ public class IndexShardTests extends IndexShardTestCase {
        assertThat("Without flushing, segment sizes should be zero",
            indexShard.docStats().getTotalSizeInBytes(), equalTo(0L));

        indexShard.flush(new FlushRequest());
        indexShard.refresh("test");
        if (randomBoolean()) {
            indexShard.flush(new FlushRequest());
        } else {
            indexShard.refresh("test");
        }
        {
            final DocsStats docsStats = indexShard.docStats();
            final StoreStats storeStats = indexShard.storeStats();
@@ -2359,9 +2380,11 @@ public class IndexShardTests extends IndexShardTestCase {
                indexDoc(indexShard, "doc", Integer.toString(i), "{\"foo\": \"bar\"}");
            }
        }
        indexShard.refresh("test");
        if (randomBoolean()) {
            indexShard.flush(new FlushRequest());
        } else {
            indexShard.refresh("test");
        }
        {
            final DocsStats docsStats = indexShard.docStats();
            final StoreStats storeStats = indexShard.storeStats();
@@ -2565,4 +2588,138 @@
        public void verify(String verificationToken, DiscoveryNode localNode) {
        }
    }

    public void testIsSearchIdle() throws Exception {
        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .build();
        IndexMetaData metaData = IndexMetaData.builder("test")
            .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
            .settings(settings)
            .primaryTerm(0, 1).build();
        IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
        recoverShardFromStore(primary);
        indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}");
        assertTrue(primary.getEngine().refreshNeeded());
        assertTrue(primary.scheduledRefresh());
        assertFalse(primary.isSearchIdle());

        IndexScopedSettings scopedSettings = primary.indexSettings().getScopedSettings();
        settings = Settings.builder().put(settings).put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO).build();
        scopedSettings.applySettings(settings);
        assertTrue(primary.isSearchIdle());

        settings = Settings.builder().put(settings).put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.timeValueMinutes(1))
            .build();
        scopedSettings.applySettings(settings);
        assertFalse(primary.isSearchIdle());

        settings = Settings.builder().put(settings).put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.timeValueMillis(10))
            .build();
        scopedSettings.applySettings(settings);

        assertBusy(() -> assertTrue(primary.isSearchIdle()));
        do {
            // now loop until we are fast enough... shouldn't take long
            primary.awaitShardSearchActive(aBoolean -> {});
        } while (primary.isSearchIdle());
        closeShards(primary);
    }

    public void testScheduledRefresh() throws IOException, InterruptedException {
        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .build();
        IndexMetaData metaData = IndexMetaData.builder("test")
            .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
            .settings(settings)
            .primaryTerm(0, 1).build();
        IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
        recoverShardFromStore(primary);
        indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}");
        assertTrue(primary.getEngine().refreshNeeded());
        assertTrue(primary.scheduledRefresh());
        IndexScopedSettings scopedSettings = primary.indexSettings().getScopedSettings();
        settings = Settings.builder().put(settings).put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO).build();
        scopedSettings.applySettings(settings);

        assertFalse(primary.getEngine().refreshNeeded());
        indexDoc(primary, "test", "1", "{\"foo\" : \"bar\"}");
        assertTrue(primary.getEngine().refreshNeeded());
        long lastSearchAccess = primary.getLastSearcherAccess();
        assertFalse(primary.scheduledRefresh());
        assertEquals(lastSearchAccess, primary.getLastSearcherAccess());
        // wait until the thread-pool has moved the timestamp otherwise we can't assert on this below
        awaitBusy(() -> primary.getThreadPool().relativeTimeInMillis() > lastSearchAccess);
        CountDownLatch latch = new CountDownLatch(10);
        for (int i = 0; i < 10; i++) {
            primary.awaitShardSearchActive(refreshed -> {
                assertTrue(refreshed);
                try (Engine.Searcher searcher = primary.acquireSearcher("test")) {
                    assertEquals(2, searcher.reader().numDocs());
                } finally {
                    latch.countDown();
                }
            });
        }
        assertNotEquals("awaitShardSearchActive must access a searcher to remove search idle state", lastSearchAccess,
            primary.getLastSearcherAccess());
        assertTrue(lastSearchAccess < primary.getLastSearcherAccess());
        try (Engine.Searcher searcher = primary.acquireSearcher("test")) {
            assertEquals(1, searcher.reader().numDocs());
        }
        assertTrue(primary.getEngine().refreshNeeded());
        assertTrue(primary.scheduledRefresh());
        latch.await();
        CountDownLatch latch1 = new CountDownLatch(1);
        primary.awaitShardSearchActive(refreshed -> {
            assertFalse(refreshed);
            try (Engine.Searcher searcher = primary.acquireSearcher("test")) {
                assertEquals(2, searcher.reader().numDocs());
            } finally {
                latch1.countDown();
            }
        });
        latch1.await();
        closeShards(primary);
    }

    public void testRefreshIsNeededWithRefreshListeners() throws IOException, InterruptedException {
        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .build();
        IndexMetaData metaData = IndexMetaData.builder("test")
            .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
            .settings(settings)
            .primaryTerm(0, 1).build();
        IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
        recoverShardFromStore(primary);
        indexDoc(primary, "test", "0", "{\"foo\" : \"bar\"}");
        assertTrue(primary.getEngine().refreshNeeded());
        assertTrue(primary.scheduledRefresh());
        Engine.IndexResult doc = indexDoc(primary, "test", "1", "{\"foo\" : \"bar\"}");
        CountDownLatch latch = new CountDownLatch(1);
        primary.addRefreshListener(doc.getTranslogLocation(), r -> latch.countDown());
        assertEquals(1, latch.getCount());
        assertTrue(primary.getEngine().refreshNeeded());
        assertTrue(primary.scheduledRefresh());
        latch.await();

        IndexScopedSettings scopedSettings = primary.indexSettings().getScopedSettings();
        settings = Settings.builder().put(settings).put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO).build();
        scopedSettings.applySettings(settings);

        doc = indexDoc(primary, "test", "2", "{\"foo\" : \"bar\"}");
        CountDownLatch latch1 = new CountDownLatch(1);
        primary.addRefreshListener(doc.getTranslogLocation(), r -> latch1.countDown());
        assertEquals(1, latch1.getCount());
        assertTrue(primary.getEngine().refreshNeeded());
        assertTrue(primary.scheduledRefresh());
        latch1.await();
        closeShards(primary);
    }
}
@@ -35,6 +35,7 @@ import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
@@ -67,6 +68,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
@@ -87,16 +89,16 @@ public class RefreshListenersTests extends ESTestCase {
    public void setupListeners() throws Exception {
        // Setup dependencies of the listeners
        maxListeners = randomIntBetween(1, 1000);
        // Now setup the InternalEngine which is much more complicated because we aren't mocking anything
        threadPool = new TestThreadPool(getTestName());
        listeners = new RefreshListeners(
            () -> maxListeners,
            () -> engine.refresh("too-many-listeners"),
            // Immediately run listeners rather than adding them to the listener thread pool like IndexShard does to simplify the test.
            Runnable::run,
            logger
            logger,
            );
            threadPool.getThreadContext());

        // Now setup the InternalEngine which is much more complicated because we aren't mocking anything
        threadPool = new TestThreadPool(getTestName());
        IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY);
        ShardId shardId = new ShardId(new Index("index", "_na_"), 1);
        String allocationId = UUIDs.randomBase64UUID(random());
@@ -161,6 +163,23 @@
        assertEquals(0, listeners.pendingCount());
    }

    public void testContextIsPreserved() throws IOException, InterruptedException {
        assertEquals(0, listeners.pendingCount());
        Engine.IndexResult index = index("1");
        CountDownLatch latch = new CountDownLatch(1);
        try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) {
            threadPool.getThreadContext().putHeader("test", "foobar");
            assertFalse(listeners.addOrNotify(index.getTranslogLocation(), forced -> {
                assertEquals("foobar", threadPool.getThreadContext().getHeader("test"));
                latch.countDown();
            }));
        }
        assertNull(threadPool.getThreadContext().getHeader("test"));
        assertEquals(1, latch.getCount());
        engine.refresh("I said so");
        latch.await();
    }

    public void testTooMany() throws Exception {
        assertEquals(0, listeners.pendingCount());
        assertFalse(listeners.refreshNeeded());
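The constructor change above threads the ThreadContext into RefreshListeners so that a listener runs with the headers of the thread that registered it, which is what the new testContextIsPreserved asserts. Below is a minimal sketch of the stash/restore behaviour the test relies on; it uses only the ThreadContext calls already shown above, plus the assumption that ThreadContext can be constructed from empty settings.

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

// Minimal sketch (not part of the diff): headers put inside a stashed context are visible
// only while the stash is open; closing it restores the previous, header-free context.
final class ThreadContextSketch {
    public static void main(String[] args) {
        ThreadContext threadContext = new ThreadContext(Settings.EMPTY); // assumption: Settings-based constructor
        try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
            threadContext.putHeader("test", "foobar");
            assert "foobar".equals(threadContext.getHeader("test")); // visible inside the stash
        }
        assert threadContext.getHeader("test") == null; // gone once the stash is closed
    }
}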
@@ -36,6 +36,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MockEngineFactoryPlugin;
import org.elasticsearch.index.translog.TestTranslog;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
@@ -43,23 +44,17 @@ import org.elasticsearch.test.engine.MockEngineSupport;
import org.elasticsearch.test.transport.MockTransportService;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.notNullValue;

/**
 * Integration test for corrupted translog files
@@ -119,51 +114,14 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
        assertTrue(shardRouting.assignedToNode());
        String nodeId = shardRouting.currentNodeId();
        NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(nodeId).setFs(true).get();
        Set<Path> files = new TreeSet<>(); // treeset makes sure iteration order is deterministic
        Set<Path> translogDirs = new HashSet<>();
        for (FsInfo.Path fsPath : nodeStatses.getNodes().get(0).getFs()) {
            String path = fsPath.getPath();
            final String relativeDataLocationPath = "indices/"+ test.getUUID() +"/" + Integer.toString(shardRouting.getId()) + "/translog";
            String relativeDataLocationPath = "indices/" + test.getUUID() + "/" + Integer.toString(shardRouting.getId()) + "/translog";
            Path file = PathUtils.get(path).resolve(relativeDataLocationPath);
            Path translogDir = PathUtils.get(path).resolve(relativeDataLocationPath);
            if (Files.exists(file)) {
            translogDirs.add(translogDir);
                logger.info("--> path: {}", file);
                try (DirectoryStream<Path> stream = Files.newDirectoryStream(file)) {
                    for (Path item : stream) {
                        logger.info("--> File: {}", item);
                        if (Files.isRegularFile(item) && item.getFileName().toString().startsWith("translog-")) {
                            files.add(item);
                        }
                    }
                }
            }
        }
        Path fileToCorrupt = null;
        TestTranslog.corruptTranslogFiles(logger, random(), translogDirs);
        if (!files.isEmpty()) {
            int corruptions = randomIntBetween(5, 20);
            for (int i = 0; i < corruptions; i++) {
                fileToCorrupt = RandomPicks.randomFrom(random(), files);
                try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
                    // read
                    raf.position(randomIntBetween(0, (int) Math.min(Integer.MAX_VALUE, raf.size() - 1)));
                    long filePointer = raf.position();
                    ByteBuffer bb = ByteBuffer.wrap(new byte[1]);
                    raf.read(bb);
                    bb.flip();

                    // corrupt
                    byte oldValue = bb.get(0);
                    byte newValue = (byte) (oldValue + 1);
                    bb.put(0, newValue);

                    // rewrite
                    raf.position(filePointer);
                    raf.write(bb);
                    logger.info("--> corrupting file {} -- flipping at position {} from {} to {} file: {}",
                        fileToCorrupt, filePointer, Integer.toHexString(oldValue),
                        Integer.toHexString(newValue), fileToCorrupt);
                }
            }
        }
        assertThat("no file corrupted", fileToCorrupt, notNullValue());
    }

    /** Disables translog flushing for the specified index */
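The corruption logic deleted above now lives in the shared TestTranslog.corruptTranslogFiles helper brought in by the new import. As a rough sketch of what that byte-flipping corruption amounts to (plain NIO, not the actual helper, whose internals are not shown in this diff):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Random;

// Rough sketch (not the actual TestTranslog helper): increments one byte at a random
// position of a file, mirroring the inline corruption code that was removed above.
final class TranslogCorruptionSketch {
    static void flipOneByte(Path file, Random random) throws IOException {
        try (FileChannel channel = FileChannel.open(file, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
            long position = (long) (random.nextDouble() * (channel.size() - 1)); // assumes a non-empty file
            ByteBuffer buffer = ByteBuffer.allocate(1);
            channel.read(buffer, position);            // read the current byte
            byte flipped = (byte) (buffer.get(0) + 1); // change its value
            buffer.clear();
            buffer.put(flipped);
            buffer.flip();
            channel.write(buffer, position);           // write it back in place
        }
    }
}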
@@ -0,0 +1,102 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.translog;

import com.carrotsearch.hppc.LongHashSet;
import com.carrotsearch.hppc.LongSet;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.test.ESTestCase;

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.LongStream;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.lessThanOrEqualTo;

public class MultiSnapshotTests extends ESTestCase {

    public void testTrackSeqNoSimpleRange() throws Exception {
        final MultiSnapshot.SeqNoSet bitSet = new MultiSnapshot.SeqNoSet();
        final List<Long> values = LongStream.range(0, 1024).boxed().collect(Collectors.toList());
        Randomness.shuffle(values);
        for (int i = 0; i < 1023; i++) {
            assertThat(bitSet.getAndSet(values.get(i)), equalTo(false));
            assertThat(bitSet.ongoingSetsSize(), equalTo(1L));
            assertThat(bitSet.completeSetsSize(), equalTo(0L));
        }

        assertThat(bitSet.getAndSet(values.get(1023)), equalTo(false));
        assertThat(bitSet.ongoingSetsSize(), equalTo(0L));
        assertThat(bitSet.completeSetsSize(), equalTo(1L));

        assertThat(bitSet.getAndSet(between(0, 1023)), equalTo(true));
        assertThat(bitSet.getAndSet(between(1024, Integer.MAX_VALUE)), equalTo(false));
    }

    public void testTrackSeqNoDenseRanges() throws Exception {
        final MultiSnapshot.SeqNoSet bitSet = new MultiSnapshot.SeqNoSet();
        final LongSet normalSet = new LongHashSet();
        IntStream.range(0, scaledRandomIntBetween(5_000, 10_000)).forEach(i -> {
            long seq = between(0, 5000);
            boolean existed = normalSet.add(seq) == false;
            assertThat("SeqNoSet != Set" + seq, bitSet.getAndSet(seq), equalTo(existed));
            assertThat(bitSet.ongoingSetsSize() + bitSet.completeSetsSize(), lessThanOrEqualTo(5L));
        });
    }

    public void testTrackSeqNoSparseRanges() throws Exception {
        final MultiSnapshot.SeqNoSet bitSet = new MultiSnapshot.SeqNoSet();
        final LongSet normalSet = new LongHashSet();
        IntStream.range(0, scaledRandomIntBetween(5_000, 10_000)).forEach(i -> {
            long seq = between(i * 10_000, i * 30_000);
            boolean existed = normalSet.add(seq) == false;
            assertThat("SeqNoSet != Set", bitSet.getAndSet(seq), equalTo(existed));
        });
    }

    public void testTrackSeqNoMimicTranslogRanges() throws Exception {
        final MultiSnapshot.SeqNoSet bitSet = new MultiSnapshot.SeqNoSet();
        final LongSet normalSet = new LongHashSet();
        long currentSeq = between(10_000_000, 1_000_000_000);
        final int iterations = scaledRandomIntBetween(100, 2000);
        assertThat(bitSet.completeSetsSize(), equalTo(0L));
        assertThat(bitSet.ongoingSetsSize(), equalTo(0L));
        long totalDocs = 0;
        for (long i = 0; i < iterations; i++) {
            int batchSize = between(1, 1500);
            totalDocs += batchSize;
            currentSeq -= batchSize;
            List<Long> batch = LongStream.range(currentSeq, currentSeq + batchSize)
                .boxed()
                .collect(Collectors.toList());
            Randomness.shuffle(batch);
            batch.forEach(seq -> {
                boolean existed = normalSet.add(seq) == false;
                assertThat("SeqNoSet != Set", bitSet.getAndSet(seq), equalTo(existed));
                assertThat(bitSet.ongoingSetsSize(), lessThanOrEqualTo(4L));
            });
            assertThat(bitSet.ongoingSetsSize(), lessThanOrEqualTo(2L));
        }
        assertThat(bitSet.completeSetsSize(), lessThanOrEqualTo(totalDocs / 1024));
        assertThat(bitSet.ongoingSetsSize(), lessThanOrEqualTo(2L));
    }
}