commit 7ec5075a87
Merge branch 'master' into feature-suggest-refactoring
@@ -60,8 +60,11 @@ import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.LongSupplier;
/**
*

@@ -73,27 +76,41 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
private final ClusterService clusterService;
private final TransportShardBulkAction shardBulkAction;
private final TransportCreateIndexAction createIndexAction;
private final LongSupplier relativeTimeProvider;
@Inject
public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
AutoCreateIndex autoCreateIndex) {
this(settings, threadPool, transportService, clusterService,
shardBulkAction, createIndexAction,
actionFilters, indexNameExpressionResolver,
autoCreateIndex,
System::nanoTime);
}
public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService,
TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
AutoCreateIndex autoCreateIndex, LongSupplier relativeTimeProvider) {
super(settings, BulkAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, BulkRequest::new);
Objects.requireNonNull(relativeTimeProvider);
this.clusterService = clusterService;
this.shardBulkAction = shardBulkAction;
this.createIndexAction = createIndexAction;
this.autoCreateIndex = autoCreateIndex;
this.allowIdGeneration = this.settings.getAsBoolean("action.bulk.action.allow_id_generation", true);
this.relativeTimeProvider = relativeTimeProvider;
}
@Override
protected void doExecute(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {
final long startTime = System.currentTimeMillis();
final long startTime = relativeTime();
final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());
if (autoCreateIndex.needToCheck()) {
if (needToCheck()) {
// Keep track of all unique indices and all unique types per index for the create index requests:
final Map<String, Set<String>> indicesAndTypes = new HashMap<>();
for (ActionRequest request : bulkRequest.requests) {

@@ -112,7 +129,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
ClusterState state = clusterService.state();
for (Map.Entry<String, Set<String>> entry : indicesAndTypes.entrySet()) {
final String index = entry.getKey();
if (autoCreateIndex.shouldAutoCreate(index, state)) {
if (shouldAutoCreate(index, state)) {
CreateIndexRequest createIndexRequest = new CreateIndexRequest();
createIndexRequest.index(index);
for (String type : entry.getValue()) {

@@ -163,6 +180,14 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
}
}
boolean needToCheck() {
return autoCreateIndex.needToCheck();
}
boolean shouldAutoCreate(String index, ClusterState state) {
return autoCreateIndex.shouldAutoCreate(index, state);
}
private boolean setResponseFailureIfIndexMatches(AtomicArray<BulkItemResponse> responses, int idx, ActionRequest request, String index, Throwable e) {
if (request instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request;

@@ -195,16 +220,15 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
* @see #doExecute(BulkRequest, org.elasticsearch.action.ActionListener)
*/
public void executeBulk(final BulkRequest bulkRequest, final ActionListener<BulkResponse> listener) {
final long startTime = System.currentTimeMillis();
executeBulk(bulkRequest, startTime, listener, new AtomicArray<>(bulkRequest.requests.size()));
final long startTimeNanos = relativeTime();
executeBulk(bulkRequest, startTimeNanos, listener, new AtomicArray<>(bulkRequest.requests.size()));
}
private long buildTookInMillis(long startTime) {
// protect ourselves against time going backwards
return Math.max(1, System.currentTimeMillis() - startTime);
private long buildTookInMillis(long startTimeNanos) {
return TimeUnit.NANOSECONDS.toMillis(relativeTime() - startTimeNanos);
}
private void executeBulk(final BulkRequest bulkRequest, final long startTime, final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses ) {
void executeBulk(final BulkRequest bulkRequest, final long startTimeNanos, final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses ) {
final ClusterState clusterState = clusterService.state();
// TODO use timeout to wait here if its blocked...
clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.WRITE);

@@ -302,7 +326,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
}
if (requestsByShard.isEmpty()) {
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime)));
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)));
return;
}

@@ -352,7 +376,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
}
private void finishHim() {
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTime)));
listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos)));
}
});
}

@@ -398,7 +422,6 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
return false;
}
private static class ConcreteIndices {
private final ClusterState state;
private final IndexNameExpressionResolver indexNameExpressionResolver;

@@ -422,4 +445,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
return concreteIndex;
}
}
private long relativeTime() {
return relativeTimeProvider.getAsLong();
}
}
@@ -134,6 +134,8 @@ final class Bootstrap {
// we've already logged this.
}
JNANatives.trySetMaxNumberOfThreads();
// init lucene random seed. it will use /dev/urandom where available:
StringHelper.randomId();
}
@@ -26,8 +26,8 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.UserError;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.monitor.jvm.JvmInfo;

@@ -37,7 +37,6 @@ import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder;
@@ -120,6 +120,9 @@ final class BootstrapCheck {
= Constants.MAC_OS_X ? new OsXFileDescriptorCheck() : new FileDescriptorCheck();
checks.add(fileDescriptorCheck);
checks.add(new MlockallCheck(BootstrapSettings.MLOCKALL_SETTING.get(settings)));
if (Constants.LINUX) {
checks.add(new MaxNumberOfThreadsCheck());
}
return Collections.unmodifiableList(checks);
}

@@ -220,4 +223,30 @@ final class BootstrapCheck {
}
static class MaxNumberOfThreadsCheck implements Check {
private final long maxNumberOfThreadsThreshold = 1 << 15;
@Override
public boolean check() {
return getMaxNumberOfThreads() != -1 && getMaxNumberOfThreads() < maxNumberOfThreadsThreshold;
}
@Override
public String errorMessage() {
return String.format(
Locale.ROOT,
"max number of threads [%d] for user [%s] likely too low, increase to at least [%d]",
getMaxNumberOfThreads(),
BootstrapInfo.getSystemProperties().get("user.name"),
maxNumberOfThreadsThreshold);
}
// visible for testing
long getMaxNumberOfThreads() {
return JNANatives.MAX_NUMBER_OF_THREADS;
}
}
}
@@ -48,6 +48,9 @@ class JNANatives {
// Set to true, in case policy can be applied to all threads of the process (even existing ones)
// otherwise they are only inherited for new threads (ES app threads)
static boolean LOCAL_SECCOMP_ALL = false;
// set to the maximum number of threads that can be created for
// the user ID that owns the running Elasticsearch process
static long MAX_NUMBER_OF_THREADS = -1;
static void tryMlockall() {
int errno = Integer.MIN_VALUE;

@@ -103,13 +106,29 @@ class JNANatives {
}
}
static void trySetMaxNumberOfThreads() {
if (Constants.LINUX) {
// this is only valid on Linux and the value *is* different on OS X
// see /usr/include/sys/resource.h on OS X
// on Linux the resource RLIMIT_NPROC means *the number of threads*
// this is in opposition to BSD-derived OSes
final int rlimit_nproc = 6;
final JNACLibrary.Rlimit rlimit = new JNACLibrary.Rlimit();
if (JNACLibrary.getrlimit(rlimit_nproc, rlimit) == 0) {
MAX_NUMBER_OF_THREADS = rlimit.rlim_cur.longValue();
} else {
logger.warn("unable to retrieve max number of threads [" + JNACLibrary.strerror(Native.getLastError()) + "]");
}
}
}
static String rlimitToString(long value) {
assert Constants.LINUX || Constants.MAC_OS_X;
if (value == JNACLibrary.RLIM_INFINITY) {
return "unlimited";
} else {
// TODO, on java 8 use Long.toUnsignedString, since that's what it is.
return Long.toString(value);
return Long.toUnsignedString(value);
}
}
@@ -310,7 +310,7 @@ public class AllocationService extends AbstractComponent {
}
// move shards that no longer can be allocated
changed |= moveShards(allocation);
changed |= shardsAllocators.moveShards(allocation);
// rebalance
changed |= shardsAllocators.rebalance(allocation);

@@ -327,46 +327,6 @@ public class AllocationService extends AbstractComponent {
}
}
private boolean moveShards(RoutingAllocation allocation) {
boolean changed = false;
// create a copy of the shards interleaving between nodes, and check if they can remain
List<ShardRouting> shards = new ArrayList<>();
int index = 0;
boolean found = true;
final RoutingNodes routingNodes = allocation.routingNodes();
while (found) {
found = false;
for (RoutingNode routingNode : routingNodes) {
if (index >= routingNode.size()) {
continue;
}
found = true;
shards.add(routingNode.get(index));
}
index++;
}
for (int i = 0; i < shards.size(); i++) {
ShardRouting shardRouting = shards.get(i);
// we can only move started shards...
if (!shardRouting.started()) {
continue;
}
final RoutingNode routingNode = routingNodes.node(shardRouting.currentNodeId());
Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.NO) {
logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
boolean moved = shardsAllocators.move(shardRouting, routingNode, allocation);
if (!moved) {
logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
} else {
changed = true;
}
}
}
return changed;
}
private boolean electPrimariesAndUnassignedDanglingReplicas(RoutingAllocation allocation) {
boolean changed = false;
final RoutingNodes routingNodes = allocation.routingNodes();
@@ -42,6 +42,7 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.PriorityComparator;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;

@@ -49,6 +50,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.IdentityHashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Predicate;

@@ -119,9 +121,9 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
@Override
public boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
public boolean moveShards(RoutingAllocation allocation) {
final Balancer balancer = new Balancer(logger, allocation, weightFunction, threshold);
return balancer.move(shardRouting, node);
return balancer.moveShards();
}
/**

@@ -489,56 +491,93 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
/**
* This function executes a move operation moving the given shard from
* the given node to the minimal eligible node with respect to the
* weight function. Iff the shard is moved the shard will be set to
* Move started shards that can not be allocated to a node anymore
*
* For each shard to be moved this function executes a move operation
* to the minimal eligible node with respect to the
* weight function. If a shard is moved the shard will be set to
* {@link ShardRoutingState#RELOCATING} and a shadow instance of this
* shard is created with an incremented version in the state
* {@link ShardRoutingState#INITIALIZING}.
*
* @return <code>true</code> iff the shard has successfully been moved.
* @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
*/
public boolean move(ShardRouting shard, RoutingNode node ) {
if (nodes.isEmpty() || !shard.started()) {
/* with no nodes or a not started shard this is pointless */
public boolean moveShards() {
if (nodes.isEmpty()) {
/* with no nodes this is pointless */
return false;
}
if (logger.isTraceEnabled()) {
logger.trace("Try moving shard [{}] from [{}]", shard, node);
}
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
boolean changed = initialize(routingNodes, unassigned);
if (!changed) {
final ModelNode sourceNode = nodes.get(node.nodeId());
assert sourceNode != null;
final NodeSorter sorter = newNodeSorter();
sorter.reset(shard.getIndexName());
final ModelNode[] nodes = sorter.modelNodes;
assert sourceNode.containsShard(shard);
/*
* the sorter holds the minimum weight node first for the shards index.
* We now walk through the nodes until we find a node to allocate the shard.
* This is not guaranteed to be balanced after this operation we still try best effort to
* allocate on the minimal eligible node.
*/
for (ModelNode currentNode : nodes) {
if (currentNode.getNodeId().equals(node.nodeId())) {
// Create a copy of the started shards interleaving between nodes, and check if they can remain. In the presence of throttling
// shard movements, the goal of this iteration order is to achieve a fairer movement of shards from the nodes that are
// offloading the shards.
List<ShardRouting> shards = new ArrayList<>();
int index = 0;
boolean found = true;
while (found) {
found = false;
for (RoutingNode routingNode : routingNodes) {
if (index >= routingNode.size()) {
continue;
}
RoutingNode target = currentNode.getRoutingNode(routingNodes);
Decision allocationDecision = allocation.deciders().canAllocate(shard, target, allocation);
Decision rebalanceDecision = allocation.deciders().canRebalance(shard, allocation);
Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
if (decision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
sourceNode.removeShard(shard);
ShardRouting targetRelocatingShard = routingNodes.relocate(shard, target.nodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
currentNode.addShard(targetRelocatingShard, decision);
if (logger.isTraceEnabled()) {
logger.trace("Moved shard [{}] to node [{}]", shard, currentNode.getNodeId());
found = true;
ShardRouting shardRouting = routingNode.get(index);
// we can only move started shards...
if (shardRouting.started()) {
shards.add(shardRouting);
}
}
index++;
}
if (shards.isEmpty()) {
return false;
}
final RoutingNodes.UnassignedShards unassigned = routingNodes.unassigned();
boolean changed = initialize(routingNodes, unassigned);
if (changed == false) {
final NodeSorter sorter = newNodeSorter();
final ModelNode[] modelNodes = sorter.modelNodes;
for (ShardRouting shardRouting : shards) {
final ModelNode sourceNode = nodes.get(shardRouting.currentNodeId());
assert sourceNode != null && sourceNode.containsShard(shardRouting);
final RoutingNode routingNode = sourceNode.getRoutingNode(routingNodes);
Decision decision = allocation.deciders().canRemain(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.NO) {
logger.debug("[{}][{}] allocated on [{}], but can no longer be allocated on it, moving...", shardRouting.index(), shardRouting.id(), routingNode.node());
sorter.reset(shardRouting.getIndexName());
/*
* the sorter holds the minimum weight node first for the shards index.
* We now walk through the nodes until we find a node to allocate the shard.
* This is not guaranteed to be balanced after this operation we still try best effort to
* allocate on the minimal eligible node.
*/
boolean moved = false;
for (ModelNode currentNode : modelNodes) {
if (currentNode == sourceNode) {
continue;
}
RoutingNode target = currentNode.getRoutingNode(routingNodes);
Decision allocationDecision = allocation.deciders().canAllocate(shardRouting, target, allocation);
Decision rebalanceDecision = allocation.deciders().canRebalance(shardRouting, allocation);
if (allocationDecision.type() == Type.YES && rebalanceDecision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
Decision sourceDecision = sourceNode.removeShard(shardRouting);
ShardRouting targetRelocatingShard = routingNodes.relocate(shardRouting, target.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
// re-add (now relocating shard) to source node
sourceNode.addShard(shardRouting, sourceDecision);
Decision targetDecision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision);
currentNode.addShard(targetRelocatingShard, targetDecision);
if (logger.isTraceEnabled()) {
logger.trace("Moved shard [{}] to node [{}]", shardRouting, routingNode.node());
}
moved = true;
changed = true;
break;
}
}
if (moved == false) {
logger.debug("[{}][{}] can't move", shardRouting.index(), shardRouting.id());
}
changed = true;
break;
}
}
}
@@ -19,7 +19,6 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;

@@ -36,22 +35,22 @@ import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
public interface ShardsAllocator {
/**
* Applies changes on started nodes based on the implemented algorithm. For example if a
* shard has changed to {@link ShardRoutingState#STARTED} from {@link ShardRoutingState#RELOCATING}
* Applies changes on started nodes based on the implemented algorithm. For example if a
* shard has changed to {@link ShardRoutingState#STARTED} from {@link ShardRoutingState#RELOCATING}
* this allocator might apply some cleanups on the node that used to hold the shard.
* @param allocation all started {@link ShardRouting shards}
*/
void applyStartedShards(StartedRerouteAllocation allocation);
/**
* Applies changes on failed nodes based on the implemented algorithm.
* Applies changes on failed nodes based on the implemented algorithm.
* @param allocation all failed {@link ShardRouting shards}
*/
void applyFailedShards(FailedRerouteAllocation allocation);
/**
* Assign all unassigned shards to nodes
*
* Assign all unassigned shards to nodes
*
* @param allocation current node allocation
* @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
*/

@@ -59,19 +58,17 @@ public interface ShardsAllocator {
/**
* Rebalancing number of shards on all nodes
*
*
* @param allocation current node allocation
* @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
*/
boolean rebalance(RoutingAllocation allocation);
/**
* Moves a shard from the given node to other node.
*
* @param shardRouting the shard to move
* @param node A node containing the shard
* Move started shards that can not be allocated to a node anymore
*
* @param allocation current node allocation
* @return <code>true</code> if the allocation has changed, otherwise <code>false</code>
*/
boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation);
boolean moveShards(RoutingAllocation allocation);
}
@@ -19,8 +19,6 @@
package org.elasticsearch.cluster.routing.allocation.allocator;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;

@@ -96,7 +94,7 @@ public class ShardsAllocators extends AbstractComponent implements ShardsAllocat
}
@Override
public boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
return allocator.move(shardRouting, node, allocation);
public boolean moveShards(RoutingAllocation allocation) {
return allocator.moveShards(allocation);
}
}
@@ -23,6 +23,7 @@ import java.io.BufferedReader;
import java.io.Console;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.nio.charset.Charset;
import org.elasticsearch.common.SuppressForbidden;

@@ -52,6 +53,13 @@ public abstract class Terminal {
/** The current verbosity for the terminal, defaulting to {@link Verbosity#NORMAL}. */
private Verbosity verbosity = Verbosity.NORMAL;
/** The newline used when calling println. */
private final String lineSeparator;
protected Terminal(String lineSeparator) {
this.lineSeparator = lineSeparator;
}
/** Sets the verbosity of the terminal. */
void setVerbosity(Verbosity verbosity) {
this.verbosity = verbosity;

@@ -63,8 +71,8 @@ public abstract class Terminal {
/** Reads password text from the terminal input. See {@link Console#readPassword()}}. */
public abstract char[] readSecret(String prompt);
/** Print a message directly to the terminal. */
protected abstract void doPrint(String msg);
/** Returns a Writer which can be used to write to the terminal directly. */
public abstract PrintWriter getWriter();
/** Prints a line to the terminal at {@link Verbosity#NORMAL} verbosity level. */
public final void println(String msg) {

@@ -74,7 +82,8 @@ public abstract class Terminal {
/** Prints a line to the terminal at {@code verbosity} level. */
public final void println(Verbosity verbosity, String msg) {
if (this.verbosity.ordinal() >= verbosity.ordinal()) {
doPrint(msg + System.lineSeparator());
getWriter().print(msg + lineSeparator);
getWriter().flush();
}
}

@@ -82,14 +91,17 @@ public abstract class Terminal {
private static final Console console = System.console();
ConsoleTerminal() {
super(System.lineSeparator());
}
static boolean isSupported() {
return console != null;
}
@Override
public void doPrint(String msg) {
console.printf("%s", msg);
console.flush();
public PrintWriter getWriter() {
return console.writer();
}
@Override

@@ -105,16 +117,25 @@ public abstract class Terminal {
private static class SystemTerminal extends Terminal {
private final PrintWriter writer = newWriter();
SystemTerminal() {
super(System.lineSeparator());
}
@SuppressForbidden(reason = "Writer for System.out")
private static PrintWriter newWriter() {
return new PrintWriter(System.out);
}
@Override
@SuppressForbidden(reason = "System#out")
public void doPrint(String msg) {
System.out.print(msg);
System.out.flush();
public PrintWriter getWriter() {
return writer;
}
@Override
public String readText(String text) {
doPrint(text);
getWriter().print(text);
BufferedReader reader = new BufferedReader(new InputStreamReader(System.in, Charset.defaultCharset()));
try {
return reader.readLine();
@@ -42,6 +42,7 @@ import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.search.similarities.Similarity.SimScorer;
import org.apache.lucene.search.similarities.Similarity.SimWeight;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SmallFloat;
import org.apache.lucene.util.ToStringUtils;
import java.io.IOException;

@@ -186,9 +187,13 @@ public final class AllTermQuery extends Query {
float boost;
if (payload == null) {
boost = 1;
} else {
assert payload.length == 4;
} else if (payload.length == 1) {
boost = SmallFloat.byte315ToFloat(payload.bytes[payload.offset]);
} else if (payload.length == 4) {
// TODO: for bw compat only, remove this in 6.0
boost = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
} else {
throw new IllegalStateException("Payloads are expected to have a length of 1 or 4 but got: " + payload);
}
payloadBoost += boost;
}
@@ -25,11 +25,10 @@ import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.SmallFloat;
import java.io.IOException;
import static org.apache.lucene.analysis.payloads.PayloadHelper.encodeFloat;
/**
*
*/

@@ -39,7 +38,7 @@ public final class AllTokenStream extends TokenFilter {
return new AllTokenStream(analyzer.tokenStream(allFieldName, allEntries), allEntries);
}
private final BytesRef payloadSpare = new BytesRef(new byte[4]);
private final BytesRef payloadSpare = new BytesRef(new byte[1]);
private final AllEntries allEntries;

@@ -64,7 +63,7 @@ public final class AllTokenStream extends TokenFilter {
}
final float boost = allEntries.boost(offsetAttribute.startOffset());
if (boost != 1.0f) {
encodeFloat(boost, payloadSpare.bytes, payloadSpare.offset);
payloadSpare.bytes[0] = SmallFloat.floatToByte315(boost);
payloadAttribute.setPayload(payloadSpare);
} else {
payloadAttribute.setPayload(null);
@@ -76,7 +76,7 @@ public class FilterableTermsEnum extends TermsEnum {
this.docsEnumFlag = docsEnumFlag;
if (filter == null) {
// Important - need to use the doc count that includes deleted docs
// or we have this issue: https://github.com/elasticsearch/elasticsearch/issues/7951
// or we have this issue: https://github.com/elastic/elasticsearch/issues/7951
numDocs = reader.maxDoc();
}
List<LeafReaderContext> leaves = reader.leaves();
@@ -39,6 +39,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.rest.RestStatus;

@@ -206,4 +207,20 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
}
} else {
if (recovered.compareAndSet(false, true)) {
threadPool.generic().execute(() -> gateway.performStateRecovery(recoveryListener));
threadPool.generic().execute(new AbstractRunnable() {
@Override
public void onFailure(Throwable t) {
logger.warn("Recovery failed", t);
// we reset `recovered` in the listener don't reset it here otherwise there might be a race
// that resets it to false while a new recover is already running?
recoveryListener.onFailure("state recovery failed: " + t.getMessage());
}
@Override
protected void doRun() throws Exception {
gateway.performStateRecovery(recoveryListener);
}
});
}
}
}
@@ -928,12 +928,6 @@ public class InternalEngine extends Engine {
iwc.setSimilarity(engineConfig.getSimilarity());
iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().mbFrac());
iwc.setCodec(engineConfig.getCodec());
/* We set this timeout to a highish value to work around
* the default poll interval in the Lucene lock that is
* 1000ms by default. We might need to poll multiple times
* here but with 1s poll this is only executed twice at most
* in combination with the default writelock timeout*/
iwc.setWriteLockTimeout(5000);
iwc.setUseCompoundFile(true); // always use compound on flush - reduces # of file-handles on refresh
// Warm-up hook for newly-merged segments. Warming up segments here is better since it will be performed at the end
// of the merge operation and won't slow down _refresh
@@ -98,7 +98,6 @@ class DocumentParser implements Closeable {
}
reverseOrder(context);
applyDocBoost(context);
ParsedDocument doc = parsedDocument(source, context, update(context, mapping));
// reset the context to free up memory

@@ -186,24 +185,6 @@ class DocumentParser implements Closeable {
}
}
private static void applyDocBoost(ParseContext.InternalParseContext context) {
// apply doc boost
if (context.docBoost() != 1.0f) {
Set<String> encounteredFields = new HashSet<>();
for (ParseContext.Document doc : context.docs()) {
encounteredFields.clear();
for (IndexableField field : doc) {
if (field.fieldType().indexOptions() != IndexOptions.NONE && !field.fieldType().omitNorms()) {
if (!encounteredFields.contains(field.name())) {
((Field) field).setBoost(context.docBoost() * field.boost());
encounteredFields.add(field.name());
}
}
}
}
}
}
private static ParsedDocument parsedDocument(SourceToParse source, ParseContext.InternalParseContext context, Mapping update) {
return new ParsedDocument(
context.uid(),
@@ -300,7 +300,8 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
for (Field field : fields) {
if (!customBoost()
// don't set boosts eg. on dv fields
&& field.fieldType().indexOptions() != IndexOptions.NONE) {
&& field.fieldType().indexOptions() != IndexOptions.NONE
&& Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
field.setBoost(fieldType().boost());
}
context.doc().add(field);
@@ -32,7 +32,9 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.RegexpQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TermRangeQuery;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;

@@ -398,7 +400,12 @@ public abstract class MappedFieldType extends FieldType {
}
public Query termQuery(Object value, @Nullable QueryShardContext context) {
return new TermQuery(createTerm(value));
TermQuery query = new TermQuery(createTerm(value));
if (boost == 1f ||
(context != null && context.indexVersionCreated().before(Version.V_5_0_0))) {
return query;
}
return new BoostQuery(query, boost);
}
public Query termsQuery(List values, @Nullable QueryShardContext context) {
@@ -321,16 +321,6 @@ public abstract class ParseContext {
return in.externalValue();
}
@Override
public float docBoost() {
return in.docBoost();
}
@Override
public void docBoost(float docBoost) {
in.docBoost(docBoost);
}
@Override
public StringBuilder stringBuilder() {
return in.stringBuilder();

@@ -375,8 +365,6 @@ public abstract class ParseContext {
private AllEntries allEntries = new AllEntries();
private float docBoost = 1.0f;
private Mapper dynamicMappingsUpdate = null;
public InternalParseContext(@Nullable Settings indexSettings, DocumentMapperParser docMapperParser, DocumentMapper docMapper, ContentPath path) {

@@ -402,7 +390,6 @@ public abstract class ParseContext {
this.source = source == null ? null : sourceToParse.source();
this.path.reset();
this.allEntries = new AllEntries();
this.docBoost = 1.0f;
this.dynamicMappingsUpdate = null;
}

@@ -534,16 +521,6 @@ public abstract class ParseContext {
return this.allEntries;
}
@Override
public float docBoost() {
return this.docBoost;
}
@Override
public void docBoost(float docBoost) {
this.docBoost = docBoost;
}
/**
* A string builder that can be used to construct complex names for example.
* Its better to reuse the.

@@ -759,10 +736,6 @@ public abstract class ParseContext {
return clazz.cast(externalValue());
}
public abstract float docBoost();
public abstract void docBoost(float docBoost);
/**
* A string builder that can be used to construct complex names for example.
* Its better to reuse the.
@@ -285,7 +285,9 @@ public class ByteFieldMapper extends NumberFieldMapper {
}
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
CustomByteNumericField field = new CustomByteNumericField(value, fieldType());
field.setBoost(boost);
if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
field.setBoost(boost);
}
fields.add(field);
}
if (fieldType().hasDocValues()) {
@@ -513,7 +513,9 @@ public class DateFieldMapper extends NumberFieldMapper {
if (value != null) {
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
CustomLongNumericField field = new CustomLongNumericField(value, fieldType());
field.setBoost(boost);
if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
field.setBoost(boost);
}
fields.add(field);
}
if (fieldType().hasDocValues()) {
@@ -278,7 +278,9 @@ public class DoubleFieldMapper extends NumberFieldMapper {
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
CustomDoubleNumericField field = new CustomDoubleNumericField(value, fieldType());
field.setBoost(boost);
if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
field.setBoost(boost);
}
fields.add(field);
}
if (fieldType().hasDocValues()) {
@@ -290,7 +290,9 @@ public class FloatFieldMapper extends NumberFieldMapper {
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
CustomFloatNumericField field = new CustomFloatNumericField(value, fieldType());
field.setBoost(boost);
if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
field.setBoost(boost);
}
fields.add(field);
}
if (fieldType().hasDocValues()) {
@@ -298,7 +298,9 @@ public class IntegerFieldMapper extends NumberFieldMapper {
protected void addIntegerFields(ParseContext context, List<Field> fields, int value, float boost) {
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
CustomIntegerNumericField field = new CustomIntegerNumericField(value, fieldType());
field.setBoost(boost);
if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
field.setBoost(boost);
}
fields.add(field);
}
if (fieldType().hasDocValues()) {
@@ -282,7 +282,9 @@ public class LongFieldMapper extends NumberFieldMapper {
}
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
CustomLongNumericField field = new CustomLongNumericField(value, fieldType());
field.setBoost(boost);
if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
field.setBoost(boost);
}
fields.add(field);
}
if (fieldType().hasDocValues()) {
@@ -290,7 +290,9 @@ public class ShortFieldMapper extends NumberFieldMapper {
}
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
CustomShortNumericField field = new CustomShortNumericField(value, fieldType());
field.setBoost(boost);
if (boost != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
field.setBoost(boost);
}
fields.add(field);
}
if (fieldType().hasDocValues()) {
@@ -132,10 +132,11 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder parse(String fieldName, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) {
// TODO: temporarily disabled to give Kibana time to upgrade to text/keyword mappings
/*if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0)) {
throw new IllegalArgumentException("The [string] type is removed in 5.0. You should now use either a [text] "
+ "or [keyword] field instead for field [" + fieldName + "]");
}
}*/
StringFieldMapper.Builder builder = new StringFieldMapper.Builder(fieldName);
// hack for the fact that string can't just accept true/false for
// the index property and still accepts no/not_analyzed/analyzed

@@ -240,10 +241,11 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
int positionIncrementGap, int ignoreAbove,
Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
if (Version.indexCreated(indexSettings).onOrAfter(Version.V_5_0_0)) {
// TODO: temporarily disabled to give Kibana time to upgrade to text/keyword mappings
/*if (Version.indexCreated(indexSettings).onOrAfter(Version.V_5_0_0)) {
throw new IllegalArgumentException("The [string] type is removed in 5.0. You should now use either a [text] "
+ "or [keyword] field instead for field [" + fieldType.name() + "]");
}
}*/
if (fieldType.tokenized() && fieldType.indexOptions() != NONE && fieldType().hasDocValues()) {
throw new MapperParsingException("Field [" + fieldType.name() + "] cannot be analyzed and have doc values");
}

@@ -317,7 +319,9 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
Field field = new Field(fieldType().name(), valueAndBoost.value(), fieldType());
field.setBoost(valueAndBoost.boost());
if (valueAndBoost.boost() != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
field.setBoost(valueAndBoost.boost());
}
fields.add(field);
}
if (fieldType().hasDocValues()) {
@@ -30,6 +30,7 @@ import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree;
import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.geo.GeoUtils;

@@ -452,7 +453,8 @@ public class GeoShapeFieldMapper extends FieldMapper {
return null;
}
for (Field field : fields) {
if (!customBoost()) {
if (!customBoost() &&
fieldType.boost() != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
field.setBoost(fieldType().boost());
}
context.doc().add(field);
@@ -27,6 +27,7 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.NumericUtils;
import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Numbers;

@@ -305,7 +306,9 @@ public class IpFieldMapper extends NumberFieldMapper {
final long value = ipToLong(ipAsString);
if (fieldType().indexOptions() != IndexOptions.NONE || fieldType().stored()) {
CustomLongNumericField field = new CustomLongNumericField(value, fieldType());
field.setBoost(fieldType().boost());
if (fieldType.boost() != 1f && Version.indexCreated(context.indexSettings()).before(Version.V_5_0_0)) {
field.setBoost(fieldType().boost());
}
fields.add(field);
}
if (fieldType().hasDocValues()) {
@@ -132,7 +132,7 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
}
DocumentMapper parentDocMapper = context.getMapperService().documentMapper(type);
if (parentDocMapper == null) {
throw new QueryShardException(context, "[has_parent] query configured 'parent_type' [" + type
throw new QueryShardException(context, "[" + NAME + "] query configured 'parent_type' [" + type
+ "] is not a valid type");
}

@@ -152,49 +152,36 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
}
}
Set<String> parentTypes = new HashSet<>(5);
parentTypes.add(parentDocMapper.type());
Set<String> childTypes = new HashSet<>();
ParentChildIndexFieldData parentChildIndexFieldData = null;
for (DocumentMapper documentMapper : context.getMapperService().docMappers(false)) {
ParentFieldMapper parentFieldMapper = documentMapper.parentFieldMapper();
if (parentFieldMapper.active()) {
DocumentMapper parentTypeDocumentMapper = context.getMapperService().documentMapper(parentFieldMapper.type());
if (parentFieldMapper.active() && type.equals(parentFieldMapper.type())) {
childTypes.add(documentMapper.type());
parentChildIndexFieldData = context.getForField(parentFieldMapper.fieldType());
if (parentTypeDocumentMapper == null) {
// Only add this, if this parentFieldMapper (also a parent) isn't a child of another parent.
parentTypes.add(parentFieldMapper.type());
}
}
}
if (parentChildIndexFieldData == null) {
throw new QueryShardException(context, "[has_parent] no _parent field configured");
}
Query parentTypeQuery = null;
if (parentTypes.size() == 1) {
DocumentMapper documentMapper = context.getMapperService().documentMapper(parentTypes.iterator().next());
if (documentMapper != null) {
parentTypeQuery = documentMapper.typeFilter();
}
if (childTypes.isEmpty()) {
throw new QueryShardException(context, "[" + NAME + "] no child types found for type [" + type + "]");
}
Query childrenQuery;
if (childTypes.size() == 1) {
DocumentMapper documentMapper = context.getMapperService().documentMapper(childTypes.iterator().next());
childrenQuery = documentMapper.typeFilter();
} else {
BooleanQuery.Builder parentsFilter = new BooleanQuery.Builder();
for (String parentTypeStr : parentTypes) {
DocumentMapper documentMapper = context.getMapperService().documentMapper(parentTypeStr);
if (documentMapper != null) {
parentsFilter.add(documentMapper.typeFilter(), BooleanClause.Occur.SHOULD);
}
BooleanQuery.Builder childrenFilter = new BooleanQuery.Builder();
for (String childrenTypeStr : childTypes) {
DocumentMapper documentMapper = context.getMapperService().documentMapper(childrenTypeStr);
childrenFilter.add(documentMapper.typeFilter(), BooleanClause.Occur.SHOULD);
}
parentTypeQuery = parentsFilter.build();
}
if (parentTypeQuery == null) {
return null;
childrenQuery = childrenFilter.build();
}
// wrap the query with type query
innerQuery = Queries.filtered(innerQuery, parentDocMapper.typeFilter());
Query childrenFilter = Queries.not(parentTypeQuery);
return new HasChildQueryBuilder.LateParsingQuery(childrenFilter,
return new HasChildQueryBuilder.LateParsingQuery(childrenQuery,
innerQuery,
HasChildQueryBuilder.DEFAULT_MIN_CHILDREN,
HasChildQueryBuilder.DEFAULT_MAX_CHILDREN,
@@ -926,13 +926,6 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
}
Store.verify(indexOutput);
indexOutput.close();
// write the checksum
if (fileInfo.metadata().hasLegacyChecksum()) {
Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums();
legacyChecksums.add(fileInfo.metadata());
legacyChecksums.write(store);
}
store.directory().sync(Collections.singleton(fileInfo.physicalName()));
success = true;
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
@@ -180,7 +180,6 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
*
* @return file checksum
*/
@Nullable
public String checksum() {
return metadata.checksum();
}
@@ -29,8 +29,10 @@ import org.apache.lucene.store.NativeFSLockFactory;
import org.apache.lucene.store.RateLimitedFSDirectory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.store.SimpleFSLockFactory;
import org.apache.lucene.store.SleepingLockWrapper;
import org.apache.lucene.store.StoreRateLimiting;
import org.apache.lucene.util.Constants;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.settings.Setting;

@@ -86,10 +88,12 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim
final Path location = path.resolveIndex();
Files.createDirectories(location);
Directory wrapped = newFSDirectory(location, indexSettings.getValue(INDEX_LOCK_FACTOR_SETTING));
if (IndexMetaData.isOnSharedFilesystem(indexSettings.getSettings())) {
wrapped = new SleepingLockWrapper(wrapped, 5000);
}
return new RateLimitedFSDirectory(wrapped, this, this) ;
}
@Override
public void onPause(long nanos) {
rateLimitingTimeInNanos.inc(nanos);
@ -1,123 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.store;
|
||||
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.apache.lucene.store.BufferedChecksum;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.zip.Adler32;
|
||||
import java.util.zip.Checksum;
|
||||
|
||||
/**
|
||||
* Implements verification checks to the best extent possible
|
||||
* against legacy segments.
|
||||
* <p>
|
||||
* For files since ES 1.3, we have a lucene checksum, and
|
||||
* we verify both CRC32 + length from that.
|
||||
* For older segment files, we have an elasticsearch Adler32 checksum
|
||||
* and a length, except for commit points.
|
||||
* For older commit points, we only have the length in metadata,
|
||||
* but lucene always wrote a CRC32 checksum we can verify in the future, too.
|
||||
* For (Jurassic?) files, we dont have an Adler32 checksum at all,
|
||||
* since its optional in the protocol. But we always know the length.
|
||||
* @deprecated only to support old segments
|
||||
*/
|
||||
@Deprecated
|
||||
class LegacyVerification {
|
||||
|
||||
// TODO: add a verifier for old lucene segments_N that also checks CRC.
|
||||
// but for now, at least truncation is detected here (as length will be checked)
|
||||
|
||||
/**
|
||||
* verifies Adler32 + length for index files before lucene 4.8
|
||||
*/
|
||||
static class Adler32VerifyingIndexOutput extends VerifyingIndexOutput {
|
||||
final String adler32;
|
||||
final long length;
|
||||
final Checksum checksum = new BufferedChecksum(new Adler32());
|
||||
long written;
|
||||
|
||||
public Adler32VerifyingIndexOutput(IndexOutput out, String adler32, long length) {
|
||||
super(out);
|
||||
this.adler32 = adler32;
|
||||
this.length = length;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void verify() throws IOException {
|
||||
if (written != length) {
|
||||
throw new CorruptIndexException("expected length=" + length + " != actual length: " + written + " : file truncated?", out.toString());
|
||||
}
|
||||
final String actualChecksum = Store.digestToString(checksum.getValue());
|
||||
if (!adler32.equals(actualChecksum)) {
|
||||
throw new CorruptIndexException("checksum failed (hardware problem?) : expected=" + adler32 +
|
||||
" actual=" + actualChecksum, out.toString());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeByte(byte b) throws IOException {
|
||||
out.writeByte(b);
|
||||
checksum.update(b);
|
||||
written++;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeBytes(byte[] bytes, int offset, int length) throws IOException {
|
||||
out.writeBytes(bytes, offset, length);
|
||||
checksum.update(bytes, offset, length);
|
||||
written += length;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* verifies length for index files before lucene 4.8
|
||||
*/
|
||||
static class LengthVerifyingIndexOutput extends VerifyingIndexOutput {
|
||||
final long length;
|
||||
long written;
|
||||
|
||||
public LengthVerifyingIndexOutput(IndexOutput out, long length) {
|
||||
super(out);
|
||||
this.length = length;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void verify() throws IOException {
|
||||
if (written != length) {
|
||||
throw new CorruptIndexException("expected length=" + length + " != actual length: " + written + " : file truncated?", out.toString());
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeByte(byte b) throws IOException {
|
||||
out.writeByte(b);
|
||||
written++;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeBytes(byte[] bytes, int offset, int length) throws IOException {
|
||||
out.writeBytes(bytes, offset, length);
|
||||
written += length;
|
||||
}
|
||||
}
|
||||
}
|
|
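A minimal usage sketch for the Adler32 verifier above (not part of this change): wrap the raw IndexOutput, stream the bytes through it, then call verify(). The directory path, file name, payload, and expected values are placeholders; the expected checksum and length would normally come from the file's StoreFileMetaData, and the sketch has to live in org.elasticsearch.index.store because LegacyVerification is package-private.

package org.elasticsearch.index.store; // required: LegacyVerification is package-private

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;

import java.io.IOException;
import java.nio.file.Path;

class LegacyVerificationUsageSketch {
    // "expectedAdler32" and "expectedLength" are placeholders for the values stored in StoreFileMetaData.
    static void copyAndVerify(Path dirPath, byte[] payload, String expectedAdler32, long expectedLength) throws IOException {
        try (Directory dir = FSDirectory.open(dirPath)) {
            LegacyVerification.Adler32VerifyingIndexOutput out = new LegacyVerification.Adler32VerifyingIndexOutput(
                    dir.createOutput("example.file", IOContext.DEFAULT), expectedAdler32, expectedLength);
            try {
                out.writeBytes(payload, 0, payload.length); // length and Adler32 are tracked as bytes pass through
                out.verify();                               // throws CorruptIndexException on any mismatch
            } finally {
                out.close();
            }
        }
    }
}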
@ -50,7 +50,6 @@ import org.elasticsearch.ExceptionsHelper;
|
|||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.Streams;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
|
@ -70,7 +69,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
|
|||
import org.elasticsearch.common.util.concurrent.RefCounted;
|
||||
import org.elasticsearch.common.util.iterable.Iterables;
|
||||
import org.elasticsearch.env.ShardLock;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
|
||||
|
@ -83,7 +81,6 @@ import java.io.IOException;
|
|||
import java.io.InputStream;
|
||||
import java.nio.file.NoSuchFileException;
|
||||
import java.nio.file.Path;
|
||||
import java.sql.Time;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
|
@ -121,8 +118,6 @@ import static java.util.Collections.unmodifiableMap;
|
|||
* </pre>
|
||||
*/
|
||||
public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
|
||||
private static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8_0;
|
||||
|
||||
static final String CODEC = "store";
|
||||
static final int VERSION_WRITE_THROWABLE = 2; // we write the throwable since 2.0
|
||||
static final int VERSION_STACK_TRACE = 1; // we write the stack trace too since 1.4.0
|
||||
|
@ -150,7 +145,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
this(shardId, indexSettings, directoryService, shardLock, OnClose.EMPTY);
|
||||
}
|
||||
|
||||
@Inject
|
||||
public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, OnClose onClose) throws IOException {
|
||||
super(shardId, indexSettings);
|
||||
final Settings settings = indexSettings.getSettings();
|
||||
|
@ -460,19 +454,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
IndexOutput output = directory().createOutput(fileName, context);
|
||||
boolean success = false;
|
||||
try {
|
||||
if (metadata.hasLegacyChecksum()) {
|
||||
logger.debug("create legacy adler32 output for {}", fileName);
|
||||
output = new LegacyVerification.Adler32VerifyingIndexOutput(output, metadata.checksum(), metadata.length());
|
||||
} else if (metadata.checksum() == null) {
|
||||
// TODO: when the file is a segments_N, we can still verify CRC-32 + length for more safety;
|
||||
// it's had that checksum forever.
|
||||
logger.debug("create legacy length-only output for {}", fileName);
|
||||
output = new LegacyVerification.LengthVerifyingIndexOutput(output, metadata.length());
|
||||
} else {
|
||||
assert metadata.writtenBy() != null;
|
||||
assert metadata.writtenBy().onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION);
|
||||
output = new LuceneVerifyingIndexOutput(metadata, output);
|
||||
}
|
||||
assert metadata.writtenBy() != null;
|
||||
assert metadata.writtenBy().onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION);
|
||||
output = new LuceneVerifyingIndexOutput(metadata, output);
|
||||
success = true;
|
||||
} finally {
|
||||
if (success == false) {
|
||||
|
@ -489,12 +473,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
}
|
||||
|
||||
public IndexInput openVerifyingInput(String filename, IOContext context, StoreFileMetaData metadata) throws IOException {
|
||||
if (metadata.hasLegacyChecksum() || metadata.checksum() == null) {
|
||||
logger.debug("open legacy input for {}", filename);
|
||||
return directory().openInput(filename, context);
|
||||
}
|
||||
assert metadata.writtenBy() != null;
|
||||
assert metadata.writtenBy().onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION);
|
||||
assert metadata.writtenBy().onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION);
|
||||
return new VerifyingIndexInput(directory().openInput(filename, context));
|
||||
}
|
||||
|
||||
|
@ -522,32 +502,12 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
if (input.length() != md.length()) { // first check the length no matter how old this file is
|
||||
throw new CorruptIndexException("expected length=" + md.length() + " != actual length: " + input.length() + " : file truncated?", input);
|
||||
}
|
||||
if (md.writtenBy() != null && md.writtenBy().onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) {
|
||||
// throw exception if the file is corrupt
|
||||
String checksum = Store.digestToString(CodecUtil.checksumEntireFile(input));
|
||||
// throw exception if metadata is inconsistent
|
||||
if (!checksum.equals(md.checksum())) {
|
||||
throw new CorruptIndexException("inconsistent metadata: lucene checksum=" + checksum +
|
||||
", metadata checksum=" + md.checksum(), input);
|
||||
}
|
||||
} else if (md.hasLegacyChecksum()) {
|
||||
// legacy checksum verification - no footer that we need to omit in the checksum!
|
||||
final Checksum checksum = new Adler32();
|
||||
final byte[] buffer = new byte[md.length() > 4096 ? 4096 : (int) md.length()];
|
||||
final long len = input.length();
|
||||
long read = 0;
|
||||
while (len > read) {
|
||||
final long bytesLeft = len - read;
|
||||
final int bytesToRead = bytesLeft < buffer.length ? (int) bytesLeft : buffer.length;
|
||||
input.readBytes(buffer, 0, bytesToRead, false);
|
||||
checksum.update(buffer, 0, bytesToRead);
|
||||
read += bytesToRead;
|
||||
}
|
||||
String adler32 = Store.digestToString(checksum.getValue());
|
||||
if (!adler32.equals(md.checksum())) {
|
||||
throw new CorruptIndexException("checksum failed (hardware problem?) : expected=" + md.checksum() +
|
||||
" actual=" + adler32, input);
|
||||
}
|
||||
// throw exception if the file is corrupt
|
||||
String checksum = Store.digestToString(CodecUtil.checksumEntireFile(input));
|
||||
// throw exception if metadata is inconsistent
|
||||
if (!checksum.equals(md.checksum())) {
|
||||
throw new CorruptIndexException("inconsistent metadata: lucene checksum=" + checksum +
|
||||
", metadata checksum=" + md.checksum(), input);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
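The legacy branch removed above verifies a file by feeding it through an Adler32 in 4096-byte chunks and checking the total length as well as the digest. A standalone JDK-only sketch of the same idea (the real code compares Store.digestToString(...) strings; this sketch compares raw long values, and the stream and expected values are placeholders):

import java.io.IOException;
import java.io.InputStream;
import java.util.zip.Adler32;
import java.util.zip.Checksum;

class LegacyAdler32Check {
    // Stream the whole file through an Adler32 in fixed-size chunks, then compare
    // both the observed length and the digest against the stored metadata values.
    static void verify(InputStream in, long expectedLength, long expectedAdler32) throws IOException {
        Checksum checksum = new Adler32();
        byte[] buffer = new byte[4096];
        long read = 0;
        int n;
        while ((n = in.read(buffer)) != -1) {
            checksum.update(buffer, 0, n);
            read += n;
        }
        if (read != expectedLength) {
            throw new IOException("expected length=" + expectedLength + " but read " + read);
        }
        if (checksum.getValue() != expectedAdler32) {
            throw new IOException("checksum mismatch: expected=" + expectedAdler32 + " actual=" + checksum.getValue());
        }
    }
}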
@ -803,7 +763,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
final int size = in.readVInt();
|
||||
Map<String, StoreFileMetaData> metadata = new HashMap<>();
|
||||
for (int i = 0; i < size; i++) {
|
||||
StoreFileMetaData meta = StoreFileMetaData.readStoreFileMetaData(in);
|
||||
StoreFileMetaData meta = new StoreFileMetaData(in);
|
||||
metadata.put(meta.name(), meta);
|
||||
}
|
||||
Map<String, String> commitUserData = new HashMap<>();
|
||||
|
@ -840,14 +800,13 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, ESLogger logger) throws IOException {
|
||||
long numDocs;
|
||||
Map<String, StoreFileMetaData> builder = new HashMap<>();
|
||||
Map<String, String> checksumMap = readLegacyChecksums(directory).v1();
|
||||
Map<String, String> commitUserDataBuilder = new HashMap<>();
|
||||
try {
|
||||
final SegmentInfos segmentCommitInfos = Store.readSegmentsInfo(commit, directory);
|
||||
numDocs = Lucene.getNumDocs(segmentCommitInfos);
|
||||
commitUserDataBuilder.putAll(segmentCommitInfos.getUserData());
|
||||
@SuppressWarnings("deprecation")
|
||||
Version maxVersion = Version.LUCENE_4_0; // we don't know which version was used to write so we take the max version.
|
||||
Version maxVersion = segmentCommitInfos.getMinSegmentLuceneVersion(); // we don't know which version was used to write so we take the max version.
|
||||
for (SegmentCommitInfo info : segmentCommitInfos) {
|
||||
final Version version = info.info.getVersion();
|
||||
if (version == null) {
|
||||
|
@ -858,26 +817,21 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
maxVersion = version;
|
||||
}
|
||||
for (String file : info.files()) {
|
||||
String legacyChecksum = checksumMap.get(file);
|
||||
if (version.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) {
|
||||
if (version.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION)) {
|
||||
checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)));
|
||||
} else {
|
||||
builder.put(file, new StoreFileMetaData(file, directory.fileLength(file), legacyChecksum, version));
|
||||
throw new IllegalStateException("version must be onOrAfter: " + StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION + " but was: " + version);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (maxVersion == null) {
|
||||
maxVersion = StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION;
|
||||
}
|
||||
final String segmentsFile = segmentCommitInfos.getSegmentsFileName();
|
||||
String legacyChecksum = checksumMap.get(segmentsFile);
|
||||
if (maxVersion.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) {
|
||||
if (maxVersion.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION)) {
|
||||
checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true);
|
||||
} else {
|
||||
final BytesRefBuilder fileHash = new BytesRefBuilder();
|
||||
final long length;
|
||||
try (final IndexInput in = directory.openInput(segmentsFile, IOContext.READONCE)) {
|
||||
length = in.length();
|
||||
hashFile(fileHash, new InputStreamIndexInput(in, length), length);
|
||||
}
|
||||
builder.put(segmentsFile, new StoreFileMetaData(segmentsFile, length, legacyChecksum, maxVersion, fileHash.get()));
|
||||
throw new IllegalStateException("version must be onOrAfter: " + StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION + " but was: " + maxVersion);
|
||||
}
|
||||
} catch (CorruptIndexException | IndexNotFoundException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
|
||||
// we either know the index is corrupted or it's just not there
|
||||
|
@ -902,61 +856,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
return new LoadedMetadata(unmodifiableMap(builder), unmodifiableMap(commitUserDataBuilder), numDocs);
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads legacy checksum files found in the directory.
|
||||
* <p>
|
||||
* Files are expected to start with the _checksums- prefix
|
||||
* followed by a long file version. Only the file with the highest version is read; all other files are ignored.
|
||||
*
|
||||
* @param directory the directory to read checksums from
|
||||
* @return a map of file checksums and the checksum file version
|
||||
*/
|
||||
@SuppressWarnings("deprecation") // Legacy checksum needs legacy methods
|
||||
static Tuple<Map<String, String>, Long> readLegacyChecksums(Directory directory) throws IOException {
|
||||
synchronized (directory) {
|
||||
long lastFound = -1;
|
||||
for (String name : directory.listAll()) {
|
||||
if (!isChecksum(name)) {
|
||||
continue;
|
||||
}
|
||||
long current = Long.parseLong(name.substring(CHECKSUMS_PREFIX.length()));
|
||||
if (current > lastFound) {
|
||||
lastFound = current;
|
||||
}
|
||||
}
|
||||
if (lastFound > -1) {
|
||||
try (IndexInput indexInput = directory.openInput(CHECKSUMS_PREFIX + lastFound, IOContext.READONCE)) {
|
||||
indexInput.readInt(); // version
|
||||
return new Tuple<>(indexInput.readStringStringMap(), lastFound);
|
||||
}
|
||||
}
|
||||
return new Tuple<>(new HashMap<>(), -1L);
|
||||
}
|
||||
}
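The "only the file with the highest version is read" rule above amounts to taking the maximum of the numeric suffixes of the checksum file names. A standalone sketch, with the prefix hard-coded for illustration:

class NewestChecksumFile {
    private static final String CHECKSUMS_PREFIX = "_checksums-";

    // Returns the name of the newest legacy checksum file, or null if none exists.
    static String pick(String[] fileNames) {
        String best = null;
        long bestVersion = -1;
        for (String name : fileNames) {
            if (name.startsWith(CHECKSUMS_PREFIX) == false) {
                continue;
            }
            long version = Long.parseLong(name.substring(CHECKSUMS_PREFIX.length()));
            if (version > bestVersion) {
                bestVersion = version;
                best = name;
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // prints "_checksums-1458000000000"
        System.out.println(pick(new String[]{"segments_2", "_checksums-1457000000000", "_checksums-1458000000000"}));
    }
}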
|
||||
|
||||
/**
|
||||
* Deletes all checksum files with version lower than newVersion.
|
||||
*
|
||||
* @param directory the directory to clean
|
||||
* @param newVersion the latest checksum file version
|
||||
*/
|
||||
static void cleanLegacyChecksums(Directory directory, long newVersion) throws IOException {
|
||||
synchronized (directory) {
|
||||
for (String name : directory.listAll()) {
|
||||
if (isChecksum(name)) {
|
||||
long current = Long.parseLong(name.substring(CHECKSUMS_PREFIX.length()));
|
||||
if (current < newVersion) {
|
||||
try {
|
||||
directory.deleteFile(name);
|
||||
} catch (IOException ex) {
|
||||
logger.debug("can't delete old checksum file [{}]", ex, name);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void checksumFromLuceneFile(Directory directory, String file, Map<String, StoreFileMetaData> builder,
|
||||
ESLogger logger, Version version, boolean readFileAsHash) throws IOException {
|
||||
final String checksum;
|
||||
|
@ -1225,64 +1124,13 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
|
|||
}
|
||||
}
|
||||
|
||||
public final static class LegacyChecksums {
|
||||
private final Map<String, String> legacyChecksums = new HashMap<>();
|
||||
|
||||
public void add(StoreFileMetaData metaData) throws IOException {
|
||||
|
||||
if (metaData.hasLegacyChecksum()) {
|
||||
synchronized (this) {
|
||||
// we don't add checksums if they were written by LUCENE_48... now we are using the built-in mechanism.
|
||||
legacyChecksums.put(metaData.name(), metaData.checksum());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void write(Store store) throws IOException {
|
||||
synchronized (store.directory) {
|
||||
Tuple<Map<String, String>, Long> tuple = MetadataSnapshot.readLegacyChecksums(store.directory);
|
||||
tuple.v1().putAll(legacyChecksums);
|
||||
if (!tuple.v1().isEmpty()) {
|
||||
writeChecksums(store.directory, tuple.v1(), tuple.v2());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@SuppressWarnings("deprecation") // Legacy checksum uses legacy methods
|
||||
synchronized void writeChecksums(Directory directory, Map<String, String> checksums, long lastVersion) throws IOException {
|
||||
// Make sure that if the clock goes backwards we still move the version forwards:
|
||||
long nextVersion = Math.max(lastVersion+1, System.currentTimeMillis());
|
||||
final String checksumName = CHECKSUMS_PREFIX + nextVersion;
|
||||
try (IndexOutput output = directory.createOutput(checksumName, IOContext.DEFAULT)) {
|
||||
output.writeInt(0); // version
|
||||
output.writeStringStringMap(checksums);
|
||||
}
|
||||
directory.sync(Collections.singleton(checksumName));
|
||||
MetadataSnapshot.cleanLegacyChecksums(directory, nextVersion);
|
||||
}
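The Math.max(lastVersion + 1, System.currentTimeMillis()) guard above keeps checksum file versions strictly increasing even when the wall clock jumps backwards; a tiny illustration with made-up timestamps:

class ChecksumFileVersion {
    // Even if the wall clock moved backwards, the checksum file version still advances.
    static long nextVersion(long lastVersion, long wallClockMillis) {
        return Math.max(lastVersion + 1, wallClockMillis);
    }

    public static void main(String[] args) {
        System.out.println(nextVersion(1_500_000_000_000L, 1_400_000_000_000L)); // 1500000000001
    }
}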
|
||||
|
||||
public void clear() {
|
||||
this.legacyChecksums.clear();
|
||||
}
|
||||
|
||||
public void remove(String name) {
|
||||
legacyChecksums.remove(name);
|
||||
}
|
||||
}
|
||||
|
||||
public static final String CHECKSUMS_PREFIX = "_checksums-";
|
||||
|
||||
public static boolean isChecksum(String name) {
|
||||
// TODO can we drop .cks
|
||||
return name.startsWith(CHECKSUMS_PREFIX) || name.endsWith(".cks"); // bwcompat - .cks used to be a previous checksum file
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns true if the file is auto-generated by the store and shouldn't be deleted during cleanup.
|
||||
* This includes write lock and checksum files
|
||||
*/
|
||||
public static boolean isAutogenerated(String name) {
|
||||
return IndexWriter.WRITE_LOCK_NAME.equals(name) || isChecksum(name);
|
||||
return IndexWriter.WRITE_LOCK_NAME.equals(name);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -25,35 +25,42 @@ import org.elasticsearch.common.Nullable;
|
|||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
*
|
||||
*/
|
||||
public class StoreFileMetaData implements Streamable {
|
||||
public class StoreFileMetaData implements Writeable {
|
||||
|
||||
private String name;
|
||||
public static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8_0;
|
||||
|
||||
private final String name;
|
||||
|
||||
// the actual file size on "disk", if compressed, the compressed size
|
||||
private long length;
|
||||
private final long length;
|
||||
|
||||
private String checksum;
|
||||
private final String checksum;
|
||||
|
||||
private Version writtenBy;
|
||||
private final Version writtenBy;
|
||||
|
||||
private BytesRef hash;
|
||||
private final BytesRef hash;
|
||||
|
||||
private StoreFileMetaData() {
|
||||
}
|
||||
|
||||
public StoreFileMetaData(String name, long length) {
|
||||
this(name, length, null);
|
||||
public StoreFileMetaData(StreamInput in) throws IOException {
|
||||
name = in.readString();
|
||||
length = in.readVLong();
|
||||
checksum = in.readString();
|
||||
String versionString = in.readString();
|
||||
assert versionString != null;
|
||||
writtenBy = Lucene.parseVersionLenient(versionString, FIRST_LUCENE_CHECKSUM_VERSION);
|
||||
hash = in.readBytesRef();
|
||||
}
|
||||
|
||||
public StoreFileMetaData(String name, long length, String checksum) {
|
||||
this(name, length, checksum, null, null);
|
||||
this(name, length, checksum, FIRST_LUCENE_CHECKSUM_VERSION);
|
||||
}
|
||||
|
||||
public StoreFileMetaData(String name, long length, String checksum, Version writtenBy) {
|
||||
|
@ -61,6 +68,10 @@ public class StoreFileMetaData implements Streamable {
|
|||
}
|
||||
|
||||
public StoreFileMetaData(String name, long length, String checksum, Version writtenBy, BytesRef hash) {
|
||||
assert writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION) : "index versions less than "
|
||||
+ FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy;
|
||||
Objects.requireNonNull(writtenBy, "writtenBy must not be null");
|
||||
Objects.requireNonNull(checksum, "checksum must not be null");
|
||||
this.name = name;
|
||||
this.length = length;
|
||||
this.checksum = checksum;
|
||||
|
@ -85,10 +96,8 @@ public class StoreFileMetaData implements Streamable {
|
|||
|
||||
/**
|
||||
* Returns a string representation of the file's checksum. Since Lucene 4.8 this is a CRC32 checksum written
|
||||
* by lucene. Previously we used Adler32 on top of Lucene as the checksum algorithm; if {@link #hasLegacyChecksum()} returns
|
||||
* <code>true</code> this is an Adler32 checksum.
|
||||
* by lucene.
|
||||
*/
|
||||
@Nullable
|
||||
public String checksum() {
|
||||
return this.checksum;
|
||||
}
|
||||
|
@ -104,33 +113,22 @@ public class StoreFileMetaData implements Streamable {
|
|||
return length == other.length && checksum.equals(other.checksum) && hash.equals(other.hash);
|
||||
}
|
||||
|
||||
public static StoreFileMetaData readStoreFileMetaData(StreamInput in) throws IOException {
|
||||
StoreFileMetaData md = new StoreFileMetaData();
|
||||
md.readFrom(in);
|
||||
return md;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "name [" + name + "], length [" + length + "], checksum [" + checksum + "], writtenBy [" + writtenBy + "]" ;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
name = in.readString();
|
||||
length = in.readVLong();
|
||||
checksum = in.readOptionalString();
|
||||
String versionString = in.readOptionalString();
|
||||
writtenBy = Lucene.parseVersionLenient(versionString, null);
|
||||
hash = in.readBytesRef();
|
||||
public StoreFileMetaData readFrom(StreamInput in) throws IOException {
|
||||
return new StoreFileMetaData(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(name);
|
||||
out.writeVLong(length);
|
||||
out.writeOptionalString(checksum);
|
||||
out.writeOptionalString(writtenBy == null ? null : writtenBy.toString());
|
||||
out.writeString(checksum);
|
||||
out.writeString(writtenBy.toString());
|
||||
out.writeBytesRef(hash);
|
||||
}
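The change above converts StoreFileMetaData from Streamable to Writeable: the fields become final, a StreamInput constructor replaces the mutating readFrom, and writeTo must mirror that constructor field for field. A hypothetical analogue of the pattern using plain java.io streams (not Elasticsearch API), just to show the symmetry:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Hypothetical analogue: the read-side constructor and writeTo must read and write
// exactly the same fields in exactly the same order.
final class FileMetaSketch {
    final String name;
    final long length;
    final String checksum;

    FileMetaSketch(String name, long length, String checksum) {
        this.name = name;
        this.length = length;
        this.checksum = checksum;
    }

    FileMetaSketch(DataInputStream in) throws IOException { // mirrors writeTo below
        this.name = in.readUTF();
        this.length = in.readLong();
        this.checksum = in.readUTF();
    }

    void writeTo(DataOutputStream out) throws IOException {
        out.writeUTF(name);
        out.writeLong(length);
        out.writeUTF(checksum);
    }
}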
|
||||
|
||||
|
@ -141,14 +139,6 @@ public class StoreFileMetaData implements Streamable {
|
|||
return writtenBy;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> iff the checksum is not <code>null</code> and the file has NOT been written by
|
||||
* a Lucene version greater than or equal to Lucene 4.8
|
||||
*/
|
||||
public boolean hasLegacyChecksum() {
|
||||
return checksum != null && (writtenBy == null || writtenBy.onOrAfter(Version.LUCENE_4_8) == false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a variable length hash of the file represented by this metadata object. This can be the file
|
||||
* itself if the file is small enough. If the length of the hash is <tt>0</tt> no hash value is available
|
||||
|
|
|
@ -76,8 +76,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
|
|||
private final AtomicBoolean finished = new AtomicBoolean();
|
||||
|
||||
private final ConcurrentMap<String, IndexOutput> openIndexOutputs = ConcurrentCollections.newConcurrentMap();
|
||||
private final Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums();
|
||||
|
||||
private final CancellableThreads cancellableThreads = new CancellableThreads();
|
||||
|
||||
// last time this status was accessed
|
||||
|
@ -145,10 +143,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
|
|||
return state().getStage();
|
||||
}
|
||||
|
||||
public Store.LegacyChecksums legacyChecksums() {
|
||||
return legacyChecksums;
|
||||
}
|
||||
|
||||
/** renames all temporary files to their true name, potentially overriding existing files */
|
||||
public void renameAllTempFiles() throws IOException {
|
||||
ensureRefCount();
|
||||
|
@ -281,7 +275,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
|
|||
logger.trace("cleaning temporary file [{}]", file);
|
||||
store.deleteQuiet(file);
|
||||
}
|
||||
legacyChecksums.clear();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -344,8 +337,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
|
|||
// to recover from in case of a full cluster shutdown just when this code executes...
|
||||
renameAllTempFiles();
|
||||
final Store store = store();
|
||||
// now write checksums
|
||||
legacyChecksums().write(store);
|
||||
try {
|
||||
store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
|
||||
} catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
|
||||
|
@ -399,8 +390,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
|
|||
// we are done
|
||||
indexOutput.close();
|
||||
}
|
||||
// write the checksum
|
||||
legacyChecksums().add(fileMetaData);
|
||||
final String temporaryFileName = getTempNameForFile(name);
|
||||
assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName);
|
||||
store.directory().sync(Collections.singleton(temporaryFileName));
|
||||
|
|
|
@ -317,18 +317,15 @@ public class Node implements Closeable {
|
|||
discovery.start();
|
||||
transportService.acceptIncomingRequests();
|
||||
discovery.startInitialJoin();
|
||||
|
||||
// tribe nodes don't have a master so we shouldn't register an observer
|
||||
if (DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings).millis() > 0) {
|
||||
final ThreadPool thread = injector.getInstance(ThreadPool.class);
|
||||
ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, thread.getThreadContext());
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
if (observer.observedState().nodes().masterNodeId() == null) {
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
observer.waitForNextChange(new ClusterStateObserver.Listener() {
|
||||
@Override
|
||||
public void onNewClusterState(ClusterState state) {
|
||||
latch.countDown();
|
||||
}
|
||||
public void onNewClusterState(ClusterState state) { latch.countDown(); }
|
||||
|
||||
@Override
|
||||
public void onClusterServiceClose() {
|
||||
|
@ -337,16 +334,17 @@ public class Node implements Closeable {
|
|||
|
||||
@Override
|
||||
public void onTimeout(TimeValue timeout) {
|
||||
assert false;
|
||||
logger.warn("timed out while waiting for initial discovery state - timeout: {}",
|
||||
DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings));
|
||||
latch.countDown();
|
||||
}
|
||||
// use null timeout as we use the timeout on the latch wait
|
||||
}, MasterNodeChangePredicate.INSTANCE, null);
|
||||
}
|
||||
}, MasterNodeChangePredicate.INSTANCE, DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings));
|
||||
|
||||
try {
|
||||
latch.await(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings).millis(), TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state");
|
||||
try {
|
||||
latch.await();
|
||||
} catch (InterruptedException e) {
|
||||
throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -45,7 +45,7 @@ import java.io.IOException;
|
|||
/**
|
||||
* To be implemented by a {@link SearchScript} which can provide an {@link Explanation} of the score.
|
||||
* This is currently not used inside elasticsearch itself, but it is used externally; see for example:
|
||||
* https://github.com/elasticsearch/elasticsearch/issues/8561
|
||||
* https://github.com/elastic/elasticsearch/issues/8561
|
||||
*/
|
||||
public interface ExplainableSearchScript extends LeafSearchScript {
|
||||
|
||||
|
@ -58,4 +58,4 @@ public interface ExplainableSearchScript extends LeafSearchScript {
|
|||
*/
|
||||
Explanation explain(Explanation subQueryScore) throws IOException;
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
@ -332,9 +332,18 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
|
|||
// notify the handler that an exception happened, but do it on a different thread since we don't
|
||||
// want handlers to worry about stack overflows
|
||||
final SendRequestTransportException sendRequestException = new SendRequestTransportException(node, action, e);
|
||||
threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() {
|
||||
threadPool.executor(ThreadPool.Names.GENERIC).execute(new AbstractRunnable() {
|
||||
@Override
|
||||
public void run() {
|
||||
public void onRejection(Throwable t) {
|
||||
// if we get rejected during node shutdown we don't want to bubble it up
|
||||
logger.debug("failed to notify response handler on rejection", t);
|
||||
}
|
||||
@Override
|
||||
public void onFailure(Throwable t) {
|
||||
logger.warn("failed to notify response handler on exception", t);
|
||||
}
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
holderToNotify.handler().handleException(sendRequestException);
|
||||
}
|
||||
});
|
||||
|
|
|
@ -153,7 +153,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
|
|||
assertMultiGetResponse(multiGetRequestBuilder.get(), numDocs);
|
||||
}
|
||||
|
||||
//https://github.com/elasticsearch/elasticsearch/issues/5038
|
||||
//https://github.com/elastic/elasticsearch/issues/5038
|
||||
public void testBulkProcessorConcurrentRequestsNoNodeAvailableException() throws Exception {
|
||||
//we create a transport client with no nodes to make sure it throws NoNodeAvailableException
|
||||
Settings settings = Settings.builder()
|
||||
|
|
|
@ -0,0 +1,256 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
|
||||
package org.elasticsearch.action.bulk;
|
||||
|
||||
import org.apache.lucene.util.Constants;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
import org.elasticsearch.action.support.AutoCreateIndex;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.cluster.TestClusterService;
|
||||
import org.elasticsearch.test.transport.CapturingTransport;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.HashSet;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.function.LongSupplier;
|
||||
|
||||
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
||||
public class TransportBulkActionTookTests extends ESTestCase {
|
||||
|
||||
private ThreadPool threadPool;
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
threadPool = mock(ThreadPool.class);
|
||||
}
|
||||
|
||||
private TransportBulkAction createAction(boolean controlled, AtomicLong expected) {
|
||||
CapturingTransport capturingTransport = new CapturingTransport();
|
||||
ClusterService clusterService = new TestClusterService(threadPool);
|
||||
TransportService transportService = new TransportService(capturingTransport, threadPool);
|
||||
transportService.start();
|
||||
transportService.acceptIncomingRequests();
|
||||
IndexNameExpressionResolver resolver = new Resolver(Settings.EMPTY);
|
||||
ActionFilters actionFilters = new ActionFilters(new HashSet<>());
|
||||
|
||||
TransportCreateIndexAction createIndexAction = new TransportCreateIndexAction(
|
||||
Settings.EMPTY,
|
||||
transportService,
|
||||
clusterService,
|
||||
threadPool,
|
||||
null,
|
||||
actionFilters,
|
||||
resolver);
|
||||
|
||||
if (controlled) {
|
||||
|
||||
return new TestTransportBulkAction(
|
||||
Settings.EMPTY,
|
||||
threadPool,
|
||||
transportService,
|
||||
clusterService,
|
||||
null,
|
||||
createIndexAction,
|
||||
actionFilters,
|
||||
resolver,
|
||||
null,
|
||||
expected::get) {
|
||||
@Override
|
||||
public void executeBulk(BulkRequest bulkRequest, ActionListener<BulkResponse> listener) {
|
||||
expected.set(1000000);
|
||||
super.executeBulk(bulkRequest, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
void executeBulk(
|
||||
BulkRequest bulkRequest,
|
||||
long startTimeNanos,
|
||||
ActionListener<BulkResponse> listener,
|
||||
AtomicArray<BulkItemResponse> responses) {
|
||||
expected.set(1000000);
|
||||
super.executeBulk(bulkRequest, startTimeNanos, listener, responses);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
return new TestTransportBulkAction(
|
||||
Settings.EMPTY,
|
||||
threadPool,
|
||||
transportService,
|
||||
clusterService,
|
||||
null,
|
||||
createIndexAction,
|
||||
actionFilters,
|
||||
resolver,
|
||||
null,
|
||||
System::nanoTime) {
|
||||
@Override
|
||||
public void executeBulk(BulkRequest bulkRequest, ActionListener<BulkResponse> listener) {
|
||||
long elapsed = spinForAtLeastOneMillisecond();
|
||||
expected.set(elapsed);
|
||||
super.executeBulk(bulkRequest, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
void executeBulk(
|
||||
BulkRequest bulkRequest,
|
||||
long startTimeNanos,
|
||||
ActionListener<BulkResponse> listener,
|
||||
AtomicArray<BulkItemResponse> responses) {
|
||||
long elapsed = spinForAtLeastOneMillisecond();
|
||||
expected.set(elapsed);
|
||||
super.executeBulk(bulkRequest, startTimeNanos, listener, responses);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// test unit conversion with a controlled clock
|
||||
public void testTookWithControlledClock() throws Exception {
|
||||
runTestTook(true);
|
||||
}
|
||||
|
||||
// test took advances with System#nanoTime
|
||||
public void testTookWithRealClock() throws Exception {
|
||||
runTestTook(false);
|
||||
}
|
||||
|
||||
private void runTestTook(boolean controlled) throws Exception {
|
||||
String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk.json");
|
||||
// translate Windows line endings (\r\n) to standard ones (\n)
|
||||
if (Constants.WINDOWS) {
|
||||
bulkAction = Strings.replace(bulkAction, "\r\n", "\n");
|
||||
}
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null);
|
||||
AtomicLong expected = new AtomicLong();
|
||||
TransportBulkAction action = createAction(controlled, expected);
|
||||
action.doExecute(bulkRequest, new ActionListener<BulkResponse>() {
|
||||
@Override
|
||||
public void onResponse(BulkResponse bulkItemResponses) {
|
||||
if (controlled) {
|
||||
assertThat(
|
||||
bulkItemResponses.getTook().getMillis(),
|
||||
equalTo(TimeUnit.MILLISECONDS.convert(expected.get(), TimeUnit.NANOSECONDS)));
|
||||
} else {
|
||||
assertThat(
|
||||
bulkItemResponses.getTook().getMillis(),
|
||||
greaterThanOrEqualTo(TimeUnit.MILLISECONDS.convert(expected.get(), TimeUnit.NANOSECONDS)));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Throwable e) {
|
||||
|
||||
}
|
||||
});
|
||||
}
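In the controlled case above, expected::get is passed as the relativeTimeProvider, so the test fully determines how much "time" elapses between the start and end of the bulk request. A JDK-only sketch of that mechanism with placeholder values:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongSupplier;

class ControlledTookClock {
    public static void main(String[] args) {
        AtomicLong fakeNanos = new AtomicLong();
        LongSupplier relativeTimeProvider = fakeNanos::get;      // stands in for expected::get above
        long startNanos = relativeTimeProvider.getAsLong();
        fakeNanos.addAndGet(TimeUnit.MILLISECONDS.toNanos(25));  // deterministically "advance" the clock
        long tookMillis = TimeUnit.NANOSECONDS.toMillis(relativeTimeProvider.getAsLong() - startNanos);
        System.out.println(tookMillis);                          // 25, independent of wall-clock time
    }
}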
|
||||
|
||||
static class Resolver extends IndexNameExpressionResolver {
|
||||
public Resolver(Settings settings) {
|
||||
super(settings);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String[] concreteIndices(ClusterState state, IndicesRequest request) {
|
||||
return request.indices();
|
||||
}
|
||||
}
|
||||
|
||||
static class TestTransportBulkAction extends TransportBulkAction {
|
||||
|
||||
public TestTransportBulkAction(
|
||||
Settings settings,
|
||||
ThreadPool threadPool,
|
||||
TransportService transportService,
|
||||
ClusterService clusterService,
|
||||
TransportShardBulkAction shardBulkAction,
|
||||
TransportCreateIndexAction createIndexAction,
|
||||
ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
AutoCreateIndex autoCreateIndex,
|
||||
LongSupplier relativeTimeProvider) {
|
||||
super(
|
||||
settings,
|
||||
threadPool,
|
||||
transportService,
|
||||
clusterService,
|
||||
shardBulkAction,
|
||||
createIndexAction,
|
||||
actionFilters,
|
||||
indexNameExpressionResolver,
|
||||
autoCreateIndex,
|
||||
relativeTimeProvider);
|
||||
}
|
||||
|
||||
@Override
|
||||
boolean needToCheck() {
|
||||
return randomBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
boolean shouldAutoCreate(String index, ClusterState state) {
|
||||
return randomBoolean();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static class TestTransportCreateIndexAction extends TransportCreateIndexAction {
|
||||
|
||||
public TestTransportCreateIndexAction(
|
||||
Settings settings,
|
||||
TransportService transportService,
|
||||
ClusterService clusterService,
|
||||
ThreadPool threadPool,
|
||||
MetaDataCreateIndexService createIndexService,
|
||||
ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, transportService, clusterService, threadPool, createIndexService, actionFilters, indexNameExpressionResolver);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(Task task, CreateIndexRequest request, ActionListener<CreateIndexResponse> listener) {
|
||||
listener.onResponse(newResponse());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -477,7 +477,7 @@ public class IndexAliasesIT extends ESIntegTestCase {
|
|||
});
|
||||
}
|
||||
executor.shutdown();
|
||||
boolean done = executor.awaitTermination(10, TimeUnit.SECONDS);
|
||||
boolean done = executor.awaitTermination(20, TimeUnit.SECONDS);
|
||||
assertThat(done, equalTo(true));
|
||||
if (!done) {
|
||||
executor.shutdownNow();
|
||||
|
|
|
@ -130,6 +130,33 @@ public class BootstrapCheckTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testMaxNumberOfThreadsCheck() {
|
||||
final int limit = 1 << 15;
|
||||
final AtomicLong maxNumberOfThreads = new AtomicLong(randomIntBetween(1, limit - 1));
|
||||
final BootstrapCheck.MaxNumberOfThreadsCheck check = new BootstrapCheck.MaxNumberOfThreadsCheck() {
|
||||
@Override
|
||||
long getMaxNumberOfThreads() {
|
||||
return maxNumberOfThreads.get();
|
||||
}
|
||||
};
|
||||
|
||||
try {
|
||||
BootstrapCheck.check(true, Collections.singletonList(check));
|
||||
fail("should have failed due to max number of threads too low");
|
||||
} catch (final RuntimeException e) {
|
||||
assertThat(e.getMessage(), containsString("max number of threads"));
|
||||
}
|
||||
|
||||
maxNumberOfThreads.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));
|
||||
|
||||
BootstrapCheck.check(true, Collections.singletonList(check));
|
||||
|
||||
// nothing should happen if the current max number of threads is
|
||||
// not available
|
||||
maxNumberOfThreads.set(-1);
|
||||
BootstrapCheck.check(true, Collections.singletonList(check));
|
||||
}
|
||||
|
||||
public void testEnforceLimits() {
|
||||
final Set<Setting> enforceSettings = BootstrapCheck.enforceSettings();
|
||||
final Setting setting = randomFrom(Arrays.asList(enforceSettings.toArray(new Setting[enforceSettings.size()])));
|
||||
|
|
|
@ -20,7 +20,9 @@
|
|||
package org.elasticsearch.bwcompat;
|
||||
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.search.Explanation;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.SmallFloat;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
|
||||
|
@ -297,6 +299,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
|
|||
importIndex(indexName);
|
||||
assertIndexSanity(indexName, version);
|
||||
assertBasicSearchWorks(indexName);
|
||||
assertAllSearchWorks(indexName);
|
||||
assertBasicAggregationWorks(indexName);
|
||||
assertRealtimeGetWorks(indexName);
|
||||
assertNewReplicasWork(indexName);
|
||||
|
@ -354,6 +357,39 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
|
|||
assertEquals(numDocs, searchRsp.getHits().getTotalHits());
|
||||
}
|
||||
|
||||
boolean findPayloadBoostInExplanation(Explanation expl) {
|
||||
if (expl.getDescription().startsWith("payloadBoost=") && expl.getValue() != 1f) {
|
||||
return true;
|
||||
} else {
|
||||
boolean found = false;
|
||||
for (Explanation sub : expl.getDetails()) {
|
||||
found |= findPayloadBoostInExplanation(sub);
|
||||
}
|
||||
return found;
|
||||
}
|
||||
}
|
||||
|
||||
void assertAllSearchWorks(String indexName) {
|
||||
logger.info("--> testing _all search");
|
||||
SearchResponse searchRsp = client().prepareSearch(indexName).get();
|
||||
ElasticsearchAssertions.assertNoFailures(searchRsp);
|
||||
assertThat(searchRsp.getHits().getTotalHits(), greaterThanOrEqualTo(1L));
|
||||
SearchHit bestHit = searchRsp.getHits().getAt(0);
|
||||
|
||||
// Make sure there are payloads and they are taken into account for the score
|
||||
// the 'string' field has a boost of 4 in the mappings so it should get a payload boost
|
||||
String stringValue = (String) bestHit.sourceAsMap().get("string");
|
||||
assertNotNull(stringValue);
|
||||
Explanation explanation = client().prepareExplain(indexName, bestHit.getType(), bestHit.getId())
|
||||
.setQuery(QueryBuilders.matchQuery("_all", stringValue)).get().getExplanation();
|
||||
assertTrue("Could not find payload boost in explanation\n" + explanation, findPayloadBoostInExplanation(explanation));
|
||||
|
||||
// Make sure the query can run on the whole index
|
||||
searchRsp = client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("_all", stringValue)).setExplain(true).get();
|
||||
ElasticsearchAssertions.assertNoFailures(searchRsp);
|
||||
assertThat(searchRsp.getHits().getTotalHits(), greaterThanOrEqualTo(1L));
|
||||
}
|
||||
|
||||
void assertBasicAggregationWorks(String indexName) {
|
||||
// histogram on a long
|
||||
SearchResponse searchRsp = client().prepareSearch(indexName).addAggregation(AggregationBuilders.histogram("histo").field("long_sort").interval(10)).get();
|
||||
|
|
|
@ -60,7 +60,7 @@ public class ClusterModuleTests extends ModuleTestCase {
|
|||
return false;
|
||||
}
|
||||
@Override
|
||||
public boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
|
||||
public boolean moveShards(RoutingAllocation allocation) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,58 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster.allocation;
|
||||
|
||||
import org.elasticsearch.cluster.ClusterModule;
|
||||
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
|
||||
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
|
||||
import static org.elasticsearch.test.ESIntegTestCase.Scope;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
|
||||
@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
|
||||
public class ShardsAllocatorModuleIT extends ESIntegTestCase {
|
||||
|
||||
public void testLoadDefaultShardsAllocator() throws IOException {
|
||||
assertAllocatorInstance(Settings.Builder.EMPTY_SETTINGS, BalancedShardsAllocator.class);
|
||||
}
|
||||
|
||||
public void testLoadByShortKeyShardsAllocator() throws IOException {
|
||||
Settings build = settingsBuilder().put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), "even_shard") // legacy just to make sure we don't barf
|
||||
.build();
|
||||
assertAllocatorInstance(build, BalancedShardsAllocator.class);
|
||||
build = settingsBuilder().put(ClusterModule.SHARDS_ALLOCATOR_TYPE_SETTING.getKey(), ClusterModule.BALANCED_ALLOCATOR).build();
|
||||
assertAllocatorInstance(build, BalancedShardsAllocator.class);
|
||||
}
|
||||
|
||||
private void assertAllocatorInstance(Settings settings, Class<? extends ShardsAllocator> clazz) throws IOException {
|
||||
while (cluster().size() != 0) {
|
||||
internalCluster().stopRandomDataNode();
|
||||
}
|
||||
internalCluster().startNode(settings);
|
||||
ShardsAllocator instance = internalCluster().getInstance(ShardsAllocator.class);
|
||||
assertThat(instance, instanceOf(clazz));
|
||||
}
|
||||
}
|
|
@ -320,7 +320,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
|
|||
}
|
||||
|
||||
@Override
|
||||
public boolean move(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
|
||||
public boolean moveShards(RoutingAllocation allocation) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@ -19,41 +19,41 @@
|
|||
|
||||
package org.elasticsearch.common.cli;
|
||||
|
||||
import static org.hamcrest.Matchers.hasItem;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
|
||||
public class TerminalTests extends CliToolTestCase {
|
||||
public void testVerbosity() throws Exception {
|
||||
CaptureOutputTerminal terminal = new CaptureOutputTerminal(Terminal.Verbosity.SILENT);
|
||||
MockTerminal terminal = new MockTerminal();
|
||||
terminal.setVerbosity(Terminal.Verbosity.SILENT);
|
||||
assertPrinted(terminal, Terminal.Verbosity.SILENT, "text");
|
||||
assertNotPrinted(terminal, Terminal.Verbosity.NORMAL, "text");
|
||||
assertNotPrinted(terminal, Terminal.Verbosity.VERBOSE, "text");
|
||||
|
||||
terminal = new CaptureOutputTerminal(Terminal.Verbosity.NORMAL);
|
||||
terminal = new MockTerminal();
|
||||
assertPrinted(terminal, Terminal.Verbosity.SILENT, "text");
|
||||
assertPrinted(terminal, Terminal.Verbosity.NORMAL, "text");
|
||||
assertNotPrinted(terminal, Terminal.Verbosity.VERBOSE, "text");
|
||||
|
||||
terminal = new CaptureOutputTerminal(Terminal.Verbosity.VERBOSE);
|
||||
terminal = new MockTerminal();
|
||||
terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
|
||||
assertPrinted(terminal, Terminal.Verbosity.SILENT, "text");
|
||||
assertPrinted(terminal, Terminal.Verbosity.NORMAL, "text");
|
||||
assertPrinted(terminal, Terminal.Verbosity.VERBOSE, "text");
|
||||
}
|
||||
|
||||
public void testEscaping() throws Exception {
|
||||
CaptureOutputTerminal terminal = new CaptureOutputTerminal(Terminal.Verbosity.NORMAL);
|
||||
MockTerminal terminal = new MockTerminal();
|
||||
assertPrinted(terminal, Terminal.Verbosity.NORMAL, "This message contains percent like %20n");
|
||||
}
|
||||
|
||||
private void assertPrinted(CaptureOutputTerminal logTerminal, Terminal.Verbosity verbosity, String text) {
|
||||
private void assertPrinted(MockTerminal logTerminal, Terminal.Verbosity verbosity, String text) throws Exception {
|
||||
logTerminal.println(verbosity, text);
|
||||
assertEquals(1, logTerminal.getTerminalOutput().size());
|
||||
assertTrue(logTerminal.getTerminalOutput().get(0).contains(text));
|
||||
logTerminal.terminalOutput.clear();
|
||||
String output = logTerminal.getOutput();
|
||||
assertTrue(output, output.contains(text));
|
||||
logTerminal.resetOutput();
|
||||
}
|
||||
|
||||
private void assertNotPrinted(CaptureOutputTerminal logTerminal, Terminal.Verbosity verbosity, String text) {
|
||||
private void assertNotPrinted(MockTerminal logTerminal, Terminal.Verbosity verbosity, String text) throws Exception {
|
||||
logTerminal.println(verbosity, text);
|
||||
assertThat(logTerminal.getTerminalOutput(), hasSize(0));
|
||||
String output = logTerminal.getOutput();
|
||||
assertTrue(output, output.isEmpty());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,21 +19,21 @@
|
|||
|
||||
package org.elasticsearch.common.logging;
|
||||
|
||||
import org.apache.log4j.Appender;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.elasticsearch.common.cli.CliToolTestCase;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.node.internal.InternalSettingsPreparer;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.StandardOpenOption;
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.log4j.Appender;
|
||||
import org.apache.log4j.Logger;
|
||||
import org.elasticsearch.common.cli.MockTerminal;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.node.internal.InternalSettingsPreparer;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.Before;
|
||||
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
|
@ -162,7 +162,7 @@ public class LoggingConfigurationTests extends ESTestCase {
|
|||
.put("appender.console.type", "console")
|
||||
.put("appender.console.layout.type", "consolePattern")
|
||||
.put("appender.console.layout.conversionPattern", "[%d{ISO8601}][%-5p][%-25c] %m%n")
|
||||
.build(), new CliToolTestCase.MockTerminal());
|
||||
.build(), new MockTerminal());
|
||||
LogConfigurator.configure(environment.settings(), true);
|
||||
// args should overwrite whatever is in the config
|
||||
ESLogger esLogger = ESLoggerFactory.getLogger("test_resolve_order");
|
||||
|
@ -187,7 +187,7 @@ public class LoggingConfigurationTests extends ESTestCase {
|
|||
Settings.builder()
|
||||
.put(Environment.PATH_CONF_SETTING.getKey(), tmpDir.toAbsolutePath())
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
|
||||
.build(), new CliToolTestCase.MockTerminal());
|
||||
.build(), new MockTerminal());
|
||||
LogConfigurator.configure(environment.settings(), false);
|
||||
ESLogger esLogger = ESLoggerFactory.getLogger("test_config_not_read");
|
||||
|
||||
|
|
|
@ -42,6 +42,7 @@ import org.apache.lucene.search.TopDocs;
|
|||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.RAMDirectory;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.SmallFloat;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
|
@ -89,8 +90,8 @@ public class SimpleAllTests extends ESTestCase {
|
|||
if (payload == null || payload.length == 0) {
|
||||
assertEquals(boost, 1f, 0.001f);
|
||||
} else {
|
||||
assertEquals(4, payload.length);
|
||||
final float b = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
|
||||
assertEquals(1, payload.length);
|
||||
final float b = SmallFloat.byte315ToFloat(payload.bytes[payload.offset]);
|
||||
assertEquals(boost, b, 0.001f);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -33,7 +33,7 @@ import static org.hamcrest.Matchers.equalTo;
|
|||
|
||||
public class ScriptScoreFunctionTests extends ESTestCase {
|
||||
/**
|
||||
* Tests https://github.com/elasticsearch/elasticsearch/issues/2426
|
||||
* Tests https://github.com/elastic/elasticsearch/issues/2426
|
||||
*/
|
||||
public void testScriptScoresReturnsNaN() throws IOException {
|
||||
ScoreFunction scoreFunction = new ScriptScoreFunction(new Script("Float.NaN"), new FloatValueScript(Float.NaN));
|
||||
|
|
|
@ -46,20 +46,13 @@ public class PrioritizedRunnableTests extends ESTestCase {
|
|||
|
||||
// test age advances with System#nanoTime
|
||||
public void testGetAgeInMillisWithRealClock() throws InterruptedException {
|
||||
long nanosecondsInMillisecond = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
|
||||
PrioritizedRunnable runnable = new PrioritizedRunnable(Priority.NORMAL) {
|
||||
@Override
|
||||
public void run() {
|
||||
}
|
||||
};
|
||||
|
||||
// force at least one millisecond to elapse, but ensure the
|
||||
// clock has enough resolution to observe the passage of time
|
||||
long start = System.nanoTime();
|
||||
long elapsed;
|
||||
while ((elapsed = (System.nanoTime() - start)) < nanosecondsInMillisecond) {
|
||||
// busy spin
|
||||
}
|
||||
long elapsed = spinForAtLeastOneMillisecond();
|
||||
|
||||
// creation happened before start, so age will be at least as
|
||||
// large as elapsed
|
||||
|
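The inline busy-spin loop removed above appears to have been extracted into a shared spinForAtLeastOneMillisecond() helper (presumably in the common test base class; that file is not part of this excerpt). Reconstructed from the removed code, it would look roughly like:

import java.util.concurrent.TimeUnit;

class SpinClock {
    // Busy-spin until at least one full millisecond has observably elapsed,
    // then return the elapsed time in nanoseconds.
    static long spinForAtLeastOneMillisecond() {
        long oneMillisInNanos = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
        long start = System.nanoTime();
        long elapsed;
        while ((elapsed = System.nanoTime() - start) < oneMillisInNanos) {
            // busy spin
        }
        return elapsed;
    }
}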
|
|
@ -198,7 +198,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
|
|||
|
||||
|
||||
/**
|
||||
* Test that no split brain occurs under partial network partition. See https://github.com/elasticsearch/elasticsearch/issues/2488
|
||||
* Test that no split brain occurs under partial network partition. See https://github.com/elastic/elasticsearch/issues/2488
|
||||
*/
|
||||
public void testFailWithMinimumMasterNodesConfigured() throws Exception {
|
||||
List<String> nodes = startCluster(3);
|
||||
|
|
|
@ -285,7 +285,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
|
|||
logger.info("--> one node is closed - start indexing data into the second one");
|
||||
client.prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("field", "value3").endObject()).execute().actionGet();
|
||||
// TODO: remove once refresh doesn't fail immediately if there a master block:
|
||||
// https://github.com/elasticsearch/elasticsearch/issues/9997
|
||||
// https://github.com/elastic/elasticsearch/issues/9997
|
||||
client.admin().cluster().prepareHealth("test").setWaitForYellowStatus().get();
|
||||
client.admin().indices().prepareRefresh().execute().actionGet();
|
||||
|
||||
|
|
|
@ -366,7 +366,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
// related to https://github.com/elasticsearch/elasticsearch/issues/5864
|
||||
// related to https://github.com/elastic/elasticsearch/issues/5864
|
||||
public void testMistypedTypeInRoot() throws IOException {
|
||||
String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/mistyped_type_in_root.json");
|
||||
try {
|
||||
|
@ -378,7 +378,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
// issue https://github.com/elasticsearch/elasticsearch/issues/5864
|
||||
// issue https://github.com/elastic/elasticsearch/issues/5864
|
||||
public void testMisplacedMappingAsRoot() throws IOException {
|
||||
String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/all/misplaced_mapping_key_in_root.json");
|
||||
try {
|
||||
|
@ -390,7 +390,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
// issue https://github.com/elasticsearch/elasticsearch/issues/5864
|
||||
// issue https://github.com/elastic/elasticsearch/issues/5864
|
||||
// test that RootObjectMapping still works
|
||||
public void testRootObjectMapperPropertiesDoNotCauseException() throws IOException {
|
||||
DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
|
||||
|
@ -404,7 +404,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
|
|||
parser.parse("test", new CompressedXContent(mapping));
|
||||
}
|
||||
|
||||
// issue https://github.com/elasticsearch/elasticsearch/issues/5864
|
||||
// issue https://github.com/elastic/elasticsearch/issues/5864
|
||||
public void testMetadataMappersStillWorking() throws MapperParsingException, IOException {
|
||||
String mapping = "{";
|
||||
Map<String, String> rootTypes = new HashMap<>();
|
||||
|
|
|
@ -19,13 +19,18 @@
package org.elasticsearch.index.mapper.boost;

import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentFieldMappers;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;

@ -33,6 +38,7 @@ import org.elasticsearch.test.InternalSettingsPlugin;
import java.util.Collection;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;

public class CustomBoostMappingTests extends ESSingleNodeTestCase {
@ -77,4 +83,87 @@ public class CustomBoostMappingTests extends ESSingleNodeTestCase {
|
|||
assertThat(doc.rootDoc().getField("f_field").boost(), equalTo(8.0f));
|
||||
assertThat(doc.rootDoc().getField("date_field").boost(), equalTo(9.0f));
|
||||
}
|
||||
|
||||
public void testBackCompatFieldMappingBoostValues() throws Exception {
|
||||
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
|
||||
.startObject("s_field").field("type", "keyword").field("boost", 2.0f).endObject()
|
||||
.startObject("l_field").field("type", "long").field("boost", 3.0f).startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.startObject("i_field").field("type", "integer").field("boost", 4.0f).startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.startObject("sh_field").field("type", "short").field("boost", 5.0f).startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.startObject("b_field").field("type", "byte").field("boost", 6.0f).startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.startObject("d_field").field("type", "double").field("boost", 7.0f).startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.startObject("f_field").field("type", "float").field("boost", 8.0f).startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.startObject("date_field").field("type", "date").field("boost", 9.0f).startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.endObject().endObject().endObject().string();
|
||||
|
||||
{
|
||||
IndexService indexService = createIndex("test", BW_SETTINGS);
|
||||
QueryShardContext context = indexService.newQueryShardContext();
|
||||
DocumentMapper mapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
|
||||
DocumentFieldMappers fieldMappers = mapper.mappers();
|
||||
assertThat(fieldMappers.getMapper("s_field").fieldType().termQuery("0", context), instanceOf(TermQuery.class));
|
||||
assertThat(fieldMappers.getMapper("l_field").fieldType().termQuery("0", context), instanceOf(TermQuery.class));
|
||||
assertThat(fieldMappers.getMapper("i_field").fieldType().termQuery("0", context), instanceOf(TermQuery.class));
|
||||
assertThat(fieldMappers.getMapper("sh_field").fieldType().termQuery("0", context), instanceOf(TermQuery.class));
|
||||
assertThat(fieldMappers.getMapper("b_field").fieldType().termQuery("0", context), instanceOf(TermQuery.class));
|
||||
assertThat(fieldMappers.getMapper("d_field").fieldType().termQuery("0", context), instanceOf(TermQuery.class));
|
||||
assertThat(fieldMappers.getMapper("f_field").fieldType().termQuery("0", context), instanceOf(TermQuery.class));
|
||||
assertThat(fieldMappers.getMapper("date_field").fieldType().termQuery("0", context), instanceOf(TermQuery.class));
|
||||
|
||||
ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject()
|
||||
.field("s_field", "s_value")
|
||||
.field("l_field", 1L)
|
||||
.field("i_field", 1)
|
||||
.field("sh_field", 1)
|
||||
.field("b_field", 1)
|
||||
.field("d_field", 1)
|
||||
.field("f_field", 1)
|
||||
.field("date_field", "20100101")
|
||||
.endObject().bytes());
|
||||
|
||||
assertThat(doc.rootDoc().getField("s_field").boost(), equalTo(2.0f));
|
||||
assertThat(doc.rootDoc().getField("l_field").boost(), equalTo(3.0f));
|
||||
assertThat(doc.rootDoc().getField("i_field").boost(), equalTo(4.0f));
|
||||
assertThat(doc.rootDoc().getField("sh_field").boost(), equalTo(5.0f));
|
||||
assertThat(doc.rootDoc().getField("b_field").boost(), equalTo(6.0f));
|
||||
assertThat(doc.rootDoc().getField("d_field").boost(), equalTo(7.0f));
|
||||
assertThat(doc.rootDoc().getField("f_field").boost(), equalTo(8.0f));
|
||||
assertThat(doc.rootDoc().getField("date_field").boost(), equalTo(9.0f));
|
||||
}
|
||||
|
||||
{
|
||||
IndexService indexService = createIndex("text");
|
||||
QueryShardContext context = indexService.newQueryShardContext();
|
||||
DocumentMapper mapper = indexService.mapperService().documentMapperParser().parse("type", new CompressedXContent(mapping));
|
||||
DocumentFieldMappers fieldMappers = mapper.mappers();
|
||||
assertThat(fieldMappers.getMapper("s_field").fieldType().termQuery("0", context), instanceOf(BoostQuery.class));
|
||||
assertThat(fieldMappers.getMapper("l_field").fieldType().termQuery("0", context), instanceOf(BoostQuery.class));
|
||||
assertThat(fieldMappers.getMapper("i_field").fieldType().termQuery("0", context), instanceOf(BoostQuery.class));
|
||||
assertThat(fieldMappers.getMapper("sh_field").fieldType().termQuery("0", context), instanceOf(BoostQuery.class));
|
||||
assertThat(fieldMappers.getMapper("b_field").fieldType().termQuery("0", context), instanceOf(BoostQuery.class));
|
||||
assertThat(fieldMappers.getMapper("d_field").fieldType().termQuery("0", context), instanceOf(BoostQuery.class));
|
||||
assertThat(fieldMappers.getMapper("f_field").fieldType().termQuery("0", context), instanceOf(BoostQuery.class));
|
||||
assertThat(fieldMappers.getMapper("date_field").fieldType().termQuery("0", context), instanceOf(BoostQuery.class));
|
||||
|
||||
ParsedDocument doc = mapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject()
|
||||
.field("s_field", "s_value")
|
||||
.field("l_field", 1L)
|
||||
.field("i_field", 1)
|
||||
.field("sh_field", 1)
|
||||
.field("b_field", 1)
|
||||
.field("d_field", 1)
|
||||
.field("f_field", 1)
|
||||
.field("date_field", "20100101")
|
||||
.endObject().bytes());
|
||||
|
||||
assertThat(doc.rootDoc().getField("s_field").boost(), equalTo(1f));
|
||||
assertThat(doc.rootDoc().getField("l_field").boost(), equalTo(1f));
|
||||
assertThat(doc.rootDoc().getField("i_field").boost(), equalTo(1f));
|
||||
assertThat(doc.rootDoc().getField("sh_field").boost(), equalTo(1f));
|
||||
assertThat(doc.rootDoc().getField("b_field").boost(), equalTo(1f));
|
||||
assertThat(doc.rootDoc().getField("d_field").boost(), equalTo(1f));
|
||||
assertThat(doc.rootDoc().getField("f_field").boost(), equalTo(1f));
|
||||
assertThat(doc.rootDoc().getField("date_field").boost(), equalTo(1f));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -36,6 +36,7 @@ import org.elasticsearch.test.InternalSettingsPlugin;
import java.util.Collection;

import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.equalTo;

/**
*/
@ -98,6 +99,97 @@ public class FieldLevelBoostTests extends ESSingleNodeTestCase {
|
|||
assertThat((double) f.boost(), closeTo(9.0, 0.001));
|
||||
}
|
||||
|
||||
public void testBackCompatFieldLevelMappingBoost() throws Exception {
|
||||
String mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
|
||||
.startObject("str_field").field("type", "keyword").field("boost", "2.0").endObject()
|
||||
.startObject("int_field").field("type", "integer").field("boost", "3.0").startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.startObject("byte_field").field("type", "byte").field("boost", "4.0").startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.startObject("date_field").field("type", "date").field("boost", "5.0").startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.startObject("double_field").field("type", "double").field("boost", "6.0").startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.startObject("float_field").field("type", "float").field("boost", "7.0").startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.startObject("long_field").field("type", "long").field("boost", "8.0").startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.startObject("short_field").field("type", "short").field("boost", "9.0").startObject("norms").field("enabled", true).endObject().endObject()
|
||||
.string();
|
||||
|
||||
{
|
||||
DocumentMapper docMapper = createIndex("test", BW_SETTINGS).mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
|
||||
BytesReference json = XContentFactory.jsonBuilder().startObject()
|
||||
.field("str_field", "some name")
|
||||
.field("int_field", 10)
|
||||
.field("byte_field", 20)
|
||||
.field("date_field", "2012-01-10")
|
||||
.field("double_field", 30.0)
|
||||
.field("float_field", 40.0)
|
||||
.field("long_field", 50)
|
||||
.field("short_field", 60)
|
||||
.bytes();
|
||||
Document doc = docMapper.parse("test", "person", "1", json).rootDoc();
|
||||
|
||||
IndexableField f = doc.getField("str_field");
|
||||
assertThat((double) f.boost(), closeTo(2.0, 0.001));
|
||||
|
||||
f = doc.getField("int_field");
|
||||
assertThat((double) f.boost(), closeTo(3.0, 0.001));
|
||||
|
||||
f = doc.getField("byte_field");
|
||||
assertThat((double) f.boost(), closeTo(4.0, 0.001));
|
||||
|
||||
f = doc.getField("date_field");
|
||||
assertThat((double) f.boost(), closeTo(5.0, 0.001));
|
||||
|
||||
f = doc.getField("double_field");
|
||||
assertThat((double) f.boost(), closeTo(6.0, 0.001));
|
||||
|
||||
f = doc.getField("float_field");
|
||||
assertThat((double) f.boost(), closeTo(7.0, 0.001));
|
||||
|
||||
f = doc.getField("long_field");
|
||||
assertThat((double) f.boost(), closeTo(8.0, 0.001));
|
||||
|
||||
f = doc.getField("short_field");
|
||||
assertThat((double) f.boost(), closeTo(9.0, 0.001));
|
||||
}
|
||||
|
||||
{
|
||||
DocumentMapper docMapper = createIndex("test2").mapperService().documentMapperParser().parse("person", new CompressedXContent(mapping));
|
||||
BytesReference json = XContentFactory.jsonBuilder().startObject()
|
||||
.field("str_field", "some name")
|
||||
.field("int_field", 10)
|
||||
.field("byte_field", 20)
|
||||
.field("date_field", "2012-01-10")
|
||||
.field("double_field", 30.0)
|
||||
.field("float_field", 40.0)
|
||||
.field("long_field", 50)
|
||||
.field("short_field", 60)
|
||||
.bytes();
|
||||
Document doc = docMapper.parse("test", "person", "1", json).rootDoc();
|
||||
|
||||
IndexableField f = doc.getField("str_field");
|
||||
assertThat(f.boost(), equalTo(1f));
|
||||
|
||||
f = doc.getField("int_field");
|
||||
assertThat(f.boost(), equalTo(1f));
|
||||
|
||||
f = doc.getField("byte_field");
|
||||
assertThat(f.boost(), equalTo(1f));
|
||||
|
||||
f = doc.getField("date_field");
|
||||
assertThat(f.boost(), equalTo(1f));
|
||||
|
||||
f = doc.getField("double_field");
|
||||
assertThat(f.boost(), equalTo(1f));
|
||||
|
||||
f = doc.getField("float_field");
|
||||
assertThat(f.boost(), equalTo(1f));
|
||||
|
||||
f = doc.getField("long_field");
|
||||
assertThat(f.boost(), equalTo(1f));
|
||||
|
||||
f = doc.getField("short_field");
|
||||
assertThat(f.boost(), equalTo(1f));
|
||||
}
|
||||
}
|
||||
|
||||
public void testBackCompatInvalidFieldLevelBoost() throws Exception {
|
||||
String mapping = XContentFactory.jsonBuilder().startObject().startObject("person").startObject("properties")
|
||||
.startObject("str_field").field("type", "string").endObject()
|
||||
|
|
|
@ -185,7 +185,7 @@ public class BoolQueryBuilderTests extends AbstractQueryTestCase<BoolQueryBuilde
}
}
// https://github.com/elasticsearch/elasticsearch/issues/7240
// https://github.com/elastic/elasticsearch/issues/7240
public void testEmptyBooleanQuery() throws Exception {
XContentBuilder contentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
BytesReference query = contentBuilder.startObject().startObject("bool").endObject().endObject().bytes();
@ -73,6 +73,8 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ
DATE_FIELD_NAME, "type=date",
OBJECT_FIELD_NAME, "type=object"
).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
mapperService.merge("just_a_type", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("just_a_type"
).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
}

@Override

@ -132,20 +134,26 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ
}
}
public void testIllegalValues() {
public void testIllegalValues() throws IOException {
QueryBuilder query = RandomQueryBuilder.createQuery(random());
try {
new HasParentQueryBuilder(null, query);
fail("must not be null");
} catch (IllegalArgumentException ex) {
}

try {
new HasParentQueryBuilder("foo", null);
fail("must not be null");
} catch (IllegalArgumentException ex) {
}

QueryShardContext context = createShardContext();
HasParentQueryBuilder queryBuilder = new HasParentQueryBuilder("just_a_type", new MatchAllQueryBuilder());
try {
queryBuilder.doToQuery(context);
} catch (QueryShardException e) {
assertThat(e.getMessage(), equalTo("[has_parent] no child types found for type [just_a_type]"));
}
}
@ -196,22 +196,19 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase<MatchQueryBuil
assertTrue(numericRangeQuery.includesMax());

double value;
double width = 0;
try {
double width;
if (queryBuilder.fieldName().equals(DATE_FIELD_NAME) == false) {
value = Double.parseDouble(queryBuilder.value().toString());
} catch (NumberFormatException e) {
// Maybe its a date
value = ISODateTimeFormat.dateTimeParser().parseMillis(queryBuilder.value().toString());
width = queryBuilder.fuzziness().asTimeValue().getMillis();
}

if (width == 0) {
if (queryBuilder.fuzziness().equals(Fuzziness.AUTO)) {
width = 1;
} else {
width = queryBuilder.fuzziness().asDouble();
}
} else {
value = ISODateTimeFormat.dateTimeParser().parseMillis(queryBuilder.value().toString());
width = queryBuilder.fuzziness().asTimeValue().getMillis();
}

assertEquals(value - width, numericRangeQuery.getMin().doubleValue(), width * .1);
assertEquals(value + width, numericRangeQuery.getMax().doubleValue(), width * .1);
}
@ -109,6 +109,8 @@ public class FileInfoTests extends ESTestCase {
builder.field(Fields.NAME, name);
builder.field(Fields.PHYSICAL_NAME, physicalName);
builder.field(Fields.LENGTH, length);
builder.field(Fields.WRITTEN_BY, Version.LATEST.toString());
builder.field(Fields.CHECKSUM, "666");
builder.endObject();
byte[] xContent = builder.bytes().toBytes();

@ -122,9 +124,9 @@ public class FileInfoTests extends ESTestCase {
assertThat(name, equalTo(parsedInfo.name()));
assertThat(physicalName, equalTo(parsedInfo.physicalName()));
assertThat(length, equalTo(parsedInfo.length()));
assertNull(parsedInfo.checksum());
assertNull(parsedInfo.metadata().checksum());
assertNull(parsedInfo.metadata().writtenBy());
assertEquals("666", parsedInfo.checksum());
assertEquals("666", parsedInfo.metadata().checksum());
assertEquals(Version.LATEST, parsedInfo.metadata().writtenBy());
} else {
try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(xContent)) {
parser.nextToken();

@ -139,14 +141,14 @@ public class FileInfoTests extends ESTestCase {
}

public void testGetPartSize() {
BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 36), new ByteSizeValue(6));
BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 36, "666"), new ByteSizeValue(6));
int numBytes = 0;
for (int i = 0; i < info.numberOfParts(); i++) {
numBytes += info.partBytes(i);
}
assertEquals(numBytes, 36);

info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 35), new ByteSizeValue(6));
info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 35, "666"), new ByteSizeValue(6));
numBytes = 0;
for (int i = 0; i < info.numberOfParts(); i++) {
numBytes += info.partBytes(i);

@ -154,7 +156,7 @@ public class FileInfoTests extends ESTestCase {
assertEquals(numBytes, 35);
final int numIters = randomIntBetween(10, 100);
for (int j = 0; j < numIters; j++) {
StoreFileMetaData metaData = new StoreFileMetaData("foo", randomIntBetween(0, 1000));
StoreFileMetaData metaData = new StoreFileMetaData("foo", randomIntBetween(0, 1000), "666");
info = new BlobStoreIndexShardSnapshot.FileInfo("foo", metaData, new ByteSizeValue(randomIntBetween(1, 1000)));
numBytes = 0;
for (int i = 0; i < info.numberOfParts(); i++) {
@ -69,7 +69,7 @@ public class ExceptionRetryIT extends ESIntegTestCase {
/**
* Tests retry mechanism when indexing. If an exception occurs when indexing then the indexing request is tried again before finally failing.
* If auto generated ids are used this must not lead to duplicate ids
* see https://github.com/elasticsearch/elasticsearch/issues/8788
* see https://github.com/elastic/elasticsearch/issues/8788
*/
public void testRetryDueToExceptionOnNetworkLayer() throws ExecutionException, InterruptedException, IOException {
final AtomicBoolean exceptionThrown = new AtomicBoolean(false);
@ -0,0 +1,76 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.store;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.RateLimitedFSDirectory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.store.SleepingLockWrapper;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;

public class FsDirectoryServiceTests extends ESTestCase {

public void testHasSleepWrapperOnSharedFS() throws IOException {
Settings build = randomBoolean() ?
Settings.builder().put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true).build() :
Settings.builder().put(IndexMetaData.SETTING_SHADOW_REPLICAS, true).build();
IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build);
IndexStoreConfig config = new IndexStoreConfig(build);
IndexStore store = new IndexStore(settings, config);
Path tempDir = createTempDir().resolve("foo").resolve("0");
Files.createDirectories(tempDir);
ShardPath path = new ShardPath(false, tempDir, tempDir, settings.getUUID(), new ShardId(settings.getIndex(), 0));
FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path);
Directory directory = fsDirectoryService.newDirectory();
assertTrue(directory instanceof RateLimitedFSDirectory);
RateLimitedFSDirectory rateLimitingDirectory = (RateLimitedFSDirectory) directory;
Directory delegate = rateLimitingDirectory.getDelegate();
assertTrue(delegate.getClass().toString(), delegate instanceof SleepingLockWrapper);
}

public void testHasNoSleepWrapperOnNormalFS() throws IOException {
Settings build = Settings.builder().put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "simplefs").build();
IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build);
IndexStoreConfig config = new IndexStoreConfig(build);
IndexStore store = new IndexStore(settings, config);
Path tempDir = createTempDir().resolve("foo").resolve("0");
Files.createDirectories(tempDir);
ShardPath path = new ShardPath(false, tempDir, tempDir, settings.getUUID(), new ShardId(settings.getIndex(), 0));
FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path);
Directory directory = fsDirectoryService.newDirectory();
assertTrue(directory instanceof RateLimitedFSDirectory);
RateLimitedFSDirectory rateLimitingDirectory = (RateLimitedFSDirectory) directory;
Directory delegate = rateLimitingDirectory.getDelegate();
assertFalse(delegate instanceof SleepingLockWrapper);
assertTrue(delegate instanceof SimpleFSDirectory);
}
}
@ -1,124 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.store;
|
||||
|
||||
import org.apache.lucene.index.CorruptIndexException;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.zip.Adler32;
|
||||
|
||||
/**
|
||||
* Simple tests for LegacyVerification (old segments)
|
||||
* @deprecated remove this test when support for lucene 4.x
|
||||
* segments is not longer needed.
|
||||
*/
|
||||
@Deprecated
|
||||
public class LegacyVerificationTests extends ESTestCase {
|
||||
|
||||
public void testAdler32() throws Exception {
|
||||
Adler32 expected = new Adler32();
|
||||
byte bytes[] = "abcdefgh".getBytes(StandardCharsets.UTF_8);
|
||||
expected.update(bytes);
|
||||
String expectedString = Store.digestToString(expected.getValue());
|
||||
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexOutput o = dir.createOutput("legacy", IOContext.DEFAULT);
|
||||
VerifyingIndexOutput out = new LegacyVerification.Adler32VerifyingIndexOutput(o, expectedString, 8);
|
||||
out.writeBytes(bytes, 0, bytes.length);
|
||||
out.verify();
|
||||
out.close();
|
||||
out.verify();
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testAdler32Corrupt() throws Exception {
|
||||
Adler32 expected = new Adler32();
|
||||
byte bytes[] = "abcdefgh".getBytes(StandardCharsets.UTF_8);
|
||||
expected.update(bytes);
|
||||
String expectedString = Store.digestToString(expected.getValue());
|
||||
|
||||
byte corruptBytes[] = "abcdefch".getBytes(StandardCharsets.UTF_8);
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexOutput o = dir.createOutput("legacy", IOContext.DEFAULT);
|
||||
VerifyingIndexOutput out = new LegacyVerification.Adler32VerifyingIndexOutput(o, expectedString, 8);
|
||||
out.writeBytes(corruptBytes, 0, bytes.length);
|
||||
try {
|
||||
out.verify();
|
||||
fail();
|
||||
} catch (CorruptIndexException e) {
|
||||
// expected exception
|
||||
}
|
||||
out.close();
|
||||
|
||||
try {
|
||||
out.verify();
|
||||
fail();
|
||||
} catch (CorruptIndexException e) {
|
||||
// expected exception
|
||||
}
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testLengthOnlyOneByte() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexOutput o = dir.createOutput("oneByte", IOContext.DEFAULT);
|
||||
VerifyingIndexOutput out = new LegacyVerification.LengthVerifyingIndexOutput(o, 1);
|
||||
out.writeByte((byte) 3);
|
||||
out.verify();
|
||||
out.close();
|
||||
out.verify();
|
||||
|
||||
dir.close();
|
||||
}
|
||||
|
||||
public void testLengthOnlyCorrupt() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
|
||||
IndexOutput o = dir.createOutput("oneByte", IOContext.DEFAULT);
|
||||
VerifyingIndexOutput out = new LegacyVerification.LengthVerifyingIndexOutput(o, 2);
|
||||
out.writeByte((byte) 3);
|
||||
try {
|
||||
out.verify();
|
||||
fail();
|
||||
} catch (CorruptIndexException expected) {
|
||||
// expected exception
|
||||
}
|
||||
|
||||
out.close();
|
||||
|
||||
try {
|
||||
out.verify();
|
||||
fail();
|
||||
} catch (CorruptIndexException expected) {
|
||||
// expected exception
|
||||
}
|
||||
|
||||
dir.close();
|
||||
}
|
||||
}
|
|
@ -355,95 +355,6 @@ public class StoreTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
// IF THIS TEST FAILS ON UPGRADE GO LOOK AT THE OldSIMockingCodec!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
@AwaitsFix(bugUrl="Fails with seed E1394B038144F6E")
|
||||
// The test currently fails because the segment infos and the index don't
|
||||
// agree on the oldest version of a segment. We should fix this test by
|
||||
// switching to a static bw index
|
||||
public void testWriteLegacyChecksums() throws IOException {
|
||||
final ShardId shardId = new ShardId("index", "_na_", 1);
|
||||
DirectoryService directoryService = new LuceneManagedDirectoryService(random());
|
||||
Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
|
||||
// set default codec - all segments need checksums
|
||||
final boolean usesOldCodec = randomBoolean();
|
||||
IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(usesOldCodec ? new OldSIMockingCodec() : TestUtil.getDefaultCodec()));
|
||||
int docs = 1 + random().nextInt(100);
|
||||
|
||||
for (int i = 0; i < docs; i++) {
|
||||
Document doc = new Document();
|
||||
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
|
||||
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
|
||||
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
if (random().nextBoolean()) {
|
||||
for (int i = 0; i < docs; i++) {
|
||||
if (random().nextBoolean()) {
|
||||
Document doc = new Document();
|
||||
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
|
||||
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
|
||||
writer.updateDocument(new Term("id", "" + i), doc);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (random().nextBoolean()) {
|
||||
DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
|
||||
}
|
||||
Store.MetadataSnapshot metadata;
|
||||
// check before we committed
|
||||
try {
|
||||
store.getMetadata();
|
||||
fail("no index present - expected exception");
|
||||
} catch (IndexNotFoundException ex) {
|
||||
// expected
|
||||
}
|
||||
assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
|
||||
|
||||
writer.close();
|
||||
Store.LegacyChecksums checksums = new Store.LegacyChecksums();
|
||||
Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
|
||||
for (String file : store.directory().listAll()) {
|
||||
if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN) || file.startsWith("extra")) {
|
||||
continue;
|
||||
}
|
||||
BytesRef hash = new BytesRef();
|
||||
if (file.startsWith("segments")) {
|
||||
hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
|
||||
}
|
||||
StoreFileMetaData storeFileMetaData = new StoreFileMetaData(file, store.directory().fileLength(file), file + "checksum", null, hash);
|
||||
legacyMeta.put(file, storeFileMetaData);
|
||||
checksums.add(storeFileMetaData);
|
||||
}
|
||||
checksums.write(store);
|
||||
|
||||
metadata = store.getMetadata();
|
||||
Map<String, StoreFileMetaData> stringStoreFileMetaDataMap = metadata.asMap();
|
||||
assertThat(legacyMeta.size(), equalTo(stringStoreFileMetaDataMap.size()));
|
||||
if (usesOldCodec) {
|
||||
for (StoreFileMetaData meta : legacyMeta.values()) {
|
||||
assertTrue(meta.toString(), stringStoreFileMetaDataMap.containsKey(meta.name()));
|
||||
assertEquals(meta.name() + "checksum", meta.checksum());
|
||||
assertTrue(meta + " vs. " + stringStoreFileMetaDataMap.get(meta.name()), stringStoreFileMetaDataMap.get(meta.name()).isSame(meta));
|
||||
}
|
||||
} else {
|
||||
|
||||
// even if we have a legacy checksum - if we use a new codec we should reuse
|
||||
for (StoreFileMetaData meta : legacyMeta.values()) {
|
||||
assertTrue(meta.toString(), stringStoreFileMetaDataMap.containsKey(meta.name()));
|
||||
assertFalse(meta + " vs. " + stringStoreFileMetaDataMap.get(meta.name()), stringStoreFileMetaDataMap.get(meta.name()).isSame(meta));
|
||||
StoreFileMetaData storeFileMetaData = metadata.get(meta.name());
|
||||
try (IndexInput input = store.openVerifyingInput(meta.name(), IOContext.DEFAULT, storeFileMetaData)) {
|
||||
assertTrue(storeFileMetaData.toString(), input instanceof Store.VerifyingIndexInput);
|
||||
input.seek(meta.length());
|
||||
Store.verify(input);
|
||||
}
|
||||
}
|
||||
}
|
||||
assertDeleteContent(store, directoryService);
|
||||
IOUtils.close(store);
|
||||
|
||||
}
|
||||
|
||||
public void testNewChecksums() throws IOException {
|
||||
final ShardId shardId = new ShardId("index", "_na_", 1);
|
||||
DirectoryService directoryService = new LuceneManagedDirectoryService(random());
|
||||
|
@ -489,7 +400,6 @@ public class StoreTests extends ESTestCase {
try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
assertThat(meta.hasLegacyChecksum(), equalTo(false));
assertThat(meta.writtenBy(), equalTo(Version.LATEST));
if (meta.name().endsWith(".si") || meta.name().startsWith("segments_")) {
assertThat(meta.hash().length, greaterThan(0));
@ -503,97 +413,6 @@ public class StoreTests extends ESTestCase {
|
|||
IOUtils.close(store);
|
||||
}
|
||||
|
||||
public void testMixedChecksums() throws IOException {
|
||||
final ShardId shardId = new ShardId("index", "_na_", 1);
|
||||
DirectoryService directoryService = new LuceneManagedDirectoryService(random());
|
||||
Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
|
||||
// this time random codec....
|
||||
IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()));
|
||||
int docs = 1 + random().nextInt(100);
|
||||
|
||||
for (int i = 0; i < docs; i++) {
|
||||
Document doc = new Document();
|
||||
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
|
||||
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
|
||||
doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
|
||||
writer.addDocument(doc);
|
||||
}
|
||||
if (random().nextBoolean()) {
|
||||
for (int i = 0; i < docs; i++) {
|
||||
if (random().nextBoolean()) {
|
||||
Document doc = new Document();
|
||||
doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
|
||||
doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
|
||||
writer.updateDocument(new Term("id", "" + i), doc);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (random().nextBoolean()) {
|
||||
DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
|
||||
}
|
||||
Store.MetadataSnapshot metadata;
|
||||
// check before we committed
|
||||
try {
|
||||
store.getMetadata();
|
||||
fail("no index present - expected exception");
|
||||
} catch (IndexNotFoundException ex) {
|
||||
// expected
|
||||
}
|
||||
assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
|
||||
writer.commit();
|
||||
writer.close();
|
||||
Store.LegacyChecksums checksums = new Store.LegacyChecksums();
|
||||
metadata = store.getMetadata();
|
||||
assertThat(metadata.asMap().isEmpty(), is(false));
|
||||
for (StoreFileMetaData meta : metadata) {
|
||||
try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
|
||||
if (meta.checksum() == null) {
|
||||
String checksum = null;
|
||||
try {
|
||||
CodecUtil.retrieveChecksum(input);
|
||||
fail("expected a corrupt index - posting format has not checksums");
|
||||
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
|
||||
try (ChecksumIndexInput checksumIndexInput = store.directory().openChecksumInput(meta.name(), IOContext.DEFAULT)) {
|
||||
checksumIndexInput.seek(meta.length());
|
||||
checksum = Store.digestToString(checksumIndexInput.getChecksum());
|
||||
}
|
||||
// fine - it's a postings format without checksums
|
||||
checksums.add(new StoreFileMetaData(meta.name(), meta.length(), checksum, null));
|
||||
}
|
||||
} else {
|
||||
String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
|
||||
assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
|
||||
assertThat(meta.hasLegacyChecksum(), equalTo(false));
|
||||
assertThat(meta.writtenBy(), equalTo(Version.LATEST));
|
||||
}
|
||||
}
|
||||
}
|
||||
assertConsistent(store, metadata);
|
||||
checksums.write(store);
|
||||
metadata = store.getMetadata();
|
||||
assertThat(metadata.asMap().isEmpty(), is(false));
|
||||
for (StoreFileMetaData meta : metadata) {
|
||||
assertThat("file: " + meta.name() + " has a null checksum", meta.checksum(), not(nullValue()));
|
||||
if (meta.hasLegacyChecksum()) {
|
||||
try (ChecksumIndexInput checksumIndexInput = store.directory().openChecksumInput(meta.name(), IOContext.DEFAULT)) {
|
||||
checksumIndexInput.seek(meta.length());
|
||||
assertThat(meta.checksum(), equalTo(Store.digestToString(checksumIndexInput.getChecksum())));
|
||||
}
|
||||
} else {
|
||||
try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
|
||||
String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
|
||||
assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
|
||||
assertThat(meta.hasLegacyChecksum(), equalTo(false));
|
||||
assertThat(meta.writtenBy(), equalTo(Version.LATEST));
|
||||
}
|
||||
}
|
||||
}
|
||||
assertConsistent(store, metadata);
|
||||
TestUtil.checkIndex(store.directory());
|
||||
assertDeleteContent(store, directoryService);
|
||||
IOUtils.close(store);
|
||||
}
|
||||
|
||||
public void testRenameFile() throws IOException {
|
||||
final ShardId shardId = new ShardId("index", "_na_", 1);
|
||||
DirectoryService directoryService = new LuceneManagedDirectoryService(random(), false);
|
||||
|
@ -654,18 +473,7 @@ public class StoreTests extends ESTestCase {
}

final Adler32 adler32 = new Adler32();
long legacyFileLength = 0;
try (IndexOutput output = dir.createOutput("legacy.bin", IOContext.DEFAULT)) {
int iters = scaledRandomIntBetween(10, 100);
for (int i = 0; i < iters; i++) {
BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
adler32.update(bytesRef.bytes, bytesRef.offset, bytesRef.length);
legacyFileLength += bytesRef.length;
}
}
final long luceneChecksum;
final long adler32LegacyChecksum = adler32.getValue();
try (IndexInput indexInput = dir.openInput("lucene_checksum.bin", IOContext.DEFAULT)) {
assertEquals(luceneFileLength, indexInput.length());
luceneChecksum = CodecUtil.retrieveChecksum(indexInput);

@ -673,38 +481,22 @@ public class StoreTests extends ESTestCase {
{ // positive check
StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0);
StoreFileMetaData legacy = new StoreFileMetaData("legacy.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum));
assertTrue(legacy.hasLegacyChecksum());
assertFalse(lucene.hasLegacyChecksum());
assertTrue(Store.checkIntegrityNoException(lucene, dir));
assertTrue(Store.checkIntegrityNoException(legacy, dir));
}

{ // negative check - wrong checksum
StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength, Store.digestToString(luceneChecksum + 1), Version.LUCENE_4_8_0);
StoreFileMetaData legacy = new StoreFileMetaData("legacy.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum + 1));
assertTrue(legacy.hasLegacyChecksum());
assertFalse(lucene.hasLegacyChecksum());
assertFalse(Store.checkIntegrityNoException(lucene, dir));
assertFalse(Store.checkIntegrityNoException(legacy, dir));
}

{ // negative check - wrong length
StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength + 1, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0);
StoreFileMetaData legacy = new StoreFileMetaData("legacy.bin", legacyFileLength + 1, Store.digestToString(adler32LegacyChecksum));
assertTrue(legacy.hasLegacyChecksum());
assertFalse(lucene.hasLegacyChecksum());
assertFalse(Store.checkIntegrityNoException(lucene, dir));
assertFalse(Store.checkIntegrityNoException(legacy, dir));
}

{ // negative check - wrong file
StoreFileMetaData lucene = new StoreFileMetaData("legacy.bin", luceneFileLength, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0);
StoreFileMetaData legacy = new StoreFileMetaData("lucene_checksum.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum));
assertTrue(legacy.hasLegacyChecksum());
assertFalse(lucene.hasLegacyChecksum());
assertFalse(Store.checkIntegrityNoException(lucene, dir));
assertFalse(Store.checkIntegrityNoException(legacy, dir));
}
dir.close();
@ -827,7 +619,7 @@ public class StoreTests extends ESTestCase {

public static void assertConsistent(Store store, Store.MetadataSnapshot metadata) throws IOException {
for (String file : store.directory().listAll()) {
if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && !Store.isChecksum(file) && file.startsWith("extra") == false) {
if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && file.startsWith("extra") == false) {
assertTrue(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file));
} else {
assertFalse(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file));

@ -835,21 +627,6 @@ public class StoreTests extends ESTestCase {
}
}

/**
* Legacy indices without lucene CRC32 did never write or calculate checksums for segments_N files
* but for other files
*/
public void testRecoveryDiffWithLegacyCommit() {
Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
metaDataMap.put("segments_1", new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[]{1})));
metaDataMap.put("_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
Store.MetadataSnapshot first = new Store.MetadataSnapshot(unmodifiableMap(new HashMap<>(metaDataMap)), emptyMap(), 0);

Store.MetadataSnapshot second = new Store.MetadataSnapshot(unmodifiableMap(new HashMap<>(metaDataMap)), emptyMap(), 0);
Store.RecoveryDiff recoveryDiff = first.recoveryDiff(second);
assertEquals(recoveryDiff.toString(), recoveryDiff.different.size(), 2);
}

public void testRecoveryDiff() throws IOException, InterruptedException {
int numDocs = 2 + random().nextInt(100);
List<Document> docs = new ArrayList<>();
@ -1043,21 +820,6 @@ public class StoreTests extends ESTestCase {
|
|||
|
||||
Store.MetadataSnapshot secondMeta = store.getMetadata();
|
||||
|
||||
Store.LegacyChecksums checksums = new Store.LegacyChecksums();
|
||||
Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
|
||||
for (String file : store.directory().listAll()) {
|
||||
if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN) || file.startsWith("extra")) {
|
||||
continue;
|
||||
}
|
||||
BytesRef hash = new BytesRef();
|
||||
if (file.startsWith("segments")) {
|
||||
hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
|
||||
}
|
||||
StoreFileMetaData storeFileMetaData = new StoreFileMetaData(file, store.directory().fileLength(file), file + "checksum", null, hash);
|
||||
legacyMeta.put(file, storeFileMetaData);
|
||||
checksums.add(storeFileMetaData);
|
||||
}
|
||||
checksums.write(store); // write one checksum file here - we expect it to survive all the cleanups
|
||||
|
||||
if (randomBoolean()) {
|
||||
store.cleanupAndVerify("test", firstMeta);
|
||||
|
@ -1068,16 +830,13 @@ public class StoreTests extends ESTestCase {
|
|||
if (file.startsWith("extra")) {
|
||||
continue;
|
||||
}
|
||||
assertTrue(firstMeta.contains(file) || Store.isChecksum(file) || file.equals("write.lock"));
|
||||
if (Store.isChecksum(file)) {
|
||||
numChecksums++;
|
||||
} else if (secondMeta.contains(file) == false) {
|
||||
assertTrue(firstMeta.contains(file) || file.equals("write.lock"));
|
||||
if (secondMeta.contains(file) == false) {
|
||||
numNotFound++;
|
||||
}
|
||||
|
||||
}
|
||||
assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
|
||||
assertEquals("we wrote one checksum but it's gone now? - checksums are supposed to be kept", numChecksums, 1);
|
||||
} else {
|
||||
store.cleanupAndVerify("test", secondMeta);
|
||||
String[] strings = store.directory().listAll();
|
||||
|
@ -1087,16 +846,13 @@ public class StoreTests extends ESTestCase {
|
|||
if (file.startsWith("extra")) {
|
||||
continue;
|
||||
}
|
||||
assertTrue(file, secondMeta.contains(file) || Store.isChecksum(file) || file.equals("write.lock"));
|
||||
if (Store.isChecksum(file)) {
|
||||
numChecksums++;
|
||||
} else if (firstMeta.contains(file) == false) {
|
||||
assertTrue(file, secondMeta.contains(file) || file.equals("write.lock"));
|
||||
if (firstMeta.contains(file) == false) {
|
||||
numNotFound++;
|
||||
}
|
||||
|
||||
}
|
||||
assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
|
||||
assertEquals("we wrote one checksum but it's gone now? - checksums are supposed to be kept", numChecksums, 1);
|
||||
}
|
||||
|
||||
deleteContent(store.directory());
|
||||
|
@ -1105,8 +861,8 @@ public class StoreTests extends ESTestCase {

public void testCleanUpWithLegacyChecksums() throws IOException {
Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
metaDataMap.put("segments_1", new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[]{1})));
metaDataMap.put("_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
metaDataMap.put("segments_1", new StoreFileMetaData("segments_1", 50, "foobar", Version.LUCENE_4_8_0, new BytesRef(new byte[]{1})));
metaDataMap.put("_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", Version.LUCENE_4_8_0, new BytesRef()));
Store.MetadataSnapshot snapshot = new Store.MetadataSnapshot(unmodifiableMap(metaDataMap), emptyMap(), 0);

final ShardId shardId = new ShardId("index", "_na_", 1);

@ -1232,8 +988,8 @@ public class StoreTests extends ESTestCase {
}

protected Store.MetadataSnapshot createMetaDataSnapshot() {
StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1);
StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1);
StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1, "666");
StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1, "666");
Map<String, StoreFileMetaData> storeFileMetaDataMap = new HashMap<>();
storeFileMetaDataMap.put(storeFileMetaData1.name(), storeFileMetaData1);
storeFileMetaDataMap.put(storeFileMetaData2.name(), storeFileMetaData2);
@ -117,7 +117,7 @@ public class PreBuiltAnalyzerIntegrationIT extends ESIntegTestCase {

/**
* Test case for #5030: Upgrading analysis plugins fails
* See https://github.com/elasticsearch/elasticsearch/issues/5030
* See https://github.com/elastic/elasticsearch/issues/5030
*/
public void testThatPluginAnalyzersCanBeUpdated() throws Exception {
final XContentBuilder mapping = jsonBuilder().startObject()

@ -169,7 +169,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
}

/*
Second regression test for https://github.com/elasticsearch/elasticsearch/issues/3381
Second regression test for https://github.com/elastic/elasticsearch/issues/3381
*/
public void testUpdateMappingNoChanges() throws Exception {
client().admin().indices().prepareCreate("test")
@ -18,6 +18,8 @@
|
|||
*/
|
||||
package org.elasticsearch.indices.recovery;
|
||||
|
||||
import org.apache.lucene.codecs.CodecUtil;
|
||||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexOutput;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
|
@ -25,6 +27,7 @@ import org.elasticsearch.common.transport.LocalTransportAddress;
|
|||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.index.store.StoreFileMetaData;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
|
||||
|
@ -35,7 +38,7 @@ import java.util.regex.Pattern;
|
|||
/**
|
||||
*/
|
||||
public class RecoveryStatusTests extends ESSingleNodeTestCase {
|
||||
|
||||
|
||||
public void testRenameTempFiles() throws IOException {
|
||||
IndexService service = createIndex("foo");
|
||||
|
||||
|
@ -50,14 +53,16 @@ public class RecoveryStatusTests extends ESSingleNodeTestCase {
|
|||
public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
|
||||
}
|
||||
});
|
||||
try (IndexOutput indexOutput = status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8), status.store())) {
|
||||
try (IndexOutput indexOutput = status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw"), status.store())) {
|
||||
indexOutput.writeInt(1);
|
||||
IndexOutput openIndexOutput = status.getOpenIndexOutput("foo.bar");
|
||||
assertSame(openIndexOutput, indexOutput);
|
||||
openIndexOutput.writeInt(1);
|
||||
CodecUtil.writeFooter(indexOutput);
|
||||
}
|
||||
|
||||
try {
|
||||
status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8), status.store());
|
||||
status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw"), status.store());
|
||||
fail("file foo.bar is already opened and registered");
|
||||
} catch (IllegalStateException ex) {
|
||||
assertEquals("output for file [foo.bar] has already been created", ex.getMessage());
|
||||
|
|
|
@ -19,9 +19,13 @@
|
|||
|
||||
package org.elasticsearch.node.internal;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
|
||||
import org.elasticsearch.common.cli.MockTerminal;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.common.cli.CliToolTestCase;
|
||||
import org.elasticsearch.common.cli.Terminal;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.SettingsException;
|
||||
import org.elasticsearch.env.Environment;
|
||||
|
@ -29,17 +33,9 @@ import org.elasticsearch.test.ESTestCase;
|
|||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
public class InternalSettingsPreparerTests extends ESTestCase {
|
||||
|
||||
|
@ -81,17 +77,9 @@ public class InternalSettingsPreparerTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testReplacePromptPlaceholders() {
|
||||
final Terminal terminal = new CliToolTestCase.MockTerminal() {
|
||||
@Override
|
||||
public char[] readSecret(String message) {
|
||||
return "replaced".toCharArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String readText(String message) {
|
||||
return "text";
|
||||
}
|
||||
};
|
||||
MockTerminal terminal = new MockTerminal();
|
||||
terminal.addTextInput("text");
|
||||
terminal.addSecretInput("replaced");
|
||||
|
||||
Settings.Builder builder = settingsBuilder()
|
||||
.put(baseEnvSettings)
|
||||
|
|
|
@ -20,6 +20,7 @@
package org.elasticsearch.plugins;

import org.elasticsearch.common.cli.CliToolTestCase;
import org.elasticsearch.common.cli.MockTerminal;

import static org.elasticsearch.common.cli.CliTool.ExitStatus.OK_AND_EXIT;
import static org.hamcrest.Matchers.containsString;

@ -28,22 +29,22 @@ import static org.hamcrest.Matchers.is;

public class PluginCliTests extends CliToolTestCase {
public void testHelpWorks() throws Exception {
CliToolTestCase.CaptureOutputTerminal terminal = new CliToolTestCase.CaptureOutputTerminal();
MockTerminal terminal = new MockTerminal();
assertThat(new PluginCli(terminal).execute(args("--help")), is(OK_AND_EXIT));
assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin.help");

terminal.getTerminalOutput().clear();
terminal.resetOutput();
assertThat(new PluginCli(terminal).execute(args("install -h")), is(OK_AND_EXIT));
assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-install.help");
for (String plugin : InstallPluginCommand.OFFICIAL_PLUGINS) {
assertThat(terminal.getTerminalOutput(), hasItem(containsString(plugin)));
assertThat(terminal.getOutput(), containsString(plugin));
}

terminal.getTerminalOutput().clear();
terminal.resetOutput();
assertThat(new PluginCli(terminal).execute(args("remove --help")), is(OK_AND_EXIT));
assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-remove.help");

terminal.getTerminalOutput().clear();
terminal.resetOutput();
assertThat(new PluginCli(terminal).execute(args("list -h")), is(OK_AND_EXIT));
assertTerminalOutputContainsHelpFile(terminal, "/org/elasticsearch/plugins/plugin-list.help");
}
@ -249,7 +249,7 @@ public class AliasRoutingIT extends ESIntegTestCase {
|
|||
}
|
||||
|
||||
/*
|
||||
See https://github.com/elasticsearch/elasticsearch/issues/2682
|
||||
See https://github.com/elastic/elasticsearch/issues/2682
|
||||
Searching on more than one index, if one of those is an alias with configured routing, the shards that belonged
|
||||
to the other indices (without routing) were not taken into account in PlainOperationRouting#searchShards.
|
||||
That affected the number of shards that we executed the search on, thus some documents were missing in the search results.
|
||||
|
@ -273,7 +273,7 @@ public class AliasRoutingIT extends ESIntegTestCase {
|
|||
}
|
||||
|
||||
/*
|
||||
See https://github.com/elasticsearch/elasticsearch/pull/3268
|
||||
See https://github.com/elastic/elasticsearch/pull/3268
|
||||
Searching on more than one index, if one of those is an alias with configured routing, the shards that belonged
|
||||
to the other indices (without routing) were not taken into account in PlainOperationRouting#searchShardsCount.
|
||||
That could cause returning 1, which led to forcing the QUERY_AND_FETCH mode.
|
||||
|
|
|
@@ -106,7 +106,7 @@ public class FilterIT extends ESIntegTestCase {
    }

    // See NullPointer issue when filters are empty:
    // https://github.com/elasticsearch/elasticsearch/issues/8438
    // https://github.com/elastic/elasticsearch/issues/8438
    public void testEmptyFilterDeclarations() throws Exception {
        QueryBuilder emptyFilter = new BoolQueryBuilder();
        SearchResponse response = client().prepareSearch("idx").addAggregation(filter("tag1", emptyFilter)).execute().actionGet();

@@ -132,7 +132,7 @@ public class FiltersIT extends ESIntegTestCase {
    }

    // See NullPointer issue when filters are empty:
    // https://github.com/elasticsearch/elasticsearch/issues/8438
    // https://github.com/elastic/elasticsearch/issues/8438
    public void testEmptyFilterDeclarations() throws Exception {
        QueryBuilder<?> emptyFilter = new BoolQueryBuilder();
        SearchResponse response = client().prepareSearch("idx")

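Both empty-filter hunks above guard against the NullPointerException from issue 8438. A condensed sketch of the pattern they exercise, built only from calls visible in the changed lines (the assertion is illustrative):

        QueryBuilder emptyFilter = new BoolQueryBuilder();   // no clauses at all
        SearchResponse response = client().prepareSearch("idx")
                .addAggregation(filter("tag1", emptyFilter)) // used to trip the NPE
                .get();
        assertNoFailures(response);
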
@@ -360,7 +360,7 @@ public class NestedIT extends ESIntegTestCase {
        }
    }

    // Test based on: https://github.com/elasticsearch/elasticsearch/issues/9280
    // Test based on: https://github.com/elastic/elasticsearch/issues/9280
    public void testParentFilterResolvedCorrectly() throws Exception {
        XContentBuilder mapping = jsonBuilder().startObject().startObject("provider").startObject("properties")
                .startObject("comments")

@@ -309,7 +309,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
        indexRandom(true, false, indexRequestBuilderList);

        // Now create some holes in the index with selective deletes caused by updates.
        // This is the scenario that caused this issue https://github.com/elasticsearch/elasticsearch/issues/7951
        // This is the scenario that caused this issue https://github.com/elastic/elasticsearch/issues/7951
        // Scoring algorithms throw exceptions if term docFreqs exceed the reported size of the index
        // from which they are taken so need to make sure this doesn't happen.
        String[] text = cat1v1;

@@ -49,7 +49,7 @@ public class TermsShardMinDocCountIT extends ESIntegTestCase {
        return randomBoolean() ? null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString();
    }

    // see https://github.com/elasticsearch/elasticsearch/issues/5998
    // see https://github.com/elastic/elasticsearch/issues/5998
    public void testShardMinDocCountSignificantTermsTest() throws Exception {
        String termtype = "text";
        if (randomBoolean()) {

@@ -107,7 +107,7 @@ public class TermsShardMinDocCountIT extends ESIntegTestCase {
        }
    }

    // see https://github.com/elasticsearch/elasticsearch/issues/5998
    // see https://github.com/elastic/elasticsearch/issues/5998
    public void testShardMinDocCountTermsTest() throws Exception {
        final String [] termTypes = {"text", "long", "integer", "float", "double"};
        String termtype = termTypes[randomInt(termTypes.length - 1)];

@@ -758,11 +758,11 @@ public class ChildQuerySearchIT extends ESIntegTestCase {
        assertNoFailures(response);
        assertThat(response.getHits().totalHits(), equalTo(0L));

        response = client().prepareSearch("test").setQuery(QueryBuilders.hasParentQuery("child", matchQuery("text", "value"))).get();
        response = client().prepareSearch("test").setQuery(QueryBuilders.hasParentQuery("parent", matchQuery("text", "value"))).get();
        assertNoFailures(response);
        assertThat(response.getHits().totalHits(), equalTo(0L));

        response = client().prepareSearch("test").setQuery(QueryBuilders.hasParentQuery("child", matchQuery("text", "value")).score(true))
        response = client().prepareSearch("test").setQuery(QueryBuilders.hasParentQuery("parent", matchQuery("text", "value")).score(true))
                .get();
        assertNoFailures(response);
        assertThat(response.getHits().totalHits(), equalTo(0L));

@@ -1894,11 +1894,6 @@ public class ChildQuerySearchIT extends ESIntegTestCase {
            fail();
        } catch (SearchPhaseExecutionException e) {
        }

        SearchResponse response = client().prepareSearch("test")
                .setQuery(QueryBuilders.hasParentQuery("parent", matchAllQuery()))
                .get();
        assertHitCount(response, 0);
    }

    static HasChildQueryBuilder hasChildQuery(String type, QueryBuilder queryBuilder) {

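The ChildQuerySearchIT hunk above corrects the first argument of hasParentQuery: it must name the parent type ("parent"), not the child type. A sketch of the corrected call, assembled from the changed lines:

        SearchResponse response = client().prepareSearch("test")
                .setQuery(QueryBuilders.hasParentQuery("parent", matchQuery("text", "value"))) // parent type, not "child"
                .get();
        assertNoFailures(response);
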
@@ -405,7 +405,7 @@ public class QueryRescorerIT extends ESIntegTestCase {
        }
    }

    // forces QUERY_THEN_FETCH because of https://github.com/elasticsearch/elasticsearch/issues/4829
    // forces QUERY_THEN_FETCH because of https://github.com/elastic/elasticsearch/issues/4829
    public void testEquivalence() throws Exception {
        // no dummy docs since merges can change scores while we run queries.
        int numDocs = indexRandomNumbers("whitespace", -1, false);

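The QueryRescorerIT comment above pins the search type to QUERY_THEN_FETCH. A hypothetical sketch of forcing the search type on a request; SearchType is assumed to be org.elasticsearch.action.search.SearchType, and the index name is illustrative:

        SearchResponse response = client().prepareSearch("test")
                .setSearchType(SearchType.QUERY_THEN_FETCH) // force QUERY_THEN_FETCH, as the comment above describes
                .setQuery(QueryBuilders.matchAllQuery())
                .get();
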
@@ -2477,7 +2477,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
        IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[COUNT];
        for (int i = 0; i < COUNT; i++) {
            //generating text with word to highlight in a different position
            //(https://github.com/elasticsearch/elasticsearch/issues/4103)
            //(https://github.com/elastic/elasticsearch/issues/4103)
            String prefix = randomAsciiOfLengthBetween(5, 30);
            prefixes.put(String.valueOf(i), prefix);
            indexRequestBuilders[i] = client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field1", "Sentence " + prefix

@@ -315,7 +315,7 @@ public class MatchedQueriesIT extends ESIntegTestCase {
    }

    /**
     * Test case for issue #4361: https://github.com/elasticsearch/elasticsearch/issues/4361
     * Test case for issue #4361: https://github.com/elastic/elasticsearch/issues/4361
     */
    public void testMatchedWithShould() throws Exception {
        createIndex("test");

@@ -156,7 +156,7 @@ public class SearchQueryIT extends ESIntegTestCase {
        assertHitCount(client().prepareSearch().setQuery(queryStringQuery("")).get(), 0L); // return no docs
    }

    // see https://github.com/elasticsearch/elasticsearch/issues/3177
    // see https://github.com/elastic/elasticsearch/issues/3177
    public void testIssue3177() {
        createIndex("test");
        client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();

@@ -86,7 +86,7 @@ public class FieldSortIT extends ESIntegTestCase {
        return pluginList(InternalSettingsPlugin.class);
    }

    @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9421")
    @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/9421")
    public void testIssue8226() {
        int numIndices = between(5, 10);
        final boolean useMapping = randomBoolean();

@@ -1382,7 +1382,7 @@ public class FieldSortIT extends ESIntegTestCase {
    }

    /**
     * Test case for issue 6150: https://github.com/elasticsearch/elasticsearch/issues/6150
     * Test case for issue 6150: https://github.com/elastic/elasticsearch/issues/6150
     */
    public void testNestedSort() throws IOException, InterruptedException, ExecutionException {
        assertAcked(prepareCreate("test")

@@ -341,7 +341,7 @@ public class GeoDistanceIT extends ESIntegTestCase {
    }

    // Regression bug:
    // https://github.com/elasticsearch/elasticsearch/issues/2851
    // https://github.com/elastic/elasticsearch/issues/2851
    public void testDistanceSortingWithMissingGeoPoint() throws Exception {
        Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT);
        Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();

@@ -76,7 +76,7 @@ public class SourceFetchingIT extends ESIntegTestCase {

    /**
     * Test Case for #5132: Source filtering with wildcards broken when given multiple patterns
     * https://github.com/elasticsearch/elasticsearch/issues/5132
     * https://github.com/elastic/elasticsearch/issues/5132
     */
    public void testSourceWithWildcardFiltering() {
        createIndex("test");

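The SourceFetchingIT comment above concerns source filtering with several wildcard patterns (issue 5132). A hypothetical sketch of such a request; the field patterns are made up, and setFetchSource(includes, excludes) is assumed to be available on the search request builder:

        SearchResponse response = client().prepareSearch("test")
                .setFetchSource(new String[]{"field*", "meta.*"}, null) // multiple wildcard include patterns, no excludes
                .get();
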
@@ -27,7 +27,7 @@ import java.util.HashMap;
import java.util.Map;

public class SearchStatsUnitTests extends ESTestCase {
    // https://github.com/elasticsearch/elasticsearch/issues/7644
    // https://github.com/elastic/elasticsearch/issues/7644
    public void testShardLevelSearchGroupStats() throws Exception {
        // let's create two dummy search stats with groups
        Map<String, Stats> groupStats1 = new HashMap<>();

@@ -63,4 +63,4 @@ public class SearchStatsUnitTests extends ESTestCase {
        assertEquals(equalTo, stats.getScrollTimeInMillis());
        assertEquals(equalTo, stats.getScrollCurrent());
    }
}
}

Binary files in this diff are not shown.
Some files were not shown because too many files have changed in this diff.