Merge branch 'master' into enhancement/switch_geodistancesortbuilder_to_geovalidationmethod

Commit: 6b9ac46402
@@ -76,7 +76,31 @@ Contributing to the Elasticsearch codebase

 **Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch)

-Make sure you have [Gradle](http://gradle.org) installed, as Elasticsearch uses it as its build system. Integration with IntelliJ and Eclipse should work out of the box. Eclipse users can automatically configure their IDE: `gradle eclipse` then `File: Import: Existing Projects into Workspace`. Select the option `Search for nested projects`. Additionally you will want to ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini` accordingly to avoid GC overhead errors.
+Make sure you have [Gradle](http://gradle.org) installed, as
+Elasticsearch uses it as its build system.
+
+Eclipse users can automatically configure their IDE: `gradle eclipse`
+then `File: Import: Existing Projects into Workspace`. Select the
+option `Search for nested projects`. Additionally you will want to
+ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini`
+accordingly to avoid GC overhead errors.
+
+IntelliJ users can automatically configure their IDE: `gradle idea`
+then `File->New Project From Existing Sources`. Point to the root of
+the source directory, select
+`Import project from external model->Gradle`, enable
+`Use auto-import`.
+
+The Elasticsearch codebase makes heavy use of Java `assert`s and the
+test runner requires that assertions be enabled within the JVM. This
+can be accomplished by passing the flag `-ea` to the JVM on startup.
+
+For IntelliJ, go to
+`Run->Edit Configurations...->Defaults->JUnit->VM options` and input
+`-ea`.
+
+For Eclipse, go to `Preferences->Java->Installed JREs` and add `-ea` to
+`VM Arguments`.

 Please follow these formatting guidelines:
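The `-ea` requirement above is easy to sanity-check outside the IDE. A minimal standalone example (not part of this commit) showing the flag's effect:

```java
// AssertDemo.java — `java -ea AssertDemo` triggers the AssertionError;
// `java AssertDemo` (assertions disabled, the JVM default) reaches the println.
public class AssertDemo {
    public static void main(String[] args) {
        int shardCount = -1; // deliberately invalid state
        assert shardCount >= 0 : "shard count must be non-negative, got " + shardCount;
        System.out.println("assertions disabled: invalid state went unnoticed");
    }
}
```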
@@ -21,6 +21,7 @@ import com.bmuschko.gradle.nexus.NexusPlugin
 import org.eclipse.jgit.lib.Repository
 import org.eclipse.jgit.lib.RepositoryBuilder
 import org.gradle.plugins.ide.eclipse.model.SourceFolder
+import org.apache.tools.ant.taskdefs.condition.Os

 // common maven publishing configuration
 subprojects {
@@ -249,6 +250,9 @@ allprojects {
   // Name all the non-root projects after their path so that paths get grouped together when imported into eclipse.
   if (path != ':') {
     eclipse.project.name = path
+    if (Os.isFamily(Os.FAMILY_WINDOWS)) {
+      eclipse.project.name = eclipse.project.name.replace(':', '_')
+    }
   }

   plugins.withType(JavaBasePlugin) {
@@ -355,6 +355,11 @@ class BuildPlugin implements Plugin<Project> {
       }
       options.encoding = 'UTF-8'
       //options.incremental = true
+
+      // gradle ignores target/source compatibility when it deems it "unnecessary", but since,
+      // to compile with java 9, gradle is running in java 8, it incorrectly thinks it is unnecessary
+      assert minimumJava == JavaVersion.VERSION_1_8
+      options.compilerArgs << '-target' << '1.8' << '-source' << '1.8'
     }
   }
 }
@@ -57,11 +57,13 @@ class PluginPropertiesTask extends Copy {
       // configure property substitution
       from(templateFile)
       into(generatedResourcesDir)
-      expand(generateSubstitutions())
+      Map<String, String> properties = generateSubstitutions()
+      expand(properties)
+      inputs.properties(properties)
     }
   }

-  Map generateSubstitutions() {
+  Map<String, String> generateSubstitutions() {
     def stringSnap = { version ->
       if (version.endsWith("-SNAPSHOT")) {
         return version.substring(0, version.length() - 9)
@@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.cluster.allocation;

 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
-import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

@@ -32,7 +31,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.shard.ShardId;

 import java.io.IOException;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;

@@ -45,21 +43,18 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
     private final ShardId shard;
     private final boolean primary;
     private final String assignedNodeId;
-    private final Map<DiscoveryNode, Decision> nodeToDecision;
-    private final Map<DiscoveryNode, Float> nodeWeights;
     private final UnassignedInfo unassignedInfo;
-    private final long remainingDelayNanos;
+    private final long remainingDelayMillis;
+    private final Map<DiscoveryNode, NodeExplanation> nodeExplanations;

-    public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId,
-                                        UnassignedInfo unassignedInfo, Map<DiscoveryNode, Decision> nodeToDecision,
-                                        Map<DiscoveryNode, Float> nodeWeights, long remainingDelayNanos) {
+    public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long remainingDelayMillis,
+                                        @Nullable UnassignedInfo unassignedInfo, Map<DiscoveryNode, NodeExplanation> nodeExplanations) {
         this.shard = shard;
         this.primary = primary;
         this.assignedNodeId = assignedNodeId;
         this.unassignedInfo = unassignedInfo;
-        this.nodeToDecision = nodeToDecision == null ? Collections.emptyMap() : nodeToDecision;
-        this.nodeWeights = nodeWeights == null ? Collections.emptyMap() : nodeWeights;
-        this.remainingDelayNanos = remainingDelayNanos;
+        this.remainingDelayMillis = remainingDelayMillis;
+        this.nodeExplanations = nodeExplanations;
     }

     public ClusterAllocationExplanation(StreamInput in) throws IOException {
@@ -67,27 +62,15 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
         this.primary = in.readBoolean();
         this.assignedNodeId = in.readOptionalString();
         this.unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);
+        this.remainingDelayMillis = in.readVLong();

-        Map<DiscoveryNode, Decision> ntd = null;
-        int size = in.readVInt();
-        ntd = new HashMap<>(size);
-        for (int i = 0; i < size; i++) {
-            DiscoveryNode dn = new DiscoveryNode(in);
-            Decision decision = Decision.readFrom(in);
-            ntd.put(dn, decision);
+        int mapSize = in.readVInt();
+        Map<DiscoveryNode, NodeExplanation> nodeToExplanation = new HashMap<>(mapSize);
+        for (int i = 0; i < mapSize; i++) {
+            NodeExplanation nodeExplanation = new NodeExplanation(in);
+            nodeToExplanation.put(nodeExplanation.getNode(), nodeExplanation);
         }
-        this.nodeToDecision = ntd;
-
-        Map<DiscoveryNode, Float> ntw = null;
-        size = in.readVInt();
-        ntw = new HashMap<>(size);
-        for (int i = 0; i < size; i++) {
-            DiscoveryNode dn = new DiscoveryNode(in);
-            float weight = in.readFloat();
-            ntw.put(dn, weight);
-        }
-        this.nodeWeights = ntw;
-        remainingDelayNanos = in.readVLong();
+        this.nodeExplanations = nodeToExplanation;
     }

     @Override
@@ -96,27 +79,20 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
         out.writeBoolean(this.isPrimary());
         out.writeOptionalString(this.getAssignedNodeId());
         out.writeOptionalWriteable(this.getUnassignedInfo());
+        out.writeVLong(remainingDelayMillis);

-        Map<DiscoveryNode, Decision> ntd = this.getNodeDecisions();
-        out.writeVInt(ntd.size());
-        for (Map.Entry<DiscoveryNode, Decision> entry : ntd.entrySet()) {
-            entry.getKey().writeTo(out);
-            Decision.writeTo(entry.getValue(), out);
+        out.writeVInt(this.nodeExplanations.size());
+        for (NodeExplanation explanation : this.nodeExplanations.values()) {
+            explanation.writeTo(out);
         }
-        Map<DiscoveryNode, Float> ntw = this.getNodeWeights();
-        out.writeVInt(ntw.size());
-        for (Map.Entry<DiscoveryNode, Float> entry : ntw.entrySet()) {
-            entry.getKey().writeTo(out);
-            out.writeFloat(entry.getValue());
-        }
-        out.writeVLong(remainingDelayNanos);
     }

     /** Return the shard that the explanation is about */
     public ShardId getShard() {
         return this.shard;
     }

     /** Return true if the explained shard is primary, false otherwise */
     public boolean isPrimary() {
         return this.primary;
     }
@@ -138,22 +114,14 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
         return this.unassignedInfo;
     }

-    /** Return a map of node to decision for shard allocation */
-    public Map<DiscoveryNode, Decision> getNodeDecisions() {
-        return this.nodeToDecision;
+    /** Return the remaining allocation delay for this shard in milliseconds */
+    public long getRemainingDelayMillis() {
+        return this.remainingDelayMillis;
     }

-    /**
-     * Return a map of node to balancer "weight" for allocation. Higher weights mean the balancer wants to allocate the shard to that node
-     * more
-     */
-    public Map<DiscoveryNode, Float> getNodeWeights() {
-        return this.nodeWeights;
-    }
-
-    /** Return the remaining allocation delay for this shard in nanoseconds */
-    public long getRemainingDelayNanos() {
-        return this.remainingDelayNanos;
+    /** Return a map of node to the explanation for that node */
+    public Map<DiscoveryNode, NodeExplanation> getNodeExplanations() {
+        return this.nodeExplanations;
     }

     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
@@ -174,36 +142,118 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
         if (unassignedInfo != null) {
             unassignedInfo.toXContent(builder, params);
             long delay = unassignedInfo.getLastComputedLeftDelayNanos();
-            builder.field("allocation_delay", TimeValue.timeValueNanos(delay));
-            builder.field("allocation_delay_ms", TimeValue.timeValueNanos(delay).millis());
-            builder.field("remaining_delay", TimeValue.timeValueNanos(remainingDelayNanos));
-            builder.field("remaining_delay_ms", TimeValue.timeValueNanos(remainingDelayNanos).millis());
+            builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueNanos(delay));
+            builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
         }
         builder.startObject("nodes");
-        for (Map.Entry<DiscoveryNode, Float> entry : nodeWeights.entrySet()) {
-            DiscoveryNode node = entry.getKey();
-            builder.startObject(node.getId()); {
-                builder.field("node_name", node.getName());
-                builder.startObject("node_attributes"); {
-                    for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
-                        builder.field(attrEntry.getKey(), attrEntry.getValue());
-                    }
-                }
-                builder.endObject(); // end attributes
-                Decision d = nodeToDecision.get(node);
-                if (node.getId().equals(assignedNodeId)) {
-                    builder.field("final_decision", "CURRENTLY_ASSIGNED");
-                } else {
-                    builder.field("final_decision", d.type().toString());
-                }
-                builder.field("weight", entry.getValue());
-                d.toXContent(builder, params);
-            }
-            builder.endObject(); // end node <uuid>
+        for (NodeExplanation explanation : nodeExplanations.values()) {
+            explanation.toXContent(builder, params);
         }
         builder.endObject(); // end nodes
     }
     builder.endObject(); // end wrapping object
     return builder;
 }

+    /** An Enum representing the final decision for a shard allocation on a node */
+    public enum FinalDecision {
+        // Yes, the shard can be assigned
+        YES((byte) 0),
+        // No, the shard cannot be assigned
+        NO((byte) 1),
+        // The shard is already assigned to this node
+        ALREADY_ASSIGNED((byte) 2);
+
+        private final byte id;
+
+        FinalDecision(byte id) {
+            this.id = id;
+        }
+
+        private static FinalDecision fromId(byte id) {
+            switch (id) {
+                case 0: return YES;
+                case 1: return NO;
+                case 2: return ALREADY_ASSIGNED;
+                default:
+                    throw new IllegalArgumentException("unknown id for final decision: [" + id + "]");
+            }
+        }
+
+        @Override
+        public String toString() {
+            switch (id) {
+                case 0: return "YES";
+                case 1: return "NO";
+                case 2: return "ALREADY_ASSIGNED";
+                default:
+                    throw new IllegalArgumentException("unknown id for final decision: [" + id + "]");
+            }
+        }
+
+        static FinalDecision readFrom(StreamInput in) throws IOException {
+            return fromId(in.readByte());
+        }
+
+        void writeTo(StreamOutput out) throws IOException {
+            out.writeByte(id);
+        }
+    }
+
+    /** An Enum representing the state of the shard store's copy of the data on a node */
+    public enum StoreCopy {
+        // No data for this shard is on the node
+        NONE((byte) 0),
+        // A copy of the data is available on this node
+        AVAILABLE((byte) 1),
+        // The copy of the data on the node is corrupt
+        CORRUPT((byte) 2),
+        // There was an error reading this node's copy of the data
+        IO_ERROR((byte) 3),
+        // The copy of the data on the node is stale
+        STALE((byte) 4),
+        // It's unknown what the copy of the data is
+        UNKNOWN((byte) 5);
+
+        private final byte id;
+
+        StoreCopy(byte id) {
+            this.id = id;
+        }
+
+        private static StoreCopy fromId(byte id) {
+            switch (id) {
+                case 0: return NONE;
+                case 1: return AVAILABLE;
+                case 2: return CORRUPT;
+                case 3: return IO_ERROR;
+                case 4: return STALE;
+                case 5: return UNKNOWN;
+                default:
+                    throw new IllegalArgumentException("unknown id for store copy: [" + id + "]");
+            }
+        }
+
+        @Override
+        public String toString() {
+            switch (id) {
+                case 0: return "NONE";
+                case 1: return "AVAILABLE";
+                case 2: return "CORRUPT";
+                case 3: return "IO_ERROR";
+                case 4: return "STALE";
+                case 5: return "UNKNOWN";
+                default:
+                    throw new IllegalArgumentException("unknown id for store copy: [" + id + "]");
+            }
+        }
+
+        static StoreCopy readFrom(StreamInput in) throws IOException {
+            return fromId(in.readByte());
+        }
+
+        void writeTo(StreamOutput out) throws IOException {
+            out.writeByte(id);
+        }
+    }
 }
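The two enums above follow the same wire-format idiom: write a stable byte id on serialization, and fail loudly on unknown ids when reading. A standalone sketch of that round trip (plain java.io streams standing in for Elasticsearch's StreamInput/StreamOutput, which are not used here):

```java
import java.io.*;

// Byte-id round trip in the style of FinalDecision/StoreCopy: ordinals are
// never written, only the explicit ids, so reordering constants stays safe.
enum FinalDecisionSketch {
    YES((byte) 0), NO((byte) 1), ALREADY_ASSIGNED((byte) 2);

    private final byte id;
    FinalDecisionSketch(byte id) { this.id = id; }

    static FinalDecisionSketch fromId(byte id) {
        switch (id) {
            case 0: return YES;
            case 1: return NO;
            case 2: return ALREADY_ASSIGNED;
            default: throw new IllegalArgumentException("unknown id: [" + id + "]");
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new DataOutputStream(bytes).writeByte(NO.id);              // serialize
        byte onWire = new DataInputStream(
                new ByteArrayInputStream(bytes.toByteArray())).readByte();
        System.out.println(fromId(onWire));                        // prints NO
    }
}
```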
@@ -0,0 +1,145 @@ (new file: NodeExplanation.java; all 145 lines added)
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.Map;

/** The cluster allocation explanation for a single node */
public class NodeExplanation implements Writeable, ToXContent {
    private final DiscoveryNode node;
    private final Decision nodeDecision;
    private final Float nodeWeight;
    private final IndicesShardStoresResponse.StoreStatus storeStatus;
    private final ClusterAllocationExplanation.FinalDecision finalDecision;
    private final ClusterAllocationExplanation.StoreCopy storeCopy;
    private final String finalExplanation;

    public NodeExplanation(final DiscoveryNode node, final Decision nodeDecision, final Float nodeWeight,
                           final @Nullable IndicesShardStoresResponse.StoreStatus storeStatus,
                           final ClusterAllocationExplanation.FinalDecision finalDecision,
                           final String finalExplanation,
                           final ClusterAllocationExplanation.StoreCopy storeCopy) {
        this.node = node;
        this.nodeDecision = nodeDecision;
        this.nodeWeight = nodeWeight;
        this.storeStatus = storeStatus;
        this.finalDecision = finalDecision;
        this.finalExplanation = finalExplanation;
        this.storeCopy = storeCopy;
    }

    public NodeExplanation(StreamInput in) throws IOException {
        this.node = new DiscoveryNode(in);
        this.nodeDecision = Decision.readFrom(in);
        this.nodeWeight = in.readFloat();
        if (in.readBoolean()) {
            this.storeStatus = IndicesShardStoresResponse.StoreStatus.readStoreStatus(in);
        } else {
            this.storeStatus = null;
        }
        this.finalDecision = ClusterAllocationExplanation.FinalDecision.readFrom(in);
        this.finalExplanation = in.readString();
        this.storeCopy = ClusterAllocationExplanation.StoreCopy.readFrom(in);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        node.writeTo(out);
        Decision.writeTo(nodeDecision, out);
        out.writeFloat(nodeWeight);
        if (storeStatus == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            storeStatus.writeTo(out);
        }
        finalDecision.writeTo(out);
        out.writeString(finalExplanation);
        storeCopy.writeTo(out);
    }

    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(node.getId()); {
            builder.field("node_name", node.getName());
            builder.startObject("node_attributes"); {
                for (Map.Entry<String, String> attrEntry : node.getAttributes().entrySet()) {
                    builder.field(attrEntry.getKey(), attrEntry.getValue());
                }
            }
            builder.endObject(); // end attributes
            builder.startObject("store"); {
                builder.field("shard_copy", storeCopy.toString());
                if (storeStatus != null) {
                    final Throwable storeErr = storeStatus.getStoreException();
                    if (storeErr != null) {
                        builder.field("store_exception", ExceptionsHelper.detailedMessage(storeErr));
                    }
                }
            }
            builder.endObject(); // end store
            builder.field("final_decision", finalDecision.toString());
            builder.field("final_explanation", finalExplanation.toString());
            builder.field("weight", nodeWeight);
            nodeDecision.toXContent(builder, params);
        }
        builder.endObject(); // end node <uuid>
        return builder;
    }

    public DiscoveryNode getNode() {
        return this.node;
    }

    public Decision getDecision() {
        return this.nodeDecision;
    }

    public Float getWeight() {
        return this.nodeWeight;
    }

    @Nullable
    public IndicesShardStoresResponse.StoreStatus getStoreStatus() {
        return this.storeStatus;
    }

    public ClusterAllocationExplanation.FinalDecision getFinalDecision() {
        return this.finalDecision;
    }

    public String getFinalExplanation() {
        return this.finalExplanation;
    }

    public ClusterAllocationExplanation.StoreCopy getStoreCopy() {
        return this.storeCopy;
    }
}
@@ -20,8 +20,13 @@
 package org.elasticsearch.action.admin.cluster.allocation;

 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
+import org.apache.lucene.index.CorruptIndexException;
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
+import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
+import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
 import org.elasticsearch.cluster.ClusterInfoService;

@@ -47,8 +52,10 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -56,6 +63,7 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;

 /**
  * The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the
@@ -68,19 +76,22 @@ public class TransportClusterAllocationExplainAction
     private final ClusterInfoService clusterInfoService;
     private final AllocationDeciders allocationDeciders;
     private final ShardsAllocator shardAllocator;
+    private final TransportIndicesShardStoresAction shardStoresAction;

     @Inject
     public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                                    ThreadPool threadPool, ActionFilters actionFilters,
                                                    IndexNameExpressionResolver indexNameExpressionResolver,
                                                    AllocationService allocationService, ClusterInfoService clusterInfoService,
-                                                   AllocationDeciders allocationDeciders, ShardsAllocator shardAllocator) {
+                                                   AllocationDeciders allocationDeciders, ShardsAllocator shardAllocator,
+                                                   TransportIndicesShardStoresAction shardStoresAction) {
         super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
               indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
         this.allocationService = allocationService;
         this.clusterInfoService = clusterInfoService;
         this.allocationDeciders = allocationDeciders;
         this.shardAllocator = shardAllocator;
+        this.shardStoresAction = shardStoresAction;
     }

     @Override
@@ -118,12 +129,86 @@ public class TransportClusterAllocationExplainAction
         }
     }

+    /**
+     * Construct a {@code NodeExplanation} object for the given shard given all the metadata. This also attempts to construct the human
+     * readable FinalDecision and final explanation as part of the explanation.
+     */
+    public static NodeExplanation calculateNodeExplanation(ShardRouting shard,
+                                                           IndexMetaData indexMetaData,
+                                                           DiscoveryNode node,
+                                                           Decision nodeDecision,
+                                                           Float nodeWeight,
+                                                           IndicesShardStoresResponse.StoreStatus storeStatus,
+                                                           String assignedNodeId,
+                                                           Set<String> activeAllocationIds) {
+        final ClusterAllocationExplanation.FinalDecision finalDecision;
+        final ClusterAllocationExplanation.StoreCopy storeCopy;
+        final String finalExplanation;
+
+        if (storeStatus == null) {
+            // No copies of the data
+            storeCopy = ClusterAllocationExplanation.StoreCopy.NONE;
+        } else {
+            final Throwable storeErr = storeStatus.getStoreException();
+            if (storeErr != null) {
+                if (ExceptionsHelper.unwrapCause(storeErr) instanceof CorruptIndexException) {
+                    storeCopy = ClusterAllocationExplanation.StoreCopy.CORRUPT;
+                } else {
+                    storeCopy = ClusterAllocationExplanation.StoreCopy.IO_ERROR;
+                }
+            } else if (activeAllocationIds.isEmpty()) {
+                // The ids are only empty if dealing with a legacy index
+                // TODO: fetch the shard state versions and display here?
+                storeCopy = ClusterAllocationExplanation.StoreCopy.UNKNOWN;
+            } else if (activeAllocationIds.contains(storeStatus.getAllocationId())) {
+                storeCopy = ClusterAllocationExplanation.StoreCopy.AVAILABLE;
+            } else {
+                // Otherwise, this is a stale copy of the data (allocation ids don't match)
+                storeCopy = ClusterAllocationExplanation.StoreCopy.STALE;
+            }
+        }
+
+        if (node.getId().equals(assignedNodeId)) {
+            finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED;
+            finalExplanation = "the shard is already assigned to this node";
+        } else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
+                storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) {
+            finalExplanation = "the copy of the shard is stale, allocation ids do not match";
+            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
+        } else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
+                storeCopy == ClusterAllocationExplanation.StoreCopy.NONE) {
+            finalExplanation = "there is no copy of the shard available";
+            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
+        } else if (shard.primary() && shard.unassigned() && storeCopy == ClusterAllocationExplanation.StoreCopy.CORRUPT) {
+            finalExplanation = "the copy of the shard is corrupt";
+            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
+        } else if (shard.primary() && shard.unassigned() && storeCopy == ClusterAllocationExplanation.StoreCopy.IO_ERROR) {
+            finalExplanation = "the copy of the shard cannot be read";
+            finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
+        } else {
+            if (nodeDecision.type() == Decision.Type.NO) {
+                finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
+                finalExplanation = "the shard cannot be assigned because one or more allocation deciders return a 'NO' decision";
+            } else {
+                finalDecision = ClusterAllocationExplanation.FinalDecision.YES;
+                if (storeCopy == ClusterAllocationExplanation.StoreCopy.AVAILABLE) {
+                    finalExplanation = "the shard can be assigned and the node contains a valid copy of the shard data";
+                } else {
+                    finalExplanation = "the shard can be assigned";
+                }
+            }
+        }
+        return new NodeExplanation(node, nodeDecision, nodeWeight, storeStatus, finalDecision, finalExplanation, storeCopy);
+    }
+
+
     /**
      * For the given {@code ShardRouting}, return the explanation of the allocation for that shard on all nodes. If {@code
      * includeYesDecisions} is true, returns all decisions, otherwise returns only 'NO' and 'THROTTLE' decisions.
      */
     public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
-                                                            boolean includeYesDecisions, ShardsAllocator shardAllocator) {
+                                                            boolean includeYesDecisions, ShardsAllocator shardAllocator,
+                                                            List<IndicesShardStoresResponse.StoreStatus> shardStores) {
         // don't short circuit deciders, we want a full explanation
         allocation.debugDecision(true);
         // get the existing unassigned info if available
@@ -139,14 +224,35 @@ public class TransportClusterAllocationExplainAction
                 nodeToDecision.put(discoNode, d);
             }
         }
-        long remainingDelayNanos = 0;
+        long remainingDelayMillis = 0;
+        final MetaData metadata = allocation.metaData();
+        final IndexMetaData indexMetaData = metadata.index(shard.index());
         if (ui != null) {
-            final MetaData metadata = allocation.metaData();
-            final Settings indexSettings = metadata.index(shard.index()).getSettings();
-            remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), metadata.settings(), indexSettings);
+            final Settings indexSettings = indexMetaData.getSettings();
+            long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), metadata.settings(), indexSettings);
+            remainingDelayMillis = TimeValue.timeValueNanos(remainingDelayNanos).millis();
         }
-        return new ClusterAllocationExplanation(shard.shardId(), shard.primary(), shard.currentNodeId(), ui, nodeToDecision,
-                shardAllocator.weighShard(allocation, shard), remainingDelayNanos);
+
+        // Calculate weights for each of the nodes
+        Map<DiscoveryNode, Float> weights = shardAllocator.weighShard(allocation, shard);
+
+        Map<DiscoveryNode, IndicesShardStoresResponse.StoreStatus> nodeToStatus = new HashMap<>(shardStores.size());
+        for (IndicesShardStoresResponse.StoreStatus status : shardStores) {
+            nodeToStatus.put(status.getNode(), status);
+        }
+
+        Map<DiscoveryNode, NodeExplanation> explanations = new HashMap<>(shardStores.size());
+        for (Map.Entry<DiscoveryNode, Decision> entry : nodeToDecision.entrySet()) {
+            DiscoveryNode node = entry.getKey();
+            Decision decision = entry.getValue();
+            Float weight = weights.get(node);
+            IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node);
+            NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight,
+                    storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()));
+            explanations.put(node, nodeExplanation);
+        }
+        return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
+                shard.currentNodeId(), remainingDelayMillis, ui, explanations);
     }

     @Override
@@ -156,30 +262,30 @@ public class TransportClusterAllocationExplainAction
         final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state.nodes(),
                 clusterInfoService.getClusterInfo(), System.nanoTime());

-        ShardRouting shardRouting = null;
+        ShardRouting foundShard = null;
         if (request.useAnyUnassignedShard()) {
             // If we can use any shard, just pick the first unassigned one (if there are any)
             RoutingNodes.UnassignedShards.UnassignedIterator ui = routingNodes.unassigned().iterator();
             if (ui.hasNext()) {
-                shardRouting = ui.next();
+                foundShard = ui.next();
             }
         } else {
             String index = request.getIndex();
             int shard = request.getShard();
             if (request.isPrimary()) {
                 // If we're looking for the primary shard, there's only one copy, so pick it directly
-                shardRouting = allocation.routingTable().shardRoutingTable(index, shard).primaryShard();
+                foundShard = allocation.routingTable().shardRoutingTable(index, shard).primaryShard();
             } else {
                 // If looking for a replica, go through all the replica shards
                 List<ShardRouting> replicaShardRoutings = allocation.routingTable().shardRoutingTable(index, shard).replicaShards();
                 if (replicaShardRoutings.size() > 0) {
                     // Pick the first replica at the very least
-                    shardRouting = replicaShardRoutings.get(0);
+                    foundShard = replicaShardRoutings.get(0);
                     // In case there are multiple replicas where some are assigned and some aren't,
                     // try to find one that is unassigned at least
                     for (ShardRouting replica : replicaShardRoutings) {
                         if (replica.unassigned()) {
-                            shardRouting = replica;
+                            foundShard = replica;
                             break;
                         }
                     }
@@ -187,14 +293,34 @@ public class TransportClusterAllocationExplainAction
             }
         }

-        if (shardRouting == null) {
+        if (foundShard == null) {
             listener.onFailure(new ElasticsearchException("unable to find any shards to explain [{}] in the routing table", request));
             return;
         }
+        final ShardRouting shardRouting = foundShard;
         logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting);

-        ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
-                request.includeYesDecisions(), shardAllocator);
-        listener.onResponse(new ClusterAllocationExplainResponse(cae));
+        getShardStores(shardRouting, new ActionListener<IndicesShardStoresResponse>() {
+            @Override
+            public void onResponse(IndicesShardStoresResponse shardStoreResponse) {
+                ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> shardStatuses =
+                        shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
+                List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id());
+                ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
+                        request.includeYesDecisions(), shardAllocator, shardStoreStatus);
+                listener.onResponse(new ClusterAllocationExplainResponse(cae));
+            }
+
+            @Override
+            public void onFailure(Throwable e) {
+                listener.onFailure(e);
+            }
+        });
     }

+    private void getShardStores(ShardRouting shard, final ActionListener<IndicesShardStoresResponse> listener) {
+        IndicesShardStoresRequest request = new IndicesShardStoresRequest(shard.getIndexName());
+        request.shardStatuses("all");
+        shardStoresAction.execute(request, listener);
+    }
 }
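The control flow change above is worth calling out: the explanation can no longer be computed synchronously, because it now depends on an asynchronous shard-store fetch, so the response is built inside the nested listener's `onResponse`. A standalone sketch of that chaining pattern (the names and the simulated fetch are illustrative, not the Elasticsearch API):

```java
import java.util.List;
import java.util.concurrent.CompletableFuture;

// Callback chaining: the outer request completes only after the nested
// async call (simulated here) reports back with the shard store statuses.
public class ListenerChainDemo {
    interface ActionListener<T> {
        void onResponse(T response);
        void onFailure(Throwable e);
    }

    // stands in for shardStoresAction.execute(request, listener)
    static void fetchShardStores(ActionListener<List<String>> listener) {
        CompletableFuture.supplyAsync(() -> List.of("node-1: AVAILABLE", "node-2: STALE"))
                .whenComplete((stores, err) -> {
                    if (err != null) listener.onFailure(err);
                    else listener.onResponse(stores);
                });
    }

    public static void main(String[] args) throws InterruptedException {
        fetchShardStores(new ActionListener<List<String>>() {
            @Override public void onResponse(List<String> stores) {
                // only now can the explanation be assembled from store statuses
                System.out.println("explaining with stores: " + stores);
            }
            @Override public void onFailure(Throwable e) {
                System.err.println("explain failed: " + e);
            }
        });
        Thread.sleep(200); // crude wait so the async demo finishes before exit
    }
}
```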
@@ -164,7 +164,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
             return allocationStatus;
         }

-        static StoreStatus readStoreStatus(StreamInput in) throws IOException {
+        public static StoreStatus readStoreStatus(StreamInput in) throws IOException {
             StoreStatus storeStatus = new StoreStatus();
             storeStatus.readFrom(in);
             return storeStatus;
@@ -31,7 +31,6 @@ import org.elasticsearch.search.Scroll;
 import org.elasticsearch.search.aggregations.AggregatorBuilder;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilder;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
-import org.elasticsearch.index.query.support.InnerHitsBuilder;
 import org.elasticsearch.search.highlight.HighlightBuilder;
 import org.elasticsearch.search.rescore.RescoreBuilder;
 import org.elasticsearch.search.sort.SortBuilder;

@@ -400,11 +399,6 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
         return this;
     }

-    public SearchRequestBuilder innerHits(InnerHitsBuilder innerHitsBuilder) {
-        sourceBuilder().innerHits(innerHitsBuilder);
-        return this;
-    }
-
     /**
      * Clears all rescorers on the builder and sets the first one. To use multiple rescore windows use
      * {@link #addRescorer(org.elasticsearch.search.rescore.RescoreBuilder, int)}.
@@ -41,7 +41,6 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
-import java.util.stream.Collectors;

 /**
  * We enforce limits once any network host is configured. In this case we assume the node is running in production
@@ -63,40 +62,75 @@ final class BootstrapCheck {
      * @param boundTransportAddress the node network bindings
      */
     static void check(final Settings settings, final BoundTransportAddress boundTransportAddress) {
-        check(enforceLimits(boundTransportAddress), checks(settings), Node.NODE_NAME_SETTING.get(settings));
+        check(
+            enforceLimits(boundTransportAddress),
+            BootstrapSettings.IGNORE_SYSTEM_BOOTSTRAP_CHECKS.get(settings),
+            checks(settings),
+            Node.NODE_NAME_SETTING.get(settings));
     }

     /**
      * executes the provided checks and fails the node if
      * enforceLimits is true, otherwise logs warnings
      *
-     * @param enforceLimits true if the checks should be enforced or
-     *                      warned
-     * @param checks        the checks to execute
-     * @param nodeName      the node name to be used as a logging prefix
+     * @param enforceLimits      true if the checks should be enforced or
+     *                           otherwise only warned about
+     * @param ignoreSystemChecks true if system-level checks should be ignored
+     *                           and merely warned about
+     * @param checks             the checks to execute
+     * @param nodeName           the node name to be used as a logging prefix
      */
     // visible for testing
-    static void check(final boolean enforceLimits, final List<Check> checks, final String nodeName) {
-        final ESLogger logger = Loggers.getLogger(BootstrapCheck.class, nodeName);
-
-        final List<String> errors =
-                checks.stream()
-                        .filter(BootstrapCheck.Check::check)
-                        .map(BootstrapCheck.Check::errorMessage)
-                        .collect(Collectors.toList());
+    static void check(final boolean enforceLimits, final boolean ignoreSystemChecks, final List<Check> checks, final String nodeName) {
+        check(enforceLimits, ignoreSystemChecks, checks, Loggers.getLogger(BootstrapCheck.class, nodeName));
+    }
+
+    /**
+     * executes the provided checks and fails the node if
+     * enforceLimits is true, otherwise logs warnings
+     *
+     * @param enforceLimits      true if the checks should be enforced or
+     *                           otherwise only warned about
+     * @param ignoreSystemChecks true if system-level checks should be ignored
+     *                           and merely warned about
+     * @param checks             the checks to execute
+     * @param logger             the logger to log warnings to
+     */
+    static void check(
+            final boolean enforceLimits,
+            final boolean ignoreSystemChecks,
+            final List<Check> checks,
+            final ESLogger logger) {
+        final List<String> errors = new ArrayList<>();
+        final List<String> ignoredErrors = new ArrayList<>();
+
+        for (final Check check : checks) {
+            if (check.check()) {
+                if (!enforceLimits || (check.isSystemCheck() && ignoreSystemChecks)) {
+                    ignoredErrors.add(check.errorMessage());
+                } else {
+                    errors.add(check.errorMessage());
+                }
+            }
+        }
+
+        if (!ignoredErrors.isEmpty()) {
+            ignoredErrors.forEach(error -> log(logger, error));
+        }

         if (!errors.isEmpty()) {
             final List<String> messages = new ArrayList<>(1 + errors.size());
             messages.add("bootstrap checks failed");
             messages.addAll(errors);
-            if (enforceLimits) {
-                final RuntimeException re = new RuntimeException(String.join("\n", messages));
-                errors.stream().map(IllegalStateException::new).forEach(re::addSuppressed);
-                throw re;
-            } else {
-                messages.forEach(message -> logger.warn(message));
-            }
+            final RuntimeException re = new RuntimeException(String.join("\n", messages));
+            errors.stream().map(IllegalStateException::new).forEach(re::addSuppressed);
+            throw re;
         }

     }

+    static void log(final ESLogger logger, final String error) {
+        logger.warn(error);
+    }
+
     /**
@@ -151,6 +185,14 @@ final class BootstrapCheck {
          */
         String errorMessage();

+        /**
+         * test if the check is a system-level check
+         *
+         * @return true if the check is a system-level check as opposed
+         *         to an Elasticsearch-level check
+         */
+        boolean isSystemCheck();
+
     }

     static class HeapSizeCheck implements BootstrapCheck.Check {
@@ -183,6 +225,11 @@ final class BootstrapCheck {
             return JvmInfo.jvmInfo().getConfiguredMaxHeapSize();
         }

+        @Override
+        public final boolean isSystemCheck() {
+            return false;
+        }
+
     }

     static class OsXFileDescriptorCheck extends FileDescriptorCheck {

@@ -233,6 +280,11 @@ final class BootstrapCheck {
             return ProcessProbe.getInstance().getMaxFileDescriptorCount();
         }

+        @Override
+        public final boolean isSystemCheck() {
+            return true;
+        }
+
     }

     // visible for testing

@@ -259,6 +311,11 @@ final class BootstrapCheck {
             return Natives.isMemoryLocked();
         }

+        @Override
+        public final boolean isSystemCheck() {
+            return true;
+        }
+
     }

     static class MinMasterNodesCheck implements Check {

@@ -279,6 +336,12 @@ final class BootstrapCheck {
             return "please set [" + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() +
                 "] to a majority of the number of master eligible nodes in your cluster.";
         }

+        @Override
+        public final boolean isSystemCheck() {
+            return false;
+        }
+
     }

     static class MaxNumberOfThreadsCheck implements Check {

@@ -305,6 +368,11 @@ final class BootstrapCheck {
             return JNANatives.MAX_NUMBER_OF_THREADS;
         }

+        @Override
+        public final boolean isSystemCheck() {
+            return true;
+        }
+
     }

     static class MaxSizeVirtualMemoryCheck implements Check {

@@ -333,6 +401,11 @@ final class BootstrapCheck {
             return JNANatives.MAX_SIZE_VIRTUAL_MEMORY;
         }

+        @Override
+        public final boolean isSystemCheck() {
+            return true;
+        }
+
     }

     static class MaxMapCountCheck implements Check {

@@ -396,6 +469,11 @@ final class BootstrapCheck {
             return Long.parseLong(procSysVmMaxMapCount);
         }

+        @Override
+        public final boolean isSystemCheck() {
+            return true;
+        }
+
     }

 }
@@ -37,5 +37,7 @@ public final class BootstrapSettings {
         Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope);
     public static final Setting<Boolean> CTRLHANDLER_SETTING =
         Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope);
+    public static final Setting<Boolean> IGNORE_SYSTEM_BOOTSTRAP_CHECKS =
+        Setting.boolSetting("bootstrap.ignore_system_bootstrap_checks", false, Property.NodeScope);

 }
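Taken together, `isSystemCheck()` and the new `bootstrap.ignore_system_bootstrap_checks` setting mean a failing check aborts startup only when limits are enforced and the check is not a system check that the operator has explicitly excused. A standalone sketch of that filtering logic (the names are illustrative, not the Elasticsearch API):

```java
import java.util.ArrayList;
import java.util.List;

// Enforce/ignore semantics of the bootstrap checks above: a failing check is
// downgraded to a warning when limits are not enforced, or when it is a
// system-level check and ignoreSystemChecks is set.
public class BootstrapCheckDemo {
    interface Check {
        boolean check();            // true means the check FAILED
        String errorMessage();
        boolean isSystemCheck();
    }

    static void check(boolean enforceLimits, boolean ignoreSystemChecks, List<Check> checks) {
        List<String> errors = new ArrayList<>();
        for (Check c : checks) {
            if (c.check()) {
                if (!enforceLimits || (c.isSystemCheck() && ignoreSystemChecks)) {
                    System.err.println("warning: " + c.errorMessage()); // ignored, warn only
                } else {
                    errors.add(c.errorMessage());
                }
            }
        }
        if (!errors.isEmpty()) {
            throw new RuntimeException("bootstrap checks failed: " + errors);
        }
    }

    public static void main(String[] args) {
        Check mlockall = new Check() { // a system-level check that always fails
            public boolean check() { return true; }
            public String errorMessage() { return "memory locking requested but not allowed"; }
            public boolean isSystemCheck() { return true; }
        };
        check(true, true, List.of(mlockall));  // warns only
        check(true, false, List.of(mlockall)); // throws RuntimeException
    }
}
```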
@@ -139,15 +139,9 @@ public class AliasValidator extends AbstractComponent {
         }
     }

-    private void validateAliasFilter(XContentParser parser, QueryShardContext queryShardContext) throws IOException {
-        try {
-            queryShardContext.reset();
-            QueryParseContext queryParseContext = queryShardContext.newParseContext(parser);
-            QueryBuilder<?> queryBuilder = QueryBuilder.rewriteQuery(queryParseContext.parseInnerQueryBuilder(), queryShardContext);
-            queryBuilder.toFilter(queryShardContext);
-        } finally {
-            queryShardContext.reset();
-            parser.close();
-        }
+    private static void validateAliasFilter(XContentParser parser, QueryShardContext queryShardContext) throws IOException {
+        QueryParseContext queryParseContext = queryShardContext.newParseContext(parser);
+        QueryBuilder<?> queryBuilder = QueryBuilder.rewriteQuery(queryParseContext.parseInnerQueryBuilder(), queryShardContext);
+        queryBuilder.toFilter(queryShardContext);
     }
 }
@@ -150,7 +150,7 @@ public class MultiPhrasePrefixQuery extends Query {
             }
         }
         if (terms.isEmpty()) {
-            return Queries.newMatchNoDocsQuery();
+            return Queries.newMatchNoDocsQuery("No terms supplied for " + MultiPhrasePrefixQuery.class.getName());
         }
         query.add(terms.toArray(Term.class), position);
         return query.build();
@@ -44,8 +44,8 @@ public class Queries {
     }

     /** Return a query that matches no document. */
-    public static Query newMatchNoDocsQuery() {
-        return new BooleanQuery.Builder().build();
+    public static Query newMatchNoDocsQuery(String reason) {
+        return new MatchNoDocsQuery(reason);
     }

     public static Query newNestedFilter() {
@@ -408,6 +408,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
                     BootstrapSettings.MLOCKALL_SETTING,
                     BootstrapSettings.SECCOMP_SETTING,
                     BootstrapSettings.CTRLHANDLER_SETTING,
+                    BootstrapSettings.IGNORE_SYSTEM_BOOTSTRAP_CHECKS,
                     IndexingMemoryController.INDEX_BUFFER_SIZE_SETTING,
                     IndexingMemoryController.MIN_INDEX_BUFFER_SIZE_SETTING,
                     IndexingMemoryController.MAX_INDEX_BUFFER_SIZE_SETTING,
@@ -344,7 +344,7 @@ public class Setting<T> extends ToXContentToBytes {
             return get(primary);
         }
         if (fallbackSetting == null) {
-           return get(secondary);
+            return get(secondary);
         }
         if (exists(secondary)) {
             return get(secondary);
@@ -599,7 +599,6 @@ public class Setting<T> extends ToXContentToBytes {

         return new Setting<List<T>>(new ListKey(key),
                 (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, properties) {
-            private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?");
             @Override
             public String getRaw(Settings settings) {
                 String[] array = settings.getAsArray(getKey(), null);

@@ -610,6 +609,12 @@ public class Setting<T> extends ToXContentToBytes {
             boolean hasComplexMatcher() {
                 return true;
             }
+
+            @Override
+            public boolean exists(Settings settings) {
+                boolean exists = super.exists(settings);
+                return exists || settings.get(getKey() + ".0") != null;
+            }
         };
     }

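The `exists()` override checks for `<key>.0` because list settings are flattened into numbered keys, so the bare key may be absent even when the list is configured. A standalone sketch of that lookup against a flat settings map (illustrative names, not the Elasticsearch `Settings` API):

```java
import java.util.Map;

// Why the override probes "<key>.0": a configured list setting appears only
// as numbered entries in the flattened key space.
public class ListSettingExistsDemo {
    static boolean exists(Map<String, String> flatSettings, String key) {
        return flatSettings.containsKey(key) || flatSettings.get(key + ".0") != null;
    }

    public static void main(String[] args) {
        Map<String, String> settings = Map.of(
                "http.host.0", "10.0.0.1",
                "http.host.1", "10.0.0.2");
        System.out.println(exists(settings, "http.host")); // true, via http.host.0
    }
}
```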
@@ -175,9 +175,7 @@ public class SizeValue implements Streamable {
         }
         long singles;
         try {
-            if (sValue.endsWith("b")) {
-                singles = Long.parseLong(sValue.substring(0, sValue.length() - 1));
-            } else if (sValue.endsWith("k") || sValue.endsWith("K")) {
+            if (sValue.endsWith("k") || sValue.endsWith("K")) {
                 singles = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * SizeUnit.C1);
             } else if (sValue.endsWith("m") || sValue.endsWith("M")) {
                 singles = (long) (Double.parseDouble(sValue.substring(0, sValue.length() - 1)) * SizeUnit.C2);

@@ -232,4 +230,4 @@ public class SizeValue implements Streamable {
         result = 31 * result + (sizeUnit != null ? sizeUnit.hashCode() : 0);
         return result;
     }
 }
@@ -50,7 +50,7 @@ public class KeyedLock<T> {
         this(false);
     }

-    private final ConcurrentMap<T, KeyLock> map = ConcurrentCollections.newConcurrentMap();
+    private final ConcurrentMap<T, KeyLock> map = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();

     public Releasable acquire(T key) {
         assert isHeldByCurrentThread(key) == false : "lock for " + key + " is already held by this thread";
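`KeyedLock` is the per-key locking primitive that the `InternalEngine` changes further down switch to: `acquire()` hands back a `Releasable`, so try-with-resources releases the lock and replaces the old `synchronized (dirtyLock(uid))` blocks. A simplified standalone sketch of the idiom (the real `KeyedLock` also reference-counts and removes idle locks from the map, which this omits):

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.ReentrantLock;

// Per-key locking where releasing is just closing, so try-with-resources
// guarantees the unlock even when the guarded block throws.
public class KeyedLockDemo {
    static final ConcurrentMap<String, ReentrantLock> locks = new ConcurrentHashMap<>();

    static AutoCloseable acquire(String key) {
        ReentrantLock lock = locks.computeIfAbsent(key, k -> new ReentrantLock());
        lock.lock();
        return lock::unlock; // close() releases the lock
    }

    public static void main(String[] args) throws Exception {
        try (AutoCloseable ignored = acquire("doc#1")) {
            System.out.println("mutating doc#1 under its own lock");
        } // lock released here
    }
}
```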
@@ -50,9 +50,11 @@ public final class HttpTransportSettings {
     public static final Setting<Integer> SETTING_PIPELINING_MAX_EVENTS =
         Setting.intSetting("http.pipelining.max_events", 10000, Property.NodeScope);
     public static final Setting<Boolean> SETTING_HTTP_COMPRESSION =
-        Setting.boolSetting("http.compression", false, Property.NodeScope);
+        Setting.boolSetting("http.compression", true, Property.NodeScope);
+    // we intentionally use a different compression level than Netty's default here, as our benchmarks have shown that a compression
+    // level of 3 is the best compromise between reduction in network traffic and added latency. For more details please check #7309.
     public static final Setting<Integer> SETTING_HTTP_COMPRESSION_LEVEL =
-        Setting.intSetting("http.compression_level", 6, Property.NodeScope);
+        Setting.intSetting("http.compression_level", 3, Property.NodeScope);
     public static final Setting<List<String>> SETTING_HTTP_HOST =
         listSetting("http.host", emptyList(), Function.identity(), Property.NodeScope);
     public static final Setting<List<String>> SETTING_HTTP_PUBLISH_HOST =
@@ -1,51 +0,0 @@ (file deleted: ESHttpContentDecompressor.java; all 51 lines removed)
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.netty;

import org.elasticsearch.transport.TransportException;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.handler.codec.embedder.DecoderEmbedder;
import org.jboss.netty.handler.codec.http.HttpContentDecompressor;
import org.jboss.netty.handler.codec.http.HttpHeaders;

public class ESHttpContentDecompressor extends HttpContentDecompressor {
    private final boolean compression;

    public ESHttpContentDecompressor(boolean compression) {
        super();
        this.compression = compression;
    }

    @Override
    protected DecoderEmbedder<ChannelBuffer> newContentDecoder(String contentEncoding) throws Exception {
        if (compression) {
            // compression is enabled so handle the request according to the headers (compressed and uncompressed)
            return super.newContentDecoder(contentEncoding);
        } else {
            // if compression is disabled only allow "identity" (uncompressed) requests
            if (HttpHeaders.Values.IDENTITY.equals(contentEncoding)) {
                // nothing to handle here
                return null;
            } else {
                throw new TransportException("Support for compressed content is disabled. You can enable it with http.compression=true");
            }
        }
    }
}
@@ -70,6 +70,7 @@ import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
 import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory;
 import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
 import org.jboss.netty.handler.codec.http.HttpContentCompressor;
+import org.jboss.netty.handler.codec.http.HttpContentDecompressor;
 import org.jboss.netty.handler.codec.http.HttpMethod;
 import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
 import org.jboss.netty.handler.timeout.ReadTimeoutException;
@@ -544,19 +545,19 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
             requestDecoder.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
         }
         pipeline.addLast("decoder", requestDecoder);
-        pipeline.addLast("decoder_compress", new ESHttpContentDecompressor(transport.compression));
+        pipeline.addLast("decoder_compress", new HttpContentDecompressor());
         HttpChunkAggregator httpChunkAggregator = new HttpChunkAggregator((int) transport.maxContentLength.bytes());
         if (transport.maxCompositeBufferComponents != -1) {
             httpChunkAggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
         }
         pipeline.addLast("aggregator", httpChunkAggregator);
-        if (SETTING_CORS_ENABLED.get(transport.settings())) {
-            pipeline.addLast("cors", new CorsHandler(transport.getCorsConfig()));
-        }
         pipeline.addLast("encoder", new ESHttpResponseEncoder());
         if (transport.compression) {
             pipeline.addLast("encoder_compress", new HttpContentCompressor(transport.compressionLevel));
         }
+        if (SETTING_CORS_ENABLED.get(transport.settings())) {
+            pipeline.addLast("cors", new CorsHandler(transport.getCorsConfig()));
+        }
         if (transport.pipelining) {
             pipeline.addLast("pipelining", new HttpPipeliningHandler(transport.pipeliningMaxEvents));
         }
@@ -382,7 +382,7 @@ public final class IndexSettings {
      */
     synchronized boolean updateIndexMetaData(IndexMetaData indexMetaData) {
         final Settings newSettings = indexMetaData.getSettings();
-        if (Version.indexCreated(newSettings) != version) {
+        if (version.equals(Version.indexCreated(newSettings)) == false) {
             throw new IllegalArgumentException("version mismatch on settings update expected: " + version + " but was: " + Version.indexCreated(newSettings));
         }
         final String newUUID = newSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
@@ -41,7 +41,6 @@ import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.InfoStream;
 import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.cluster.routing.Murmur3HashFunction;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.logging.ESLogger;

@@ -51,6 +50,7 @@ import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
 import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.KeyedLock;
 import org.elasticsearch.common.util.concurrent.ReleasableLock;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.mapper.Uid;
@@ -100,7 +100,7 @@ public class InternalEngine extends Engine {
     // we use the hashed variant since we iterate over it and check removal and additions on existing keys
     private final LiveVersionMap versionMap;

-    private final Object[] dirtyLocks;
+    private final KeyedLock<BytesRef> keyedLock = new KeyedLock<>();

     private final AtomicBoolean versionMapRefreshPending = new AtomicBoolean();

@@ -128,10 +128,6 @@ public class InternalEngine extends Engine {
         try {
             this.lastDeleteVersionPruneTimeMSec = engineConfig.getThreadPool().estimatedTimeInMillis();
             mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings());
-            this.dirtyLocks = new Object[Runtime.getRuntime().availableProcessors() * 10]; // we multiply it to have enough...
-            for (int i = 0; i < dirtyLocks.length; i++) {
-                dirtyLocks[i] = new Object();
-            }
             throttle = new IndexThrottle();
             this.searcherFactory = new SearchFactory(logger, isClosed, engineConfig);
             try {
@ -356,7 +352,7 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
|
||||
private boolean innerIndex(Index index) throws IOException {
|
||||
synchronized (dirtyLock(index.uid())) {
|
||||
try (Releasable ignored = acquireLock(index.uid())) {
|
||||
lastWriteNanos = index.startTime();
|
||||
final long currentVersion;
|
||||
final boolean deleted;
|
||||
|
@ -451,7 +447,7 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
|
||||
private void innerDelete(Delete delete) throws IOException {
|
||||
synchronized (dirtyLock(delete.uid())) {
|
||||
try (Releasable ignored = acquireLock(delete.uid())) {
|
||||
lastWriteNanos = delete.startTime();
|
||||
final long currentVersion;
|
||||
final boolean deleted;
|
||||
|
@ -708,7 +704,7 @@ public class InternalEngine extends Engine {
|
|||
// we only need to prune the deletes map; the current/old version maps are cleared on refresh:
|
||||
for (Map.Entry<BytesRef, VersionValue> entry : versionMap.getAllTombstones()) {
|
||||
BytesRef uid = entry.getKey();
|
||||
synchronized (dirtyLock(uid)) { // can we do it without this lock on each value? maybe batch to a set and get the lock once per set?
|
||||
try (Releasable ignored = acquireLock(uid)) { // can we do it without this lock on each value? maybe batch to a set and get the lock once per set?
|
||||
|
||||
// Must re-get it here, vs using entry.getValue(), in case the uid was indexed/deleted since we pulled the iterator:
|
||||
VersionValue versionValue = versionMap.getTombstoneUnderLock(uid);
|
||||
|
@ -908,13 +904,12 @@ public class InternalEngine extends Engine {
|
|||
return searcherManager;
|
||||
}
|
||||
|
||||
private Object dirtyLock(BytesRef uid) {
|
||||
int hash = Murmur3HashFunction.hash(uid.bytes, uid.offset, uid.length);
|
||||
return dirtyLocks[Math.floorMod(hash, dirtyLocks.length)];
|
||||
private Releasable acquireLock(BytesRef uid) {
|
||||
return keyedLock.acquire(uid);
|
||||
}
|
||||
|
||||
private Object dirtyLock(Term uid) {
|
||||
return dirtyLock(uid.bytes());
|
||||
private Releasable acquireLock(Term uid) {
|
||||
return acquireLock(uid.bytes());
|
||||
}
|
||||
|
||||
private long loadCurrentVersionFromIndex(Term uid) throws IOException {
|
||||
|
|
|
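The dirty-lock scheme removed above hashed every uid onto a fixed array of monitors, so unrelated ids could contend on the same lock, and a bare synchronized block cannot participate in try-with-resources. KeyedLock hands out one lock per key and releases it through Releasable. A minimal sketch of the pattern the new innerIndex/innerDelete code relies on (the uid and critical section here are illustrative):

    import org.apache.lucene.util.BytesRef;
    import org.elasticsearch.common.lease.Releasable;
    import org.elasticsearch.common.util.concurrent.KeyedLock;

    class PerUidLocking {
        private final KeyedLock<BytesRef> keyedLock = new KeyedLock<>();

        void withUidLock(BytesRef uid, Runnable criticalSection) {
            // acquire() blocks until no other thread holds the lock for an equal key;
            // try-with-resources releases it even if the critical section throws.
            try (Releasable ignored = keyedLock.acquire(uid)) {
                criticalSection.run();
            }
        }
    }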
@@ -29,7 +29,6 @@ import org.apache.lucene.search.RegexpQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
@@ -204,6 +203,9 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
         public Mapper.Builder parse(String fieldName, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
             if (parserContext.indexVersionCreated().onOrAfter(Version.V_5_0_0_alpha1)) {
                 final Object index = node.get("index");
+                if (Arrays.asList(null, "no", "not_analyzed", "analyzed").contains(index) == false) {
+                    throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [no], [not_analyzed] or [analyzed]");
+                }
                 final boolean keyword = index != null && "analyzed".equals(index) == false;

                 // Automatically upgrade simple mappings for ease of upgrade, otherwise fail
@@ -283,7 +285,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
                         node.put("index", false);
                         break;
                     default:
-                        throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [true], [false], [no], [not_analyzed] or [analyzed]");
+                        throw new IllegalArgumentException("Can't parse [index] value [" + index + "] for field [" + fieldName + "], expected [no], [not_analyzed] or [analyzed]");
                 }
             }
             final Object fielddataObject = node.get("fielddata");
@@ -144,7 +144,7 @@ public class IndexFieldMapper extends MetadataFieldMapper {
         if (isSameIndex(value, context.index().getName())) {
             return Queries.newMatchAllQuery();
         } else {
-            return Queries.newMatchNoDocsQuery();
+            return Queries.newMatchNoDocsQuery("Index didn't match. Index queried: " + context.index().getName() + " vs. " + value);
         }
     }

@@ -161,7 +161,7 @@ public class IndexFieldMapper extends MetadataFieldMapper {
             }
         }
         // None of the listed index names are this one
-        return Queries.newMatchNoDocsQuery();
+        return Queries.newMatchNoDocsQuery("Index didn't match. Index queried: " + context.index().getName() + " vs. " + values);
     }

     private boolean isSameIndex(Object value, String indexName) {
@@ -207,7 +207,6 @@ public class PercolatorFieldMapper extends FieldMapper {
     }

     static Query toQuery(QueryShardContext context, boolean mapUnmappedFieldsAsString, QueryBuilder<?> queryBuilder) throws IOException {
-        context.reset();
         // This means that fields in the query need to exist in the mapping prior to registering this query
         // The reason that this is required, is that if a field doesn't exist then the query assumes defaults, which may be undesired.
         //
@@ -222,11 +221,7 @@ public class PercolatorFieldMapper extends FieldMapper {
         // as an analyzed string.
         context.setAllowUnmappedFields(false);
         context.setMapUnmappedFieldAsString(mapUnmappedFieldsAsString);
-        try {
-            return queryBuilder.toQuery(context);
-        } finally {
-            context.reset();
-        }
+        return queryBuilder.toQuery(context);
     }

     static QueryBuilder<?> parseQueryBuilder(QueryParseContext context, XContentLocation location) {
@@ -36,6 +36,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;

 /**
@@ -108,12 +109,12 @@ public abstract class AbstractQueryBuilder<QB extends AbstractQueryBuilder<QB>>
     @Override
     public final Query toFilter(QueryShardContext context) throws IOException {
         Query result = null;
-        final boolean originalIsFilter = context.isFilter;
+        final boolean originalIsFilter = context.isFilter();
         try {
-            context.isFilter = true;
+            context.setIsFilter(true);
             result = toQuery(context);
         } finally {
-            context.isFilter = originalIsFilter;
+            context.setIsFilter(originalIsFilter);
         }
         return result;
     }
@@ -273,6 +274,15 @@ public abstract class AbstractQueryBuilder<QB extends AbstractQueryBuilder<QB>>
         return this;
     }

+    /**
+     * For internal usage only!
+     *
+     * Extracts the inner hits from the query tree.
+     * While it extracts inner hits, child inner hits are inlined into the inner hit builder they belong to.
+     */
+    protected void extractInnerHitBuilders(Map<String, InnerHitBuilder> innerHits) {
+    }
+
     // Like Objects.requireNotNull(...) but instead throws a IllegalArgumentException
     protected static <T> T requireValue(T value, String message) {
         if (value == null) {
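Making isFilter private forces toFilter's save/set/restore dance through accessors. The shape of that guard, pulled out as a standalone sketch (class and method names are illustrative, not from the codebase):

    import java.util.concurrent.Callable;

    class FilterFlagGuard {
        private boolean isFilter;

        boolean isFilter() { return isFilter; }
        void setIsFilter(boolean isFilter) { this.isFilter = isFilter; }

        // Flip the flag for the duration of the call and restore the previous value
        // in finally, so the context is left unchanged even when body.call() throws.
        <T> T asFilter(Callable<T> body) throws Exception {
            final boolean original = isFilter();
            try {
                setIsFilter(true);
                return body.call();
            } finally {
                setIsFilter(original);
            }
        }
    }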
@@ -35,6 +35,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.Objects;
 import java.util.function.Consumer;

@@ -495,6 +496,17 @@ public class BoolQueryBuilder extends AbstractQueryBuilder<BoolQueryBuilder> {
         return this;
     }

+    @Override
+    protected void extractInnerHitBuilders(Map<String, InnerHitBuilder> innerHits) {
+        List<QueryBuilder<?>> clauses = new ArrayList<>(filter());
+        clauses.addAll(must());
+        clauses.addAll(should());
+        // no need to include must_not (since there will be no hits for it)
+        for (QueryBuilder<?> clause : clauses) {
+            InnerHitBuilder.extractInnerHits(clause, innerHits);
+        }
+    }
+
     private static boolean rewriteClauses(QueryRewriteContext queryRewriteContext, List<QueryBuilder<?>> builders,
             Consumer<QueryBuilder<?>> consumer) throws IOException {
         boolean changed = false;
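Collection is a plain pre-order walk: the bool builder contributes the inner hits of its must, should and filter clauses, and recursion into each clause happens inside InnerHitBuilder.extractInnerHits. A hypothetical call site, assuming someBoolQuery was built elsewhere:

    import java.util.HashMap;
    import java.util.Map;

    // Gather every inner hit registered anywhere in the tree rooted at someBoolQuery.
    Map<String, InnerHitBuilder> innerHits = new HashMap<>();
    InnerHitBuilder.extractInnerHits(someBoolQuery, innerHits);
    // must_not clauses are skipped on purpose: no hit can ever originate from them.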
@@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
+import java.util.Map;
 import java.util.Objects;

 /**
@@ -235,4 +236,10 @@ public class BoostingQueryBuilder extends AbstractQueryBuilder<BoostingQueryBuil
         }
         return this;
     }
+
+    @Override
+    protected void extractInnerHitBuilders(Map<String, InnerHitBuilder> innerHits) {
+        InnerHitBuilder.extractInnerHits(positiveQuery, innerHits);
+        InnerHitBuilder.extractInnerHits(negativeQuery, innerHits);
+    }
 }
@@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
+import java.util.Map;
 import java.util.Objects;

 /**
@@ -169,4 +170,9 @@ public class ConstantScoreQueryBuilder extends AbstractQueryBuilder<ConstantScor
         }
         return this;
     }
+
+    @Override
+    protected void extractInnerHitBuilders(Map<String, InnerHitBuilder> innerHits) {
+        InnerHitBuilder.extractInnerHits(filterBuilder, innerHits);
+    }
 }
@@ -134,7 +134,7 @@ public class ExistsQueryBuilder extends AbstractQueryBuilder<ExistsQueryBuilder>
                 (FieldNamesFieldMapper.FieldNamesFieldType)context.getMapperService().fullName(FieldNamesFieldMapper.NAME);
         if (fieldNamesFieldType == null) {
             // can only happen when no types exist, so no docs exist either
-            return Queries.newMatchNoDocsQuery();
+            return Queries.newMatchNoDocsQuery("Missing types in \"" + NAME + "\" query.");
         }

         final Collection<String> fields;
@@ -38,10 +38,10 @@ import org.elasticsearch.index.fielddata.IndexParentChildFieldData;
 import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
-import org.elasticsearch.index.query.support.InnerHitBuilder;

 import java.io.IOException;
 import java.util.Locale;
+import java.util.Map;
 import java.util.Objects;

 /**
@@ -151,9 +151,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
     }

     public HasChildQueryBuilder innerHit(InnerHitBuilder innerHit) {
-        innerHit.setParentChildType(type);
-        innerHit.setQuery(query);
-        this.innerHitBuilder = innerHit;
+        this.innerHitBuilder = new InnerHitBuilder(Objects.requireNonNull(innerHit), query, type);
         return this;
     }

@@ -274,8 +272,11 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
                 }
             }
         }
-        HasChildQueryBuilder hasChildQueryBuilder = new HasChildQueryBuilder(childType, iqb, minChildren, maxChildren,
-                scoreMode, innerHitBuilder);
+        HasChildQueryBuilder hasChildQueryBuilder = new HasChildQueryBuilder(childType, iqb, scoreMode);
+        if (innerHitBuilder != null) {
+            hasChildQueryBuilder.innerHit(innerHitBuilder);
+        }
+        hasChildQueryBuilder.minMaxChildren(minChildren, maxChildren);
         hasChildQueryBuilder.queryName(queryName);
         hasChildQueryBuilder.boost(boost);
         hasChildQueryBuilder.ignoreUnmapped(ignoreUnmapped);
@@ -337,10 +338,6 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
         if (parentFieldMapper.active() == false) {
             throw new QueryShardException(context, "[" + NAME + "] _parent field has no parent type configured");
         }
-        if (innerHitBuilder != null) {
-            context.addInnerHit(innerHitBuilder);
-        }

         String parentType = parentFieldMapper.type();
         DocumentMapper parentDocMapper = context.getMapperService().documentMapper(parentType);
         if (parentDocMapper == null) {
@@ -477,4 +474,11 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
         }
         return this;
     }
+
+    @Override
+    protected void extractInnerHitBuilders(Map<String, InnerHitBuilder> innerHits) {
+        if (innerHitBuilder != null) {
+            innerHitBuilder.inlineInnerHits(innerHits);
+        }
+    }
 }
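Note the changed contract of innerHit(...): the argument is no longer mutated and stored; it is snapshotted into a new InnerHitBuilder that is already bound to the child type and inner query. Under that reading, a caller can reuse one prototype safely (the query and score mode below are placeholders):

    // Hypothetical reuse of a single prototype across queries; each innerHit(...)
    // call copies it, so later changes to 'prototype' do not leak into 'comments'.
    InnerHitBuilder prototype = new InnerHitBuilder();
    HasChildQueryBuilder comments = new HasChildQueryBuilder("comment", commentQuery, scoreMode);
    comments.innerHit(prototype);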
@@ -33,10 +33,10 @@ import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
-import org.elasticsearch.index.query.support.InnerHitBuilder;

 import java.io.IOException;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Objects;
 import java.util.Set;

@@ -127,9 +127,7 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
     }

     public HasParentQueryBuilder innerHit(InnerHitBuilder innerHit) {
-        innerHit.setParentChildType(type);
-        innerHit.setQuery(query);
-        this.innerHit = innerHit;
+        this.innerHit = new InnerHitBuilder(innerHit, query, type);
         return this;
     }

@@ -175,10 +173,6 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
         }
     }

-        if (innerHit != null) {
-            context.addInnerHit(innerHit);
-        }
-
         Set<String> childTypes = new HashSet<>();
         ParentChildIndexFieldData parentChildIndexFieldData = null;
         for (DocumentMapper documentMapper : context.getMapperService().docMappers(false)) {
@@ -282,8 +276,14 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
                 }
             }
         }
-        return new HasParentQueryBuilder(parentType, iqb, score, innerHits).ignoreUnmapped(ignoreUnmapped).queryName(queryName)
+        HasParentQueryBuilder queryBuilder = new HasParentQueryBuilder(parentType, iqb, score)
+                .ignoreUnmapped(ignoreUnmapped)
+                .queryName(queryName)
                 .boost(boost);
+        if (innerHits != null) {
+            queryBuilder.innerHit(innerHits);
+        }
+        return queryBuilder;
     }

     @Override
@@ -313,4 +313,11 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
         }
         return this;
     }
+
+    @Override
+    protected void extractInnerHitBuilders(Map<String, InnerHitBuilder> innerHits) {
+        if (innerHit != null) {
+            innerHit.inlineInnerHits(innerHits);
+        }
+    }
 }
@@ -204,7 +204,7 @@ public class IdsQueryBuilder extends AbstractQueryBuilder<IdsQueryBuilder> {
     protected Query doToQuery(QueryShardContext context) throws IOException {
         Query query;
         if (this.ids.isEmpty()) {
-            query = Queries.newMatchNoDocsQuery();
+            query = Queries.newMatchNoDocsQuery("Missing ids in \"" + this.getName() + "\" query.");
         } else {
             Collection<String> typesForQuery;
             if (types.length == 0) {
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.index.query.support;
+package org.elasticsearch.index.query;

 import org.apache.lucene.search.Sort;
 import org.elasticsearch.action.support.ToXContentToBytes;
@@ -30,11 +30,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
-import org.elasticsearch.index.query.MatchAllQueryBuilder;
-import org.elasticsearch.index.query.ParsedQuery;
-import org.elasticsearch.index.query.QueryBuilder;
-import org.elasticsearch.index.query.QueryParseContext;
-import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptContext;
 import org.elasticsearch.script.SearchScript;
@@ -62,15 +57,12 @@ import static org.elasticsearch.common.xcontent.XContentParser.Token.END_OBJECT;
 public final class InnerHitBuilder extends ToXContentToBytes implements Writeable {

     public static final ParseField NAME_FIELD = new ParseField("name");
     public static final ParseField NESTED_PATH_FIELD = new ParseField("path");
     public static final ParseField PARENT_CHILD_TYPE_FIELD = new ParseField("type");
+    public static final ParseField INNER_HITS_FIELD = new ParseField("inner_hits");

     private final static ObjectParser<InnerHitBuilder, QueryParseContext> PARSER = new ObjectParser<>("inner_hits", InnerHitBuilder::new);

     static {
         PARSER.declareString(InnerHitBuilder::setName, NAME_FIELD);
         PARSER.declareString(InnerHitBuilder::setNestedPath, NESTED_PATH_FIELD);
         PARSER.declareString(InnerHitBuilder::setParentChildType, PARENT_CHILD_TYPE_FIELD);
         PARSER.declareInt(InnerHitBuilder::setFrom, SearchSourceBuilder.FROM_FIELD);
         PARSER.declareInt(InnerHitBuilder::setSize, SearchSourceBuilder.SIZE_FIELD);
         PARSER.declareBoolean(InnerHitBuilder::setExplain, SearchSourceBuilder.EXPLAIN_FIELD);
@@ -100,20 +92,30 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
         }, SearchSourceBuilder._SOURCE_FIELD, ObjectParser.ValueType.OBJECT_OR_BOOLEAN);
         PARSER.declareObject(InnerHitBuilder::setHighlightBuilder, (p, c) -> HighlightBuilder.fromXContent(c),
                 SearchSourceBuilder.HIGHLIGHT_FIELD);
         PARSER.declareObject(InnerHitBuilder::setQuery, (p, c) ->{
             try {
                 return c.parseInnerQueryBuilder();
             } catch (IOException e) {
                 throw new ParsingException(p.getTokenLocation(), "Could not parse inner query definition", e);
             }
         }, SearchSourceBuilder.QUERY_FIELD);
-        PARSER.declareObject(InnerHitBuilder::setInnerHitsBuilder, (p, c) -> {
+        PARSER.declareObject(InnerHitBuilder::setChildInnerHits, (p, c) -> {
             try {
-                return InnerHitsBuilder.fromXContent(c);
+                Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
+                String innerHitName = null;
+                for (XContentParser.Token token = p.nextToken(); token != XContentParser.Token.END_OBJECT; token = p.nextToken()) {
+                    switch (token) {
+                        case START_OBJECT:
+                            InnerHitBuilder innerHitBuilder = InnerHitBuilder.fromXContent(c);
+                            innerHitBuilder.setName(innerHitName);
+                            innerHitBuilders.put(innerHitName, innerHitBuilder);
+                            break;
+                        case FIELD_NAME:
+                            innerHitName = p.currentName();
+                            break;
+                        default:
+                            throw new ParsingException(p.getTokenLocation(), "Expected [" + XContentParser.Token.START_OBJECT + "] in ["
+                                    + p.currentName() + "] but found [" + token + "]", p.getTokenLocation());
+                    }
+                }
+                return innerHitBuilders;
             } catch (IOException e) {
                 throw new ParsingException(p.getTokenLocation(), "Could not parse inner query definition", e);
             }
-        }, SearchSourceBuilder.INNER_HITS_FIELD);
+        }, INNER_HITS_FIELD);
     }

     private String name;
@@ -132,8 +134,8 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
     private List<String> fieldDataFields;
     private List<ScriptField> scriptFields;
     private HighlightBuilder highlightBuilder;
-    private InnerHitsBuilder innerHitsBuilder;
     private FetchSourceContext fetchSourceContext;
+    private Map<String, InnerHitBuilder> childInnerHits;

     public InnerHitBuilder() {
     }
@@ -165,7 +167,62 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
         }
         highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new);
         query = in.readNamedWriteable(QueryBuilder.class);
-        innerHitsBuilder = in.readOptionalWriteable(InnerHitsBuilder::new);
+        if (in.readBoolean()) {
+            int size = in.readVInt();
+            childInnerHits = new HashMap<>(size);
+            for (int i = 0; i < size; i++) {
+                childInnerHits.put(in.readString(), new InnerHitBuilder(in));
+            }
+        }
     }

+    private InnerHitBuilder(InnerHitBuilder other) {
+        name = other.name;
+        from = other.from;
+        size = other.size;
+        explain = other.explain;
+        version = other.version;
+        trackScores = other.trackScores;
+        if (other.fieldNames != null) {
+            fieldNames = new ArrayList<>(other.fieldNames);
+        }
+        if (other.fieldDataFields != null) {
+            fieldDataFields = new ArrayList<>(other.fieldDataFields);
+        }
+        if (other.scriptFields != null) {
+            scriptFields = new ArrayList<>(other.scriptFields);
+        }
+        if (other.fetchSourceContext != null) {
+            fetchSourceContext = new FetchSourceContext(
+                    other.fetchSourceContext.fetchSource(), other.fetchSourceContext.includes(), other.fetchSourceContext.excludes()
+            );
+        }
+        if (other.sorts != null) {
+            sorts = new ArrayList<>(other.sorts);
+        }
+        highlightBuilder = other.highlightBuilder;
+        if (other.childInnerHits != null) {
+            childInnerHits = new HashMap<>(other.childInnerHits);
+        }
+    }
+
+    InnerHitBuilder(InnerHitBuilder other, String nestedPath, QueryBuilder query) {
+        this(other);
+        this.query = query;
+        this.nestedPath = nestedPath;
+        if (name == null) {
+            this.name = nestedPath;
+        }
+    }
+
+    InnerHitBuilder(InnerHitBuilder other, QueryBuilder query, String parentChildType) {
+        this(other);
+        this.query = query;
+        this.parentChildType = parentChildType;
+        if (name == null) {
+            this.name = parentChildType;
+        }
+    }
+
     @Override
@@ -196,17 +253,15 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
         }
         out.writeOptionalWriteable(highlightBuilder);
         out.writeNamedWriteable(query);
-        out.writeOptionalWriteable(innerHitsBuilder);
-    }
-
-    public InnerHitBuilder setParentChildType(String parentChildType) {
-        this.parentChildType = parentChildType;
-        return this;
-    }
-
-    public InnerHitBuilder setNestedPath(String nestedPath) {
-        this.nestedPath = nestedPath;
-        return this;
+        boolean hasChildInnerHits = childInnerHits != null;
+        out.writeBoolean(hasChildInnerHits);
+        if (hasChildInnerHits) {
+            out.writeVInt(childInnerHits.size());
+            for (Map.Entry<String, InnerHitBuilder> entry : childInnerHits.entrySet()) {
+                out.writeString(entry.getKey());
+                entry.getValue().writeTo(out);
+            }
+        }
     }

     public String getName() {
@@ -347,72 +402,53 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
         return this;
     }

-    public QueryBuilder<?> getQuery() {
+    QueryBuilder<?> getQuery() {
         return query;
     }

-    public InnerHitBuilder setQuery(QueryBuilder<?> query) {
-        this.query = Objects.requireNonNull(query);
-        return this;
+    void setChildInnerHits(Map<String, InnerHitBuilder> childInnerHits) {
+        this.childInnerHits = childInnerHits;
     }

-    public InnerHitBuilder setInnerHitsBuilder(InnerHitsBuilder innerHitsBuilder) {
-        this.innerHitsBuilder = innerHitsBuilder;
-        return this;
+    String getParentChildType() {
+        return parentChildType;
     }

-    public InnerHitsContext.BaseInnerHits buildInline(SearchContext parentSearchContext, QueryShardContext context) throws IOException {
-        InnerHitsContext.BaseInnerHits innerHitsContext;
-        if (nestedPath != null) {
-            ObjectMapper nestedObjectMapper = context.getObjectMapper(nestedPath);
-            ObjectMapper parentObjectMapper = context.nestedScope().getObjectMapper();
-            innerHitsContext = new InnerHitsContext.NestedInnerHits(
-                    name, parentSearchContext, parentObjectMapper, nestedObjectMapper
-            );
-        } else if (parentChildType != null) {
-            DocumentMapper documentMapper = context.getMapperService().documentMapper(parentChildType);
-            innerHitsContext = new InnerHitsContext.ParentChildInnerHits(
-                    name, parentSearchContext, context.getMapperService(), documentMapper
-            );
-        } else {
-            throw new IllegalStateException("Neither a nested or parent/child inner hit");
-        }
-        setupInnerHitsContext(context, innerHitsContext);
-        return innerHitsContext;
+    String getNestedPath() {
+        return nestedPath;
+    }
+
+    void addChildInnerHit(InnerHitBuilder innerHitBuilder) {
+        if (childInnerHits == null) {
+            childInnerHits = new HashMap<>();
+        }
+        this.childInnerHits.put(innerHitBuilder.getName(), innerHitBuilder);
     }

     /**
      * Top level inner hits are different than inline inner hits:
      * 1) Nesting. Top level inner hits can hold nested inner hits, which is why this method is recursive (via buildChildInnerHits)
      * 2) The top level inner hits query is optional, whereas with inline inner hits it is based on the nested, has_child
      *    or has_parent's inner query.
      *
      * Because of these differences there are different methods for building inline (which is simpler) and top level inner
      * hits. Also top level inner hits will soon be deprecated.
      */
-    public InnerHitsContext.BaseInnerHits buildTopLevel(SearchContext parentSearchContext, QueryShardContext context,
-            InnerHitsContext innerHitsContext) throws IOException {
+    public InnerHitsContext.BaseInnerHits build(SearchContext parentSearchContext,
+            InnerHitsContext innerHitsContext) throws IOException {
+        QueryShardContext queryShardContext = parentSearchContext.getQueryShardContext();
         if (nestedPath != null) {
-            ObjectMapper nestedObjectMapper = context.getObjectMapper(nestedPath);
-            ObjectMapper parentObjectMapper = context.nestedScope().nextLevel(nestedObjectMapper);
+            ObjectMapper nestedObjectMapper = queryShardContext.getObjectMapper(nestedPath);
+            ObjectMapper parentObjectMapper = queryShardContext.nestedScope().nextLevel(nestedObjectMapper);
             InnerHitsContext.NestedInnerHits nestedInnerHits = new InnerHitsContext.NestedInnerHits(
                     name, parentSearchContext, parentObjectMapper, nestedObjectMapper
             );
-            setupInnerHitsContext(context, nestedInnerHits);
-            if (innerHitsBuilder != null) {
-                buildChildInnerHits(parentSearchContext, context, nestedInnerHits);
+            setupInnerHitsContext(queryShardContext, nestedInnerHits);
+            if (childInnerHits != null) {
+                buildChildInnerHits(parentSearchContext, nestedInnerHits);
             }
-            context.nestedScope().previousLevel();
+            queryShardContext.nestedScope().previousLevel();
             innerHitsContext.addInnerHitDefinition(nestedInnerHits);
             return nestedInnerHits;
         } else if (parentChildType != null) {
-            DocumentMapper documentMapper = context.getMapperService().documentMapper(parentChildType);
+            DocumentMapper documentMapper = queryShardContext.getMapperService().documentMapper(parentChildType);
             InnerHitsContext.ParentChildInnerHits parentChildInnerHits = new InnerHitsContext.ParentChildInnerHits(
-                    name, parentSearchContext, context.getMapperService(), documentMapper
+                    name, parentSearchContext, queryShardContext.getMapperService(), documentMapper
             );
-            setupInnerHitsContext(context, parentChildInnerHits);
-            if (innerHitsBuilder != null) {
-                buildChildInnerHits(parentSearchContext, context, parentChildInnerHits);
+            setupInnerHitsContext(queryShardContext, parentChildInnerHits);
+            if (childInnerHits != null) {
+                buildChildInnerHits(parentSearchContext, parentChildInnerHits);
             }
             innerHitsContext.addInnerHitDefinition(parentChildInnerHits);
             return parentChildInnerHits;
@@ -421,12 +457,11 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
         }
     }

-    private void buildChildInnerHits(SearchContext parentSearchContext, QueryShardContext context,
-            InnerHitsContext.BaseInnerHits innerHits) throws IOException {
+    private void buildChildInnerHits(SearchContext parentSearchContext, InnerHitsContext.BaseInnerHits innerHits) throws IOException {
         Map<String, InnerHitsContext.BaseInnerHits> childInnerHits = new HashMap<>();
-        for (Map.Entry<String, InnerHitBuilder> entry : innerHitsBuilder.getInnerHitsBuilders().entrySet()) {
-            InnerHitsContext.BaseInnerHits childInnerHit = entry.getValue().buildTopLevel(
-                    parentSearchContext, context, new InnerHitsContext()
+        for (Map.Entry<String, InnerHitBuilder> entry : this.childInnerHits.entrySet()) {
+            InnerHitsContext.BaseInnerHits childInnerHit = entry.getValue().build(
+                    parentSearchContext, new InnerHitsContext()
             );
             childInnerHits.put(entry.getKey(), childInnerHit);
         }
@@ -480,16 +515,23 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
         innerHitsContext.parsedQuery(parsedQuery);
     }

+    public void inlineInnerHits(Map<String, InnerHitBuilder> innerHits) {
+        InnerHitBuilder copy = new InnerHitBuilder(this);
+        copy.parentChildType = this.parentChildType;
+        copy.nestedPath = this.nestedPath;
+        copy.query = this.query;
+        innerHits.put(copy.getName(), copy);
+
+        Map<String, InnerHitBuilder> childInnerHits = new HashMap<>();
+        extractInnerHits(query, childInnerHits);
+        if (childInnerHits.size() > 0) {
+            copy.setChildInnerHits(childInnerHits);
+        }
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
         if (nestedPath != null) {
             builder.field(NESTED_PATH_FIELD.getPreferredName(), nestedPath);
         }
         if (parentChildType != null) {
             builder.field(PARENT_CHILD_TYPE_FIELD.getPreferredName(), parentChildType);
         }
         if (name != null) {
             builder.field(NAME_FIELD.getPreferredName(), name);
         }
@@ -536,9 +578,12 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
         if (highlightBuilder != null) {
             builder.field(SearchSourceBuilder.HIGHLIGHT_FIELD.getPreferredName(), highlightBuilder, params);
         }
         builder.field(SearchSourceBuilder.QUERY_FIELD.getPreferredName(), query, params);
-        if (innerHitsBuilder != null) {
-            builder.field(SearchSourceBuilder.INNER_HITS_FIELD.getPreferredName(), innerHitsBuilder, params);
+        if (childInnerHits != null) {
+            builder.startObject(INNER_HITS_FIELD.getPreferredName());
+            for (Map.Entry<String, InnerHitBuilder> entry : childInnerHits.entrySet()) {
+                builder.field(entry.getKey(), entry.getValue(), params);
+            }
+            builder.endObject();
         }
         builder.endObject();
         return builder;
@@ -565,17 +610,26 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
                 Objects.equals(sorts, that.sorts) &&
                 Objects.equals(highlightBuilder, that.highlightBuilder) &&
                 Objects.equals(query, that.query) &&
-                Objects.equals(innerHitsBuilder, that.innerHitsBuilder);
+                Objects.equals(childInnerHits, that.childInnerHits);
     }

     @Override
     public int hashCode() {
         return Objects.hash(name, nestedPath, parentChildType, from, size, explain, version, trackScores, fieldNames,
-                fieldDataFields, scriptFields, fetchSourceContext, sorts, highlightBuilder, query, innerHitsBuilder);
+                fieldDataFields, scriptFields, fetchSourceContext, sorts, highlightBuilder, query, childInnerHits);
     }

     public static InnerHitBuilder fromXContent(QueryParseContext context) throws IOException {
         return PARSER.parse(context.parser(), new InnerHitBuilder(), context);
     }

+    public static void extractInnerHits(QueryBuilder<?> query, Map<String, InnerHitBuilder> innerHitBuilders) {
+        if (query instanceof AbstractQueryBuilder) {
+            ((AbstractQueryBuilder) query).extractInnerHitBuilders(innerHitBuilders);
+        } else {
+            throw new IllegalStateException("provided query builder [" + query.getClass() +
+                    "] class should inherit from AbstractQueryBuilder, but it doesn't");
+        }
+    }
+
 }
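On the wire, the removed InnerHitsBuilder is replaced by a hand-rolled optional map: a presence boolean, a vint size, then name/builder pairs, exactly as in the readFrom/writeTo code above. The framing in isolation, assuming the standard StreamOutput primitives:

    import java.io.IOException;
    import java.util.Map;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    final class OptionalMapFraming {
        // Mirrors InnerHitBuilder.writeTo(): a null map encodes as a single 'false' byte.
        static void writeOptionalMap(StreamOutput out, Map<String, ? extends Writeable> map) throws IOException {
            boolean present = map != null;
            out.writeBoolean(present);
            if (present) {
                out.writeVInt(map.size());
                for (Map.Entry<String, ? extends Writeable> entry : map.entrySet()) {
                    out.writeString(entry.getKey());
                    entry.getValue().writeTo(out);
                }
            }
        }
    }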
@@ -93,7 +93,7 @@ public class MatchNoneQueryBuilder extends AbstractQueryBuilder<MatchNoneQueryBu

     @Override
     protected Query doToQuery(QueryShardContext context) throws IOException {
-        return Queries.newMatchNoDocsQuery();
+        return Queries.newMatchNoDocsQuery("User requested \"" + this.getName() + "\" query.");
     }

     @Override
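Every newMatchNoDocsQuery call site in this change now supplies a reason string. Assuming the helper delegates to Lucene's MatchNoDocsQuery(String reason) constructor, the reason surfaces in the query's toString() and explanation output, which is the debugging payoff when a search unexpectedly returns nothing:

    import org.apache.lucene.search.MatchNoDocsQuery;
    import org.apache.lucene.search.Query;

    // Sketch of the assumed delegation; the reason string rides along with the
    // query so empty results become self-describing.
    final class NoDocs {
        static Query newMatchNoDocsQuery(String reason) {
            return new MatchNoDocsQuery(reason);
        }
    }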
@@ -32,9 +32,9 @@ import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
-import org.elasticsearch.index.query.support.InnerHitBuilder;

 import java.io.IOException;
+import java.util.Map;
 import java.util.Objects;

 public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder> {
@@ -109,9 +109,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
     }

     public NestedQueryBuilder innerHit(InnerHitBuilder innerHit) {
-        innerHit.setNestedPath(path);
-        innerHit.setQuery(query);
-        this.innerHitBuilder = innerHit;
+        this.innerHitBuilder = new InnerHitBuilder(innerHit, path, query);
         return this;
     }

@@ -196,8 +194,14 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
                 }
             }
         }
-        return new NestedQueryBuilder(path, query, scoreMode, innerHitBuilder).ignoreUnmapped(ignoreUnmapped).queryName(queryName)
+        NestedQueryBuilder queryBuilder = new NestedQueryBuilder(path, query, scoreMode)
+                .ignoreUnmapped(ignoreUnmapped)
+                .queryName(queryName)
                 .boost(boost);
+        if (innerHitBuilder != null) {
+            queryBuilder.innerHit(innerHitBuilder);
+        }
+        return queryBuilder;
     }

     @Override
@@ -236,9 +240,6 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
         final Query childFilter;
         final Query innerQuery;
         ObjectMapper objectMapper = context.nestedScope().getObjectMapper();
-        if (innerHitBuilder != null) {
-            context.addInnerHit(innerHitBuilder);
-        }
         if (objectMapper == null) {
             parentFilter = context.bitsetFilter(Queries.newNonNestedFilter());
         } else {
@@ -265,4 +266,11 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
         }
         return this;
     }
+
+    @Override
+    protected void extractInnerHitBuilders(Map<String, InnerHitBuilder> innerHits) {
+        if (innerHitBuilder != null) {
+            innerHitBuilder.inlineInnerHits(innerHits);
+        }
+    }
 }
@@ -57,12 +57,10 @@ import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.core.TextFieldMapper;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
 import org.elasticsearch.index.percolator.PercolatorQueryCache;
-import org.elasticsearch.index.query.support.InnerHitBuilder;
 import org.elasticsearch.index.query.support.NestedScope;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
 import org.elasticsearch.script.ScriptService;
-import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
 import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.lookup.SearchLookup;

@@ -93,7 +91,7 @@ public class QueryShardContext extends QueryRewriteContext {
     private boolean allowUnmappedFields;
     private boolean mapUnmappedFieldAsString;
     private NestedScope nestedScope;
-    boolean isFilter; // pkg private for testing
+    private boolean isFilter;

     public QueryShardContext(IndexSettings indexSettings, BitsetFilterCache bitsetFilterCache, IndexFieldDataService indexFieldDataService,
             MapperService mapperService, SimilarityService similarityService, ScriptService scriptService,
@@ -118,7 +116,7 @@ public class QueryShardContext extends QueryRewriteContext {
         this.types = source.getTypes();
     }

-    public void reset() {
+    private void reset() {
         allowUnmappedFields = indexSettings.isDefaultAllowUnmappedFields();
         this.lookup = null;
         this.namedQueries.clear();
@@ -185,14 +183,8 @@ public class QueryShardContext extends QueryRewriteContext {
         return isFilter;
     }

-    public void addInnerHit(InnerHitBuilder innerHitBuilder) throws IOException {
-        SearchContext sc = SearchContext.current();
-        if (sc == null) {
-            throw new QueryShardException(this, "inner_hits unsupported");
-        }
-
-        InnerHitsContext innerHitsContext = sc.innerHits();
-        innerHitsContext.addInnerHitDefinition(innerHitBuilder.buildInline(sc, this));
+    void setIsFilter(boolean isFilter) {
+        this.isFilter = isFilter;
     }

     public Collection<String> simpleMatchToIndexNames(String pattern) {
@@ -373,7 +365,7 @@ public class QueryShardContext extends QueryRewriteContext {
     private static Query toQuery(final QueryBuilder<?> queryBuilder, final QueryShardContext context) throws IOException {
         final Query query = QueryBuilder.rewriteQuery(queryBuilder, context).toQuery(context);
         if (query == null) {
-            return Queries.newMatchNoDocsQuery();
+            return Queries.newMatchNoDocsQuery("No query left after rewrite.");
        }
        return query;
    }
@@ -381,5 +373,4 @@ public class QueryShardContext extends QueryRewriteContext {
     public final Index index() {
         return indexSettings.getIndex();
     }
-
 }
@@ -317,7 +317,7 @@ public class TermsQueryBuilder extends AbstractQueryBuilder<TermsQueryBuilder> {
             throw new UnsupportedOperationException("query must be rewritten first");
         }
         if (values == null || values.isEmpty()) {
-            return Queries.newMatchNoDocsQuery();
+            return Queries.newMatchNoDocsQuery("No terms supplied for \"" + getName() + "\" query.");
         }
         return handleTermsQuery(values, fieldName, context);
     }
@@ -42,12 +42,14 @@ import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryParseContext;
 import org.elasticsearch.index.query.QueryRewriteContext;
 import org.elasticsearch.index.query.QueryShardContext;
+import org.elasticsearch.index.query.InnerHitBuilder;

 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Locale;
+import java.util.Map;
 import java.util.Objects;

 /**
@@ -429,8 +431,15 @@ public class FunctionScoreQueryBuilder extends AbstractQueryBuilder<FunctionScor
         return this;
     }

+    @Override
+    protected void extractInnerHitBuilders(Map<String, InnerHitBuilder> innerHits) {
+        InnerHitBuilder.extractInnerHits(query(), innerHits);
+    }
+
     public static FunctionScoreQueryBuilder fromXContent(ParseFieldRegistry<ScoreFunctionParser<?>> scoreFunctionsRegistry,
-            QueryParseContext parseContext) throws IOException {
+                                                         QueryParseContext parseContext) throws IOException {
         XContentParser parser = parseContext.parser();

         QueryBuilder<?> query = null;
@@ -1,126 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.index.query.support;
-
-import org.elasticsearch.action.support.ToXContentToBytes;
-import org.elasticsearch.common.ParsingException;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.common.xcontent.XContentParser.Token;
-import org.elasticsearch.index.query.QueryParseContext;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Objects;
-
-public final class InnerHitsBuilder extends ToXContentToBytes implements Writeable {
-    private final Map<String, InnerHitBuilder> innerHitsBuilders;
-
-    public InnerHitsBuilder() {
-        this.innerHitsBuilders = new HashMap<>();
-    }
-
-    public InnerHitsBuilder(Map<String, InnerHitBuilder> innerHitsBuilders) {
-        this.innerHitsBuilders = Objects.requireNonNull(innerHitsBuilders);
-    }
-
-    /**
-     * Read from a stream.
-     */
-    public InnerHitsBuilder(StreamInput in) throws IOException {
-        int size = in.readVInt();
-        innerHitsBuilders = new HashMap<>(size);
-        for (int i = 0; i < size; i++) {
-            innerHitsBuilders.put(in.readString(), new InnerHitBuilder(in));
-        }
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(innerHitsBuilders.size());
-        for (Map.Entry<String, InnerHitBuilder> entry : innerHitsBuilders.entrySet()) {
-            out.writeString(entry.getKey());
-            entry.getValue().writeTo(out);
-        }
-    }
-
-    public InnerHitsBuilder addInnerHit(String name, InnerHitBuilder builder) {
-        Objects.requireNonNull(name);
-        Objects.requireNonNull(builder);
-        this.innerHitsBuilders.put(name, builder.setName(name));
-        return this;
-    }
-
-    public Map<String, InnerHitBuilder> getInnerHitsBuilders() {
-        return innerHitsBuilders;
-    }
-
-    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject();
-        for (Map.Entry<String, InnerHitBuilder> entry : innerHitsBuilders.entrySet()) {
-            builder.field(entry.getKey(), entry.getValue(), params);
-        }
-        builder.endObject();
-        return builder;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-
-        InnerHitsBuilder that = (InnerHitsBuilder) o;
-        return innerHitsBuilders.equals(that.innerHitsBuilders);
-
-    }
-
-    @Override
-    public int hashCode() {
-        return innerHitsBuilders.hashCode();
-    }
-
-    public static InnerHitsBuilder fromXContent(QueryParseContext context) throws IOException {
-        Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
-        String innerHitName = null;
-        XContentParser parser = context.parser();
-        for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
-            switch (token) {
-                case START_OBJECT:
-                    InnerHitBuilder innerHitBuilder = InnerHitBuilder.fromXContent(context);
-                    innerHitBuilder.setName(innerHitName);
-                    innerHitBuilders.put(innerHitName, innerHitBuilder);
-                    break;
-                case FIELD_NAME:
-                    innerHitName = parser.currentName();
-                    break;
-                default:
-                    throw new ParsingException(parser.getTokenLocation(), "Expected [" + XContentParser.Token.START_OBJECT + "] in ["
-                            + parser.currentName() + "] but found [" + token + "]", parser.getTokenLocation());
-            }
-        }
-        return new InnerHitsBuilder(innerHitBuilders);
-    }
-
-}
@@ -286,7 +286,11 @@ public class MatchQuery {
     }

     protected Query zeroTermsQuery() {
-        return zeroTermsQuery == DEFAULT_ZERO_TERMS_QUERY ? Queries.newMatchNoDocsQuery() : Queries.newMatchAllQuery();
+        if (zeroTermsQuery == DEFAULT_ZERO_TERMS_QUERY) {
+            return Queries.newMatchNoDocsQuery("Matching no documents because no terms present.");
+        }
+
+        return Queries.newMatchAllQuery();
     }

     private class MatchQueryBuilder extends QueryBuilder {
@@ -680,6 +680,7 @@ public class IndexShard extends AbstractIndexShardComponent {
         CompletionStats completionStats = new CompletionStats();
         try (final Engine.Searcher currentSearcher = acquireSearcher("completion_stats")) {
+            completionStats.add(CompletionFieldStats.completionStats(currentSearcher.reader(), fields));
             // Necessary for 2.x shards:
             Completion090PostingsFormat postingsFormat = ((Completion090PostingsFormat)
                 PostingsFormat.forName(Completion090PostingsFormat.CODEC_NAME));
             completionStats.add(postingsFormat.completionStats(currentSearcher.reader(), fields));
@@ -681,8 +681,8 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
      */
     @Nullable
     public IndexMetaData verifyIndexIsDeleted(final Index index, final ClusterState clusterState) {
-        // this method should only be called when we know the index is not part of the cluster state
-        if (clusterState.metaData().hasIndex(index.getName())) {
+        // this method should only be called when we know the index (name + uuid) is not part of the cluster state
+        if (clusterState.metaData().index(index) != null) {
             throw new IllegalStateException("Cannot delete index [" + index + "], it is still part of the cluster state.");
         }
         if (nodeEnv.hasNodeFile() && FileSystemUtils.exists(nodeEnv.indexPaths(index))) {
@@ -83,6 +83,28 @@ public final class ConfigurationUtils {
                 value.getClass().getName() + "]");
     }

+    public static Boolean readBooleanProperty(String processorType, String processorTag, Map<String, Object> configuration,
+                                              String propertyName, boolean defaultValue) {
+        Object value = configuration.remove(propertyName);
+        if (value == null) {
+            return defaultValue;
+        } else {
+            return readBoolean(processorType, processorTag, propertyName, value).booleanValue();
+        }
+    }
+
+    private static Boolean readBoolean(String processorType, String processorTag, String propertyName, Object value) {
+        if (value == null) {
+            return null;
+        }
+        if (value instanceof Boolean) {
+            return (Boolean) value;
+        }
+        throw newConfigurationException(processorType, processorTag, propertyName, "property isn't a boolean, but of type [" +
+                value.getClass().getName() + "]");
+    }
+
     /**
      * Returns and removes the specified property from the specified configuration map.
      *
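readBooleanProperty removes the key from the configuration map, so a factory's trailing "unknown property" validation will not flag settings that were consumed, and it accepts only genuine Booleans rather than parsing strings. A hypothetical call site ("ignore_missing" and its default are invented for illustration):

    import java.util.HashMap;
    import java.util.Map;

    class BooleanPropertyExample {
        static boolean read() {
            Map<String, Object> config = new HashMap<>();
            config.put("ignore_missing", true);
            boolean ignoreMissing = ConfigurationUtils.readBooleanProperty("my_processor", "tag1", config,
                    "ignore_missing", false);
            // the key has been consumed: config.containsKey("ignore_missing") is now false
            return ignoreMissing;
        }
    }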
@@ -51,6 +51,8 @@ import static java.nio.charset.StandardCharsets.UTF_8;
 public final class IngestDocument {

     public final static String INGEST_KEY = "_ingest";
+    private static final String INGEST_KEY_PREFIX = INGEST_KEY + ".";
+    private static final String SOURCE_PREFIX = SourceFieldMapper.NAME + ".";

     static final String TIMESTAMP = "timestamp";

@@ -116,6 +118,18 @@ public final class IngestDocument {
         return cast(path, context, clazz);
     }

+    /**
+     * Returns the value contained in the document with the provided templated path
+     * @param pathTemplate The path within the document in dot-notation
+     * @param clazz The expected class of the field value
+     * @return the value for the provided path if existing, null otherwise
+     * @throws IllegalArgumentException if the pathTemplate is null, empty, invalid, if the field doesn't exist,
+     * or if the field that is found at the provided path is not of the expected type.
+     */
+    public <T> T getFieldValue(TemplateService.Template pathTemplate, Class<T> clazz) {
+        return getFieldValue(renderTemplate(pathTemplate), clazz);
+    }
+
     /**
      * Returns the value contained in the document for the provided path as a byte array.
      * If the path value is a string, a base64 decode operation will happen.
@@ -141,6 +155,16 @@ public final class IngestDocument {
         }
     }

+    /**
+     * Checks whether the document contains a value for the provided templated path
+     * @param fieldPathTemplate the template for the path within the document in dot-notation
+     * @return true if the document contains a value for the field, false otherwise
+     * @throws IllegalArgumentException if the path is null, empty or invalid
+     */
+    public boolean hasField(TemplateService.Template fieldPathTemplate) {
+        return hasField(renderTemplate(fieldPathTemplate));
+    }
+
     /**
      * Checks whether the document contains a value for the provided path
      * @param path The path within the document in dot-notation
@@ -578,6 +602,7 @@ public final class IngestDocument {
     }

     private class FieldPath {
+
         private final String[] pathElements;
         private final Object initialContext;

@@ -586,13 +611,13 @@ public final class IngestDocument {
                 throw new IllegalArgumentException("path cannot be null nor empty");
             }
             String newPath;
-            if (path.startsWith(INGEST_KEY + ".")) {
+            if (path.startsWith(INGEST_KEY_PREFIX)) {
                 initialContext = ingestMetadata;
-                newPath = path.substring(8, path.length());
+                newPath = path.substring(INGEST_KEY_PREFIX.length(), path.length());
             } else {
                 initialContext = sourceAndMetadata;
-                if (path.startsWith(SourceFieldMapper.NAME + ".")) {
-                    newPath = path.substring(8, path.length());
+                if (path.startsWith(SOURCE_PREFIX)) {
+                    newPath = path.substring(SOURCE_PREFIX.length(), path.length());
                 } else {
                     newPath = path;
                 }
@@ -602,5 +627,6 @@ public final class IngestDocument {
                 throw new IllegalArgumentException("path [" + path + "] is not valid");
             }
         }
+
     }
 }
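The previous code stripped a hard-coded 8 characters, which only worked because "_ingest." and "_source." both happen to be 8 characters long. Deriving the offset from the named prefix keeps the startsWith check and the substring in sync by construction:

    final class PrefixStripping {
        private static final String INGEST_KEY_PREFIX = "_ingest.";

        // The offset tracks the prefix automatically; renaming the prefix cannot
        // silently desynchronize startsWith() from substring().
        static String stripIngestPrefix(String path) {
            return path.startsWith(INGEST_KEY_PREFIX)
                    ? path.substring(INGEST_KEY_PREFIX.length())
                    : path;
        }
    }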
@ -0,0 +1,156 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.ingest.processor;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ingest.core.AbstractProcessor;
import org.elasticsearch.ingest.core.AbstractProcessorFactory;
import org.elasticsearch.ingest.core.ConfigurationUtils;
import org.elasticsearch.ingest.core.IngestDocument;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

import java.util.ArrayList;
import java.util.Collections;
import java.util.IllformedLocaleException;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.function.Function;

public final class DateIndexNameProcessor extends AbstractProcessor {

    public static final String TYPE = "date_index_name";

    private final String field;
    private final String indexNamePrefix;
    private final String dateRounding;
    private final String indexNameFormat;
    private final DateTimeZone timezone;
    private final List<Function<String, DateTime>> dateFormats;

    DateIndexNameProcessor(String tag, String field, List<Function<String, DateTime>> dateFormats, DateTimeZone timezone,
                           String indexNamePrefix, String dateRounding, String indexNameFormat) {
        super(tag);
        this.field = field;
        this.timezone = timezone;
        this.dateFormats = dateFormats;
        this.indexNamePrefix = indexNamePrefix;
        this.dateRounding = dateRounding;
        this.indexNameFormat = indexNameFormat;
    }

    @Override
    public void execute(IngestDocument ingestDocument) throws Exception {
        String date = ingestDocument.getFieldValue(field, String.class);

        DateTime dateTime = null;
        Exception lastException = null;
        for (Function<String, DateTime> dateParser : dateFormats) {
            try {
                dateTime = dateParser.apply(date);
            } catch (Exception e) {
                // try the next parser and keep track of the exceptions
                lastException = ExceptionsHelper.useOrSuppress(lastException, e);
            }
        }

        if (dateTime == null) {
            throw new IllegalArgumentException("unable to parse date [" + date + "]", lastException);
        }

        DateTimeFormatter formatter = DateTimeFormat.forPattern(indexNameFormat);
        StringBuilder builder = new StringBuilder()
                .append('<')
                .append(indexNamePrefix)
                .append('{')
                .append(formatter.print(dateTime)).append("||/").append(dateRounding)
                .append('{').append(indexNameFormat).append('|').append(timezone).append('}')
                .append('}')
                .append('>');
        String dynamicIndexName = builder.toString();
        ingestDocument.setFieldValue(IngestDocument.MetaData.INDEX.getFieldName(), dynamicIndexName);
    }

    @Override
    public String getType() {
        return TYPE;
    }

    String getField() {
        return field;
    }

    String getIndexNamePrefix() {
        return indexNamePrefix;
    }

    String getDateRounding() {
        return dateRounding;
    }

    String getIndexNameFormat() {
        return indexNameFormat;
    }

    DateTimeZone getTimezone() {
        return timezone;
    }

    List<Function<String, DateTime>> getDateFormats() {
        return dateFormats;
    }

    public static final class Factory extends AbstractProcessorFactory<DateIndexNameProcessor> {

        @Override
        protected DateIndexNameProcessor doCreate(String tag, Map<String, Object> config) throws Exception {
            String localeString = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, "locale");
            String timezoneString = ConfigurationUtils.readOptionalStringProperty(TYPE, tag, config, "timezone");
            DateTimeZone timezone = timezoneString == null ? DateTimeZone.UTC : DateTimeZone.forID(timezoneString);
            Locale locale = Locale.ENGLISH;
            if (localeString != null) {
                try {
                    locale = (new Locale.Builder()).setLanguageTag(localeString).build();
                } catch (IllformedLocaleException e) {
                    throw new IllegalArgumentException("Invalid language tag specified: " + localeString);
                }
            }
            List<String> dateFormatStrings = ConfigurationUtils.readOptionalList(TYPE, tag, config, "date_formats");
            if (dateFormatStrings == null) {
                dateFormatStrings = Collections.singletonList("yyyy-MM-dd'T'HH:mm:ss.SSSZ");
            }
            List<Function<String, DateTime>> dateFormats = new ArrayList<>(dateFormatStrings.size());
            for (String format : dateFormatStrings) {
                DateFormat dateFormat = DateFormat.fromString(format);
                dateFormats.add(dateFormat.getFunction(format, timezone, locale));
            }

            String field = ConfigurationUtils.readStringProperty(TYPE, tag, config, "field");
            String indexNamePrefix = ConfigurationUtils.readStringProperty(TYPE, tag, config, "index_name_prefix", "");
            String dateRounding = ConfigurationUtils.readStringProperty(TYPE, tag, config, "date_rounding");
            String indexNameFormat = ConfigurationUtils.readStringProperty(TYPE, tag, config, "index_name_format", "yyyy-MM-dd");
            return new DateIndexNameProcessor(tag, field, dateFormats, timezone, indexNamePrefix, dateRounding, indexNameFormat);
        }
    }

}
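Note that execute() above does not pick a concrete index itself: it writes a date-math index name expression into the document's _index metadata field, which Elasticsearch resolves at indexing time. A minimal sketch of the string the StringBuilder produces, under assumed configuration values (the prefix, rounding, and format below are hypothetical, not from the commit):

    // Sketch only: reproduces the string shape built in execute(), assuming
    // index_name_prefix "myindex-", date_rounding "M",
    // index_name_format "yyyy-MM-dd" and the default UTC timezone.
    String indexNamePrefix = "myindex-";
    String dateRounding = "M";
    String indexNameFormat = "yyyy-MM-dd";
    String printedDate = "2016-04-25";  // what formatter.print(dateTime) would yield
    String expression = "<" + indexNamePrefix + "{" + printedDate + "||/" + dateRounding
            + "{" + indexNameFormat + "|" + DateTimeZone.UTC + "}}>";
    // expression == "<myindex-{2016-04-25||/M{yyyy-MM-dd|UTC}}>"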
@@ -36,15 +36,25 @@ public final class SetProcessor extends AbstractProcessor {

    public static final String TYPE = "set";

    private final boolean overrideEnabled;
    private final TemplateService.Template field;
    private final ValueSource value;

    SetProcessor(String tag, TemplateService.Template field, ValueSource value) {
        this(tag, field, value, true);
    }

    SetProcessor(String tag, TemplateService.Template field, ValueSource value, boolean overrideEnabled) {
        super(tag);
        this.overrideEnabled = overrideEnabled;
        this.field = field;
        this.value = value;
    }

    public boolean isOverrideEnabled() {
        return overrideEnabled;
    }

    public TemplateService.Template getField() {
        return field;
    }

@@ -55,7 +65,9 @@ public final class SetProcessor extends AbstractProcessor {

    @Override
    public void execute(IngestDocument document) {
        document.setFieldValue(field, value);
        if (overrideEnabled || document.hasField(field) == false || document.getFieldValue(field, Object.class) == null) {
            document.setFieldValue(field, value);
        }
    }

    @Override

@@ -75,7 +87,12 @@ public final class SetProcessor extends AbstractProcessor {
        public SetProcessor doCreate(String processorTag, Map<String, Object> config) throws Exception {
            String field = ConfigurationUtils.readStringProperty(TYPE, processorTag, config, "field");
            Object value = ConfigurationUtils.readObject(TYPE, processorTag, config, "value");
            return new SetProcessor(processorTag, templateService.compile(field), ValueSource.wrap(value, templateService));
            boolean overrideEnabled = ConfigurationUtils.readBooleanProperty(TYPE, processorTag, config, "override", true);
            return new SetProcessor(
                    processorTag,
                    templateService.compile(field),
                    ValueSource.wrap(value, templateService),
                    overrideEnabled);
        }
    }
}
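What the new override option buys (it defaults to true, so existing behavior is preserved): with override set to false, execute() only writes the value when the target field is missing or holds null. A hypothetical factory invocation, assuming a templateService and a Factory instance are already wired up; the field names and values here are illustrative only:

    // Hypothetical usage sketch, not part of the commit:
    Map<String, Object> config = new HashMap<>();
    config.put("field", "host");
    config.put("value", "web-01");
    config.put("override", false);  // new option; omitting it means true
    SetProcessor processor = factory.doCreate("my-tag", config);
    // processor.execute(document) now writes "web-01" only if "host" is absent or null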
@@ -28,6 +28,7 @@ import org.elasticsearch.ingest.core.TemplateService;
import org.elasticsearch.ingest.processor.AppendProcessor;
import org.elasticsearch.ingest.processor.ConvertProcessor;
import org.elasticsearch.ingest.processor.DateProcessor;
import org.elasticsearch.ingest.processor.DateIndexNameProcessor;
import org.elasticsearch.ingest.processor.FailProcessor;
import org.elasticsearch.ingest.processor.ForEachProcessor;
import org.elasticsearch.ingest.processor.GsubProcessor;

@@ -76,6 +77,7 @@ public class NodeModule extends AbstractModule {
        registerProcessor(GsubProcessor.TYPE, (templateService, registry) -> new GsubProcessor.Factory());
        registerProcessor(FailProcessor.TYPE, (templateService, registry) -> new FailProcessor.Factory(templateService));
        registerProcessor(ForEachProcessor.TYPE, (templateService, registry) -> new ForEachProcessor.Factory(registry));
        registerProcessor(DateIndexNameProcessor.TYPE, (templateService, registry) -> new DateIndexNameProcessor.Factory());
    }

    @Override
@@ -303,15 +303,15 @@ public class RestTable {
            String resolution = request.param("bytes");
            if ("b".equals(resolution)) {
                return Long.toString(v.bytes());
            } else if ("k".equals(resolution)) {
            } else if ("k".equals(resolution) || "kb".equals(resolution)) {
                return Long.toString(v.kb());
            } else if ("m".equals(resolution)) {
            } else if ("m".equals(resolution) || "mb".equals(resolution)) {
                return Long.toString(v.mb());
            } else if ("g".equals(resolution)) {
            } else if ("g".equals(resolution) || "gb".equals(resolution)) {
                return Long.toString(v.gb());
            } else if ("t".equals(resolution)) {
            } else if ("t".equals(resolution) || "tb".equals(resolution)) {
                return Long.toString(v.tb());
            } else if ("p".equals(resolution)) {
            } else if ("p".equals(resolution) || "pb".equals(resolution)) {
                return Long.toString(v.pb());
            } else {
                return v.toString();

@@ -320,7 +320,7 @@ public class RestTable {
        if (value instanceof SizeValue) {
            SizeValue v = (SizeValue) value;
            String resolution = request.param("size");
            if ("b".equals(resolution)) {
            if ("".equals(resolution)) {
                return Long.toString(v.singles());
            } else if ("k".equals(resolution)) {
                return Long.toString(v.kilo());

@@ -339,7 +339,11 @@ public class RestTable {
        if (value instanceof TimeValue) {
            TimeValue v = (TimeValue) value;
            String resolution = request.param("time");
            if ("ms".equals(resolution)) {
            if ("nanos".equals(resolution)) {
                return Long.toString(v.nanos());
            } else if ("micros".equals(resolution)) {
                return Long.toString(v.micros());
            } else if ("ms".equals(resolution)) {
                return Long.toString(v.millis());
            } else if ("s".equals(resolution)) {
                return Long.toString(v.seconds());

@@ -347,6 +351,8 @@ public class RestTable {
                return Long.toString(v.minutes());
            } else if ("h".equals(resolution)) {
                return Long.toString(v.hours());
            } else if ("d".equals(resolution)) {
                return Long.toString(v.days());
            } else {
                return v.toString();
            }
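The pattern in these RestTable hunks: each long unit name becomes an alias for the existing single-letter resolution of the bytes parameter, and the time parameter gains nanos and micros resolutions. A standalone sketch of that aliasing; the class and method names here are hypothetical, not Elasticsearch API:

    import java.util.HashMap;
    import java.util.Map;

    // Sketch: long-form byte units resolve to the same converter as their
    // single-letter forms, mirroring the "||" branches above.
    class ByteResolutionAliases {
        static final Map<String, String> ALIASES = new HashMap<>();
        static {
            ALIASES.put("kb", "k");
            ALIASES.put("mb", "m");
            ALIASES.put("gb", "g");
            ALIASES.put("tb", "t");
            ALIASES.put("pb", "p");
        }

        static String normalize(String resolution) {
            return ALIASES.getOrDefault(resolution, resolution);
        }
    }
    // ByteResolutionAliases.normalize("kb") -> "k", so ?bytes=kb behaves like ?bytes=k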
@@ -82,12 +82,6 @@ public final class ScriptMetaData implements MetaData.Custom {
        parser.nextToken();
        switch (parser.currentName()) {
            case "script":
                if (parser.nextToken() == Token.VALUE_STRING) {
                    return parser.text();
                } else {
                    builder.copyCurrentStructure(parser);
                }
                break;
            case "template":
                if (parser.nextToken() == Token.VALUE_STRING) {
                    return parser.text();
@@ -62,7 +62,7 @@ import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.support.InnerHitBuilder;
import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.index.search.stats.StatsGroupsParseElement;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;

@@ -88,7 +88,6 @@ import org.elasticsearch.search.fetch.ShardFetchRequest;
import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext;
import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext.FieldDataField;
import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase;
import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
import org.elasticsearch.search.fetch.script.ScriptFieldsContext.ScriptField;
import org.elasticsearch.search.highlight.HighlightBuilder;
import org.elasticsearch.search.internal.DefaultSearchContext;

@@ -679,12 +678,24 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
                context.queryBoost(indexBoost);
            }
        }
        Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
        if (source.query() != null) {
            InnerHitBuilder.extractInnerHits(source.query(), innerHitBuilders);
            context.parsedQuery(queryShardContext.toQuery(source.query()));
        }
        if (source.postFilter() != null) {
            InnerHitBuilder.extractInnerHits(source.postFilter(), innerHitBuilders);
            context.parsedPostFilter(queryShardContext.toQuery(source.postFilter()));
        }
        if (innerHitBuilders.size() > 0) {
            for (Map.Entry<String, InnerHitBuilder> entry : innerHitBuilders.entrySet()) {
                try {
                    entry.getValue().build(context, context.innerHits());
                } catch (IOException e) {
                    throw new SearchContextException(context, "failed to build inner_hits", e);
                }
            }
        }
        if (source.sorts() != null) {
            try {
                Optional<Sort> optionalSort = SortBuilder.buildSort(source.sorts(), context.getQueryShardContext());

@@ -754,25 +765,6 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
                throw new SearchContextException(context, "failed to create SearchContextHighlighter", e);
            }
        }
        if (source.innerHits() != null) {
            for (Map.Entry<String, InnerHitBuilder> entry : source.innerHits().getInnerHitsBuilders().entrySet()) {
                try {
                    // This is the same logic in QueryShardContext#toQuery() where we reset also twice.
                    // Personally I think a reset at the end is sufficient, but I kept the logic consistent with this method.

                    // The reason we need to invoke reset at all here is because inner hits may modify the QueryShardContext#nestedScope,
                    // so we need to reset at the end.
                    queryShardContext.reset();
                    InnerHitBuilder innerHitBuilder = entry.getValue();
                    InnerHitsContext innerHitsContext = context.innerHits();
                    innerHitBuilder.buildTopLevel(context, queryShardContext, innerHitsContext);
                } catch (IOException e) {
                    throw new SearchContextException(context, "failed to create InnerHitsContext", e);
                } finally {
                    queryShardContext.reset();
                }
            }
        }
        if (source.scriptFields() != null) {
            for (org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField field : source.scriptFields()) {
                SearchScript searchScript = context.scriptService().search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH,
@@ -40,7 +40,6 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.support.InnerHitsBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AggregatorBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;

@@ -93,7 +92,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
    public static final ParseField INDICES_BOOST_FIELD = new ParseField("indices_boost");
    public static final ParseField AGGREGATIONS_FIELD = new ParseField("aggregations", "aggs");
    public static final ParseField HIGHLIGHT_FIELD = new ParseField("highlight");
    public static final ParseField INNER_HITS_FIELD = new ParseField("inner_hits");
    public static final ParseField SUGGEST_FIELD = new ParseField("suggest");
    public static final ParseField RESCORE_FIELD = new ParseField("rescore");
    public static final ParseField STATS_FIELD = new ParseField("stats");

@@ -156,8 +154,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ

    private SuggestBuilder suggestBuilder;

    private InnerHitsBuilder innerHitsBuilder;

    private List<RescoreBuilder<?>> rescoreBuilders;

    private ObjectFloatHashMap<String> indexBoost = null;

@@ -205,14 +201,11 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
        boolean hasIndexBoost = in.readBoolean();
        if (hasIndexBoost) {
            int size = in.readVInt();
            indexBoost = new ObjectFloatHashMap<String>(size);
            indexBoost = new ObjectFloatHashMap<>(size);
            for (int i = 0; i < size; i++) {
                indexBoost.put(in.readString(), in.readFloat());
            }
        }
        if (in.readBoolean()) {
            innerHitsBuilder = new InnerHitsBuilder(in);
        }
        if (in.readBoolean()) {
            minScore = in.readFloat();
        }

@@ -303,11 +296,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                out.writeFloat(indexBoost.get(key.value));
            }
        }
        boolean hasInnerHitsBuilder = innerHitsBuilder != null;
        out.writeBoolean(hasInnerHitsBuilder);
        if (hasInnerHitsBuilder) {
            innerHitsBuilder.writeTo(out);
        }
        boolean hasMinScore = minScore != null;
        out.writeBoolean(hasMinScore);
        if (hasMinScore) {

@@ -653,15 +641,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
        return highlightBuilder;
    }

    public SearchSourceBuilder innerHits(InnerHitsBuilder innerHitsBuilder) {
        this.innerHitsBuilder = innerHitsBuilder;
        return this;
    }

    public InnerHitsBuilder innerHits() {
        return innerHitsBuilder;
    }

    public SearchSourceBuilder suggest(SuggestBuilder suggestBuilder) {
        this.suggestBuilder = suggestBuilder;
        return this;

@@ -957,7 +936,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
        rewrittenBuilder.from = from;
        rewrittenBuilder.highlightBuilder = highlightBuilder;
        rewrittenBuilder.indexBoost = indexBoost;
        rewrittenBuilder.innerHitsBuilder = innerHitsBuilder;
        rewrittenBuilder.minScore = minScore;
        rewrittenBuilder.postQueryBuilder = postQueryBuilder;
        rewrittenBuilder.profile = profile;

@@ -1051,8 +1029,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                aggregations = aggParsers.parseAggregators(context);
            } else if (context.getParseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) {
                highlightBuilder = HighlightBuilder.fromXContent(context);
            } else if (context.getParseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) {
                innerHitsBuilder = InnerHitsBuilder.fromXContent(context);
            } else if (context.getParseFieldMatcher().match(currentFieldName, SUGGEST_FIELD)) {
                suggestBuilder = SuggestBuilder.fromXContent(context, suggesters);
            } else if (context.getParseFieldMatcher().match(currentFieldName, SORT_FIELD)) {

@@ -1235,10 +1211,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
            builder.field(HIGHLIGHT_FIELD.getPreferredName(), highlightBuilder);
        }

        if (innerHitsBuilder != null) {
            builder.field(INNER_HITS_FIELD.getPreferredName(), innerHitsBuilder, params);
        }

        if (suggestBuilder != null) {
            builder.field(SUGGEST_FIELD.getPreferredName(), suggestBuilder);
        }

@@ -1379,7 +1351,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
    @Override
    public int hashCode() {
        return Objects.hash(aggregations, explain, fetchSourceContext, fieldDataFields, fieldNames, from,
                highlightBuilder, indexBoost, innerHitsBuilder, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields,
                highlightBuilder, indexBoost, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields,
                size, sorts, searchAfterBuilder, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version, profile);
    }

@@ -1400,7 +1372,6 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                && Objects.equals(from, other.from)
                && Objects.equals(highlightBuilder, other.highlightBuilder)
                && Objects.equals(indexBoost, other.indexBoost)
                && Objects.equals(innerHitsBuilder, other.innerHitsBuilder)
                && Objects.equals(minScore, other.minScore)
                && Objects.equals(postQueryBuilder, other.postQueryBuilder)
                && Objects.equals(queryBuilder, other.queryBuilder)
@@ -19,6 +19,7 @@
package org.elasticsearch.snapshots;

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.index.IndexNotFoundException;

@@ -43,7 +44,7 @@ public class SnapshotUtils {
     * @return filtered out indices
     */
    public static List<String> filterIndices(List<String> availableIndices, String[] selectedIndices, IndicesOptions indicesOptions) {
        if (selectedIndices == null || selectedIndices.length == 0) {
        if (IndexNameExpressionResolver.isAllIndices(Arrays.asList(selectedIndices))) {
            return availableIndices;
        }
        Set<String> result = null;
@@ -20,14 +20,21 @@
package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESSingleNodeTestCase;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;

@@ -68,6 +75,101 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
        assertThat(cae.getShard().getIndexName(), equalTo("test"));
        assertFalse(cae.isPrimary());
        assertFalse(cae.isAssigned());
        assertThat("expecting a remaining delay, got: " + cae.getRemainingDelayNanos(), cae.getRemainingDelayNanos(), greaterThan(0L));
        assertThat("expecting a remaining delay, got: " + cae.getRemainingDelayMillis(), cae.getRemainingDelayMillis(), greaterThan(0L));
    }

    public void testUnassignedShards() throws Exception {
        logger.info("--> starting 3 nodes");
        String noAttrNode = internalCluster().startNode();
        String barAttrNode = internalCluster().startNode(Settings.builder().put("node.attr.bar", "baz"));
        String fooBarAttrNode = internalCluster().startNode(Settings.builder()
                .put("node.attr.foo", "bar")
                .put("node.attr.bar", "baz"));

        // Wait for all 3 nodes to be up
        logger.info("--> waiting for 3 nodes to be up");
        client().admin().cluster().health(Requests.clusterHealthRequest().waitForNodes("3")).actionGet();

        client().admin().indices().prepareCreate("anywhere")
                .setSettings(Settings.builder()
                        .put("index.number_of_shards", 5)
                        .put("index.number_of_replicas", 1))
                .get();

        client().admin().indices().prepareCreate("only-baz")
                .setSettings(Settings.builder()
                        .put("index.routing.allocation.include.bar", "baz")
                        .put("index.number_of_shards", 5)
                        .put("index.number_of_replicas", 1))
                .get();

        client().admin().indices().prepareCreate("only-foo")
                .setSettings(Settings.builder()
                        .put("index.routing.allocation.include.foo", "bar")
                        .put("index.number_of_shards", 1)
                        .put("index.number_of_replicas", 1))
                .get();

        ensureGreen("anywhere", "only-baz");
        ensureYellow("only-foo");

        ClusterAllocationExplainResponse resp = client().admin().cluster().prepareAllocationExplain()
                .setIndex("only-foo")
                .setShard(0)
                .setPrimary(false)
                .get();
        ClusterAllocationExplanation cae = resp.getExplanation();
        assertThat(cae.getShard().getIndexName(), equalTo("only-foo"));
        assertFalse(cae.isPrimary());
        assertFalse(cae.isAssigned());
        assertThat(UnassignedInfo.Reason.INDEX_CREATED, equalTo(cae.getUnassignedInfo().getReason()));
        assertThat("expecting no remaining delay: " + cae.getRemainingDelayMillis(), cae.getRemainingDelayMillis(), equalTo(0L));

        Map<DiscoveryNode, NodeExplanation> explanations = cae.getNodeExplanations();

        Float noAttrWeight = -1f;
        Float barAttrWeight = -1f;
        Float fooBarAttrWeight = -1f;
        for (Map.Entry<DiscoveryNode, NodeExplanation> entry : explanations.entrySet()) {
            DiscoveryNode node = entry.getKey();
            String nodeName = node.getName();
            NodeExplanation explanation = entry.getValue();
            ClusterAllocationExplanation.FinalDecision finalDecision = explanation.getFinalDecision();
            String finalExplanation = explanation.getFinalExplanation();
            ClusterAllocationExplanation.StoreCopy storeCopy = explanation.getStoreCopy();
            Decision d = explanation.getDecision();
            float weight = explanation.getWeight();
            IndicesShardStoresResponse.StoreStatus storeStatus = explanation.getStoreStatus();

            assertEquals(d.type(), Decision.Type.NO);
            if (noAttrNode.equals(nodeName)) {
                assertThat(d.toString(), containsString("node does not match index include filters [foo:\"bar\"]"));
                noAttrWeight = weight;
                assertNull(storeStatus);
                assertEquals("the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
                        explanation.getFinalExplanation());
                assertEquals(ClusterAllocationExplanation.FinalDecision.NO, finalDecision);
            } else if (barAttrNode.equals(nodeName)) {
                assertThat(d.toString(), containsString("node does not match index include filters [foo:\"bar\"]"));
                barAttrWeight = weight;
                assertNull(storeStatus);
                assertEquals("the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
                        explanation.getFinalExplanation());
                assertEquals(ClusterAllocationExplanation.FinalDecision.NO, finalDecision);
            } else if (fooBarAttrNode.equals(nodeName)) {
                assertThat(d.toString(), containsString("the shard cannot be allocated on the same node id"));
                fooBarAttrWeight = weight;
                assertEquals(storeStatus.getAllocationStatus(),
                        IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY);
                assertEquals(ClusterAllocationExplanation.FinalDecision.NO, finalDecision);
                assertEquals(ClusterAllocationExplanation.StoreCopy.AVAILABLE, storeCopy);
                assertEquals("the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
                        explanation.getFinalExplanation());
            } else {
                fail("unexpected node with name: " + nodeName +
                        ", I have: " + noAttrNode + ", " + barAttrNode + ", " + fooBarAttrNode);
            }
        }
        assertFalse(barAttrWeight == fooBarAttrWeight);
    }
}
@@ -43,16 +43,22 @@ public final class ClusterAllocationExplainTests extends ESSingleNodeTestCase {
        assertEquals(false, cae.isPrimary());
        assertNull(cae.getAssignedNodeId());
        assertNotNull(cae.getUnassignedInfo());
        Decision d = cae.getNodeDecisions().values().iterator().next();
        NodeExplanation explanation = cae.getNodeExplanations().values().iterator().next();
        ClusterAllocationExplanation.FinalDecision fd = explanation.getFinalDecision();
        ClusterAllocationExplanation.StoreCopy storeCopy = explanation.getStoreCopy();
        String finalExplanation = explanation.getFinalExplanation();
        Decision d = explanation.getDecision();
        assertNotNull("should have a decision", d);
        assertEquals(Decision.Type.NO, d.type());
        assertEquals(ClusterAllocationExplanation.FinalDecision.NO, fd);
        assertEquals(ClusterAllocationExplanation.StoreCopy.AVAILABLE, storeCopy);
        assertTrue(d.toString(), d.toString().contains("NO(the shard cannot be allocated on the same node id"));
        assertTrue(d instanceof Decision.Multi);
        Decision.Multi md = (Decision.Multi) d;
        Decision ssd = md.getDecisions().get(0);
        assertEquals(Decision.Type.NO, ssd.type());
        assertTrue(ssd.toString(), ssd.toString().contains("NO(the shard cannot be allocated on the same node id"));
        Float weight = cae.getNodeWeights().values().iterator().next();
        Float weight = explanation.getWeight();
        assertNotNull("should have a weight", weight);

        resp = client().admin().cluster().prepareAllocationExplain().setIndex("test").setShard(0).setPrimary(true).get();

@@ -64,16 +70,22 @@ public final class ClusterAllocationExplainTests extends ESSingleNodeTestCase {
        assertEquals(true, cae.isPrimary());
        assertNotNull("shard should have assigned node id", cae.getAssignedNodeId());
        assertNull("assigned shard should not have unassigned info", cae.getUnassignedInfo());
        d = cae.getNodeDecisions().values().iterator().next();
        explanation = cae.getNodeExplanations().values().iterator().next();
        d = explanation.getDecision();
        fd = explanation.getFinalDecision();
        storeCopy = explanation.getStoreCopy();
        finalExplanation = explanation.getFinalExplanation();
        assertNotNull("should have a decision", d);
        assertEquals(Decision.Type.NO, d.type());
        assertEquals(ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED, fd);
        assertEquals(ClusterAllocationExplanation.StoreCopy.AVAILABLE, storeCopy);
        assertTrue(d.toString(), d.toString().contains("NO(the shard cannot be allocated on the same node id"));
        assertTrue(d instanceof Decision.Multi);
        md = (Decision.Multi) d;
        ssd = md.getDecisions().get(0);
        assertEquals(Decision.Type.NO, ssd.type());
        assertTrue(ssd.toString(), ssd.toString().contains("NO(the shard cannot be allocated on the same node id"));
        weight = cae.getNodeWeights().values().iterator().next();
        weight = explanation.getWeight();
        assertNotNull("should have a weight", weight);

        resp = client().admin().cluster().prepareAllocationExplain().useAnyUnassignedShard().get();
@@ -19,17 +19,36 @@

package org.elasticsearch.action.admin.cluster.allocation;

import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingHelper;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;

@@ -39,6 +58,131 @@ import static java.util.Collections.emptySet;
 */
public final class ClusterAllocationExplanationTests extends ESTestCase {

    private Index i = new Index("foo", "uuid");
    private ShardRouting primaryShard = ShardRouting.newUnassigned(i, 0, null, true,
            new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
    private ShardRouting replicaShard = ShardRouting.newUnassigned(i, 0, null, false,
            new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
    private IndexMetaData indexMetaData = IndexMetaData.builder("foo")
            .settings(Settings.builder()
                    .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                    .put(IndexMetaData.SETTING_INDEX_UUID, "uuid"))
            .putActiveAllocationIds(0, new HashSet<String>(Arrays.asList("aid1", "aid2")))
            .numberOfShards(1)
            .numberOfReplicas(1)
            .build();
    private DiscoveryNode node = new DiscoveryNode("node-0", DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT);
    private static Decision.Multi yesDecision = new Decision.Multi();
    private static Decision.Multi noDecision = new Decision.Multi();

    static {
        yesDecision.add(Decision.single(Decision.Type.YES, "yes label", "yes please"));
        noDecision.add(Decision.single(Decision.Type.NO, "no label", "no thanks"));
    }

    private NodeExplanation makeNodeExplanation(boolean primary, boolean isAssigned, boolean hasErr, boolean hasActiveId) {
        Float nodeWeight = randomFloat();
        Exception e = hasErr ? new ElasticsearchException("stuff's broke, yo") : null;
        IndicesShardStoresResponse.StoreStatus storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
                IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, e);
        String assignedNodeId;
        if (isAssigned) {
            assignedNodeId = "node-0";
        } else {
            assignedNodeId = "node-9";
        }
        Set<String> activeAllocationIds = new HashSet<>();
        if (hasActiveId) {
            activeAllocationIds.add("eggplant");
        }

        return TransportClusterAllocationExplainAction.calculateNodeExplanation(primary ? primaryShard : replicaShard,
                indexMetaData, node, noDecision, nodeWeight, storeStatus, assignedNodeId, activeAllocationIds);
    }

    private void assertExplanations(NodeExplanation ne, String finalExplanation, ClusterAllocationExplanation.FinalDecision finalDecision,
                                    ClusterAllocationExplanation.StoreCopy storeCopy) {
        assertEquals(finalExplanation, ne.getFinalExplanation());
        assertEquals(finalDecision, ne.getFinalDecision());
        assertEquals(storeCopy, ne.getStoreCopy());
    }

    public void testDecisionAndExplanation() {
        Exception e = new IOException("stuff's broke, yo");
        Exception corruptE = new CorruptIndexException("stuff's corrupt, yo", "");
        Float nodeWeight = randomFloat();
        Set<String> activeAllocationIds = new HashSet<>();
        activeAllocationIds.add("eggplant");
        ShardRouting primaryStartedShard = ShardRouting.newUnassigned(i, 0, null, true,
                new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "foo"));
        assertTrue(primaryStartedShard.allocatedPostIndexCreate(indexMetaData));

        IndicesShardStoresResponse.StoreStatus storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
                IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, e);
        NodeExplanation ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
                yesDecision, nodeWeight, storeStatus, "", activeAllocationIds);
        assertExplanations(ne, "the copy of the shard cannot be read",
                ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.IO_ERROR);

        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
                null, "", activeAllocationIds);
        assertExplanations(ne, "the shard can be assigned",
                ClusterAllocationExplanation.FinalDecision.YES, ClusterAllocationExplanation.StoreCopy.NONE);

        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node, yesDecision,
                nodeWeight, null, "", activeAllocationIds);
        assertExplanations(ne, "there is no copy of the shard available",
                ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.NONE);

        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, noDecision, nodeWeight,
                null, "", activeAllocationIds);
        assertExplanations(ne, "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
                ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.NONE);

        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
                IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, noDecision, nodeWeight,
                storeStatus, "", activeAllocationIds);
        assertExplanations(ne, "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
                ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.AVAILABLE);

        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
                IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, corruptE);
        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
                storeStatus, "", activeAllocationIds);
        assertExplanations(ne, "the copy of the shard is corrupt",
                ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.CORRUPT);

        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "banana",
                IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
                storeStatus, "", activeAllocationIds);
        assertExplanations(ne, "the shard can be assigned",
                ClusterAllocationExplanation.FinalDecision.YES, ClusterAllocationExplanation.StoreCopy.STALE);

        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "banana",
                IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryStartedShard, indexMetaData, node, yesDecision,
                nodeWeight, storeStatus, "", activeAllocationIds);
        assertExplanations(ne, "the copy of the shard is stale, allocation ids do not match",
                ClusterAllocationExplanation.FinalDecision.NO, ClusterAllocationExplanation.StoreCopy.STALE);

        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
                IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
                storeStatus, "node-0", activeAllocationIds);
        assertExplanations(ne, "the shard is already assigned to this node",
                ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED, ClusterAllocationExplanation.StoreCopy.AVAILABLE);

        storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
                IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
        ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node, yesDecision, nodeWeight,
                storeStatus, "", activeAllocationIds);
        assertExplanations(ne, "the shard can be assigned and the node contains a valid copy of the shard data",
                ClusterAllocationExplanation.FinalDecision.YES, ClusterAllocationExplanation.StoreCopy.AVAILABLE);
    }

    public void testDecisionEquality() {
        Decision.Multi d = new Decision.Multi();
        Decision.Multi d2 = new Decision.Multi();

@@ -53,21 +197,19 @@ public final class ClusterAllocationExplanationTests extends ESTestCase {

    public void testExplanationSerialization() throws Exception {
        ShardId shard = new ShardId("test", "uuid", 0);
        Map<DiscoveryNode, Decision> nodeToDecisions = new HashMap<>();
        Map<DiscoveryNode, Float> nodeToWeight = new HashMap<>();
        for (int i = randomIntBetween(2, 5); i > 0; i--) {
            DiscoveryNode dn = new DiscoveryNode("node-" + i, DummyTransportAddress.INSTANCE, emptyMap(), emptySet(), Version.CURRENT);
            Decision.Multi d = new Decision.Multi();
            d.add(Decision.single(Decision.Type.NO, "no label", "because I said no"));
            d.add(Decision.single(Decision.Type.YES, "yes label", "yes please"));
            d.add(Decision.single(Decision.Type.THROTTLE, "throttle label", "wait a sec"));
            nodeToDecisions.put(dn, d);
            nodeToWeight.put(dn, randomFloat());
        }

        long remainingDelay = randomIntBetween(0, 500);
        ClusterAllocationExplanation cae = new ClusterAllocationExplanation(shard, true, "assignedNode", null,
                nodeToDecisions, nodeToWeight, remainingDelay);
        Map<DiscoveryNode, NodeExplanation> nodeExplanations = new HashMap<>(1);
        Float nodeWeight = randomFloat();
        Set<String> activeAllocationIds = new HashSet<>();
        activeAllocationIds.add("eggplant");

        IndicesShardStoresResponse.StoreStatus storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
                IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null);
        NodeExplanation ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
                yesDecision, nodeWeight, storeStatus, "", activeAllocationIds);
        nodeExplanations.put(ne.getNode(), ne);
        ClusterAllocationExplanation cae = new ClusterAllocationExplanation(shard, true,
                "assignedNode", remainingDelay, null, nodeExplanations);
        BytesStreamOutput out = new BytesStreamOutput();
        cae.writeTo(out);
        StreamInput in = StreamInput.wrap(out.bytes());

@@ -77,10 +219,45 @@ public final class ClusterAllocationExplanationTests extends ESTestCase {
        assertTrue(cae2.isAssigned());
        assertEquals("assignedNode", cae2.getAssignedNodeId());
        assertNull(cae2.getUnassignedInfo());
        for (Map.Entry<DiscoveryNode, Decision> entry : cae2.getNodeDecisions().entrySet()) {
            assertEquals(nodeToDecisions.get(entry.getKey()), entry.getValue());
        assertEquals(remainingDelay, cae2.getRemainingDelayMillis());
        for (Map.Entry<DiscoveryNode, NodeExplanation> entry : cae2.getNodeExplanations().entrySet()) {
            DiscoveryNode node = entry.getKey();
            NodeExplanation explanation = entry.getValue();
            IndicesShardStoresResponse.StoreStatus status = explanation.getStoreStatus();
            assertNotNull(explanation.getStoreStatus());
            assertNotNull(explanation.getDecision());
            assertEquals(nodeWeight, explanation.getWeight());
        }
        assertEquals(nodeToWeight, cae2.getNodeWeights());
        assertEquals(remainingDelay, cae2.getRemainingDelayNanos());
    }

    public void testExplanationToXContent() throws Exception {
        ShardId shardId = new ShardId("foo", "uuid", 0);
        long remainingDelay = 42;
        Decision.Multi d = new Decision.Multi();
        d.add(Decision.single(Decision.Type.NO, "no label", "because I said no"));
        d.add(Decision.single(Decision.Type.YES, "yes label", "yes please"));
        d.add(Decision.single(Decision.Type.THROTTLE, "throttle label", "wait a sec"));
        Float nodeWeight = 1.5f;
        Set<String> allocationIds = new HashSet<>();
        allocationIds.add("bar");
        IndicesShardStoresResponse.StoreStatus storeStatus = new IndicesShardStoresResponse.StoreStatus(node, 42, "eggplant",
                IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, new ElasticsearchException("stuff's broke, yo"));
        NodeExplanation ne = TransportClusterAllocationExplainAction.calculateNodeExplanation(primaryShard, indexMetaData, node,
                d, nodeWeight, storeStatus, "node-0", allocationIds);
        Map<DiscoveryNode, NodeExplanation> nodeExplanations = new HashMap<>(1);
        nodeExplanations.put(ne.getNode(), ne);
        ClusterAllocationExplanation cae = new ClusterAllocationExplanation(shardId, true,
                "assignedNode", remainingDelay, null, nodeExplanations);
        XContentBuilder builder = XContentFactory.jsonBuilder();
        cae.toXContent(builder, ToXContent.EMPTY_PARAMS);
        assertEquals("{\"shard\":{\"index\":\"foo\",\"index_uuid\":\"uuid\",\"id\":0,\"primary\":true},\"assigned\":true," +
                "\"assigned_node_id\":\"assignedNode\",\"nodes\":{\"node-0\":{\"node_name\":\"\",\"node_attribute" +
                "s\":{},\"store\":{\"shard_copy\":\"IO_ERROR\",\"store_exception\":\"ElasticsearchException[stuff" +
                "'s broke, yo]\"},\"final_decision\":\"ALREADY_ASSIGNED\",\"final_explanation\":\"the shard is al" +
                "ready assigned to this node\",\"weight\":1.5,\"decisions\":[{\"decider\":\"no label\",\"decision" +
                "\":\"NO\",\"explanation\":\"because I said no\"},{\"decider\":\"yes label\",\"decision\":\"YES\"" +
                ",\"explanation\":\"yes please\"},{\"decider\":\"throttle label\",\"decision\":\"THROTTLE\",\"exp" +
                "lanation\":\"wait a sec\"}]}}}",
                builder.string());
    }
}
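For readability, the expected output asserted in testExplanationToXContent, reassembled verbatim from the concatenated Java string above and pretty-printed:

    {
      "shard": {"index": "foo", "index_uuid": "uuid", "id": 0, "primary": true},
      "assigned": true,
      "assigned_node_id": "assignedNode",
      "nodes": {
        "node-0": {
          "node_name": "",
          "node_attributes": {},
          "store": {
            "shard_copy": "IO_ERROR",
            "store_exception": "ElasticsearchException[stuff's broke, yo]"
          },
          "final_decision": "ALREADY_ASSIGNED",
          "final_explanation": "the shard is already assigned to this node",
          "weight": 1.5,
          "decisions": [
            {"decider": "no label", "decision": "NO", "explanation": "because I said no"},
            {"decider": "yes label", "decision": "YES", "explanation": "yes please"},
            {"decider": "throttle label", "decision": "THROTTLE", "explanation": "wait a sec"}
          ]
        }
      }
    }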
@@ -20,6 +20,7 @@
package org.elasticsearch.bootstrap;

import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;

@@ -38,6 +39,8 @@ import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.hasToString;
import static org.hamcrest.Matchers.not;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

public class BootstrapCheckTests extends ESTestCase {

@@ -113,6 +116,11 @@ public class BootstrapCheckTests extends ESTestCase {
                    public String errorMessage() {
                        return "first";
                    }

                    @Override
                    public boolean isSystemCheck() {
                        return false;
                    }
                },
                new BootstrapCheck.Check() {
                    @Override

@@ -124,11 +132,16 @@ public class BootstrapCheckTests extends ESTestCase {
                    public String errorMessage() {
                        return "second";
                    }

                    @Override
                    public boolean isSystemCheck() {
                        return false;
                    }
                }
        );

        final RuntimeException e =
                expectThrows(RuntimeException.class, () -> BootstrapCheck.check(true, checks, "testExceptionAggregation"));
                expectThrows(RuntimeException.class, () -> BootstrapCheck.check(true, false, checks, "testExceptionAggregation"));
        assertThat(e, hasToString(allOf(containsString("bootstrap checks failed"), containsString("first"), containsString("second"))));
        final Throwable[] suppressed = e.getSuppressed();
        assertThat(suppressed.length, equalTo(2));

@@ -159,7 +172,7 @@ public class BootstrapCheckTests extends ESTestCase {
        final RuntimeException e =
                expectThrows(
                        RuntimeException.class,
                        () -> BootstrapCheck.check(true, Collections.singletonList(check), "testHeapSizeCheck"));
                        () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testHeapSizeCheck"));
        assertThat(
                e.getMessage(),
                containsString("initial heap size [" + initialHeapSize.get() + "] " +

@@ -167,7 +180,7 @@ public class BootstrapCheckTests extends ESTestCase {

        initialHeapSize.set(maxHeapSize.get());

        BootstrapCheck.check(true, Collections.singletonList(check), "testHeapSizeCheck");
        BootstrapCheck.check(true, false, Collections.singletonList(check), "testHeapSizeCheck");

        // nothing should happen if the initial heap size or the max
        // heap size is not available

@@ -176,7 +189,7 @@ public class BootstrapCheckTests extends ESTestCase {
        } else {
            maxHeapSize.set(0);
        }
        BootstrapCheck.check(true, Collections.singletonList(check), "testHeapSizeCheck");
        BootstrapCheck.check(true, false, Collections.singletonList(check), "testHeapSizeCheck");
    }

    public void testFileDescriptorLimits() {

@@ -202,17 +215,17 @@ public class BootstrapCheckTests extends ESTestCase {

        final RuntimeException e =
                expectThrows(RuntimeException.class,
                        () -> BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimits"));
                        () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testFileDescriptorLimits"));
        assertThat(e.getMessage(), containsString("max file descriptors"));

        maxFileDescriptorCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));

        BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimits");
        BootstrapCheck.check(true, false, Collections.singletonList(check), "testFileDescriptorLimits");

        // nothing should happen if current file descriptor count is
        // not available
        maxFileDescriptorCount.set(-1);
        BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimits");
        BootstrapCheck.check(true, false, Collections.singletonList(check), "testFileDescriptorLimits");
    }

    public void testFileDescriptorLimitsThrowsOnInvalidLimit() {

@@ -255,13 +268,17 @@ public class BootstrapCheckTests extends ESTestCase {
            if (testCase.shouldFail) {
                final RuntimeException e = expectThrows(
                        RuntimeException.class,
                        () -> BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimitsThrowsOnInvalidLimit"));
                        () -> BootstrapCheck.check(
                                true,
                                false,
                                Collections.singletonList(check),
                                "testFileDescriptorLimitsThrowsOnInvalidLimit"));
                assertThat(
                        e.getMessage(),
                        containsString("memory locking requested for elasticsearch process but memory is not locked"));
            } else {
                // nothing should happen
                BootstrapCheck.check(true, Collections.singletonList(check), "testFileDescriptorLimitsThrowsOnInvalidLimit");
                BootstrapCheck.check(true, false, Collections.singletonList(check), "testFileDescriptorLimitsThrowsOnInvalidLimit");
            }
        }
    }

@@ -278,17 +295,17 @@ public class BootstrapCheckTests extends ESTestCase {

        final RuntimeException e = expectThrows(
                RuntimeException.class,
                () -> BootstrapCheck.check(true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck"));
                () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxNumberOfThreadsCheck"));
        assertThat(e.getMessage(), containsString("max number of threads"));

        maxNumberOfThreads.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));

        BootstrapCheck.check(true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");
        BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");

        // nothing should happen if current max number of threads is
        // not available
        maxNumberOfThreads.set(-1);
        BootstrapCheck.check(true, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");
        BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxNumberOfThreadsCheck");
    }

    public void testMaxSizeVirtualMemory() {

@@ -309,17 +326,17 @@ public class BootstrapCheckTests extends ESTestCase {

        final RuntimeException e = expectThrows(
                RuntimeException.class,
                () -> BootstrapCheck.check(true, Collections.singletonList(check), "testMaxSizeVirtualMemory"));
                () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxSizeVirtualMemory"));
        assertThat(e.getMessage(), containsString("max size virtual memory"));

        maxSizeVirtualMemory.set(rlimInfinity);

        BootstrapCheck.check(true, Collections.singletonList(check), "testMaxSizeVirtualMemory");
        BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxSizeVirtualMemory");

        // nothing should happen if max size virtual memory is not
        // available
        maxSizeVirtualMemory.set(Long.MIN_VALUE);
        BootstrapCheck.check(true, Collections.singletonList(check), "testMaxSizeVirtualMemory");
        BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxSizeVirtualMemory");
    }

    public void testMaxMapCountCheck() {

@@ -334,17 +351,17 @@ public class BootstrapCheckTests extends ESTestCase {

        RuntimeException e = expectThrows(
                RuntimeException.class,
                () -> BootstrapCheck.check(true, Collections.singletonList(check), "testMaxMapCountCheck"));
                () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxMapCountCheck"));
        assertThat(e.getMessage(), containsString("max virtual memory areas vm.max_map_count"));

        maxMapCount.set(randomIntBetween(limit + 1, Integer.MAX_VALUE));

        BootstrapCheck.check(true, Collections.singletonList(check), "testMaxMapCountCheck");
        BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxMapCountCheck");

        // nothing should happen if current vm.max_map_count is not
        // available
        maxMapCount.set(-1);
        BootstrapCheck.check(true, Collections.singletonList(check), "testMaxMapCountCheck");
        BootstrapCheck.check(true, false, Collections.singletonList(check), "testMaxMapCountCheck");
    }

    public void testMinMasterNodes() {

@@ -353,7 +370,42 @@ public class BootstrapCheckTests extends ESTestCase {
        assertThat(check.check(), not(equalTo(isSet)));
        List<BootstrapCheck.Check> defaultChecks = BootstrapCheck.checks(Settings.EMPTY);

        expectThrows(RuntimeException.class, () -> BootstrapCheck.check(true, defaultChecks, "testMinMasterNodes"));
        expectThrows(RuntimeException.class, () -> BootstrapCheck.check(true, false, defaultChecks, "testMinMasterNodes"));
    }

    public void testIgnoringSystemChecks() {
        BootstrapCheck.Check check = new BootstrapCheck.Check() {
            @Override
            public boolean check() {
                return true;
            }

            @Override
            public String errorMessage() {
                return "error";
            }

            @Override
            public boolean isSystemCheck() {
                return true;
            }
        };

        final RuntimeException notIgnored = expectThrows(
                RuntimeException.class,
                () -> BootstrapCheck.check(true, false, Collections.singletonList(check), "testIgnoringSystemChecks"));
        assertThat(notIgnored, hasToString(containsString("error")));

        final ESLogger logger = mock(ESLogger.class);

        // nothing should happen if we ignore system checks
        BootstrapCheck.check(true, true, Collections.singletonList(check), logger);
        verify(logger).warn("error");
        reset(logger);

        // nothing should happen if we ignore all checks
        BootstrapCheck.check(false, randomBoolean(), Collections.singletonList(check), logger);
        verify(logger).warn("error");
    }

}
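The thread running through these BootstrapCheckTests hunks: BootstrapCheck.check gained a second boolean argument, and testIgnoringSystemChecks suggests it downgrades failing system checks (those whose isSystemCheck() returns true) from hard failures to logged warnings. A sketch of the three modes exercised above; the parameter names are assumed, not taken from the source:

    // Sketch only; names assumed: check(enforceLimits, ignoreSystemChecks, checks, context)
    BootstrapCheck.check(true, false, checks, "node-name");  // any failing check aborts startup
    BootstrapCheck.check(true, true, checks, logger);        // failing system checks are only logged
    BootstrapCheck.check(false, true, checks, logger);       // enforcement off: failures are logged, never thrown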
@@ -294,11 +294,13 @@ public class SettingTests extends ESTestCase {
        Setting<List<String>> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(),
                Property.Dynamic, Property.NodeScope);
        List<String> value = listSetting.get(Settings.EMPTY);
        assertFalse(listSetting.exists(Settings.EMPTY));
        assertEquals(1, value.size());
        assertEquals("foo,bar", value.get(0));

        List<String> input = Arrays.asList("test", "test1, test2", "test", ",,,,");
        Settings.Builder builder = Settings.builder().putArray("foo.bar", input.toArray(new String[0]));
        assertTrue(listSetting.exists(builder.build()));
        value = listSetting.get(builder.build());
        assertEquals(input.size(), value.size());
        assertArrayEquals(value.toArray(new String[0]), input.toArray(new String[0]));

@@ -311,6 +313,7 @@ public class SettingTests extends ESTestCase {
        value = listSetting.get(builder.build());
        assertEquals(input.size(), value.size());
        assertArrayEquals(value.toArray(new String[0]), input.toArray(new String[0]));
        assertTrue(listSetting.exists(builder.build()));

        AtomicReference<List<String>> ref = new AtomicReference<>();
        AbstractScopedSettings.SettingUpdater<List<String>> settingUpdater = listSetting.newUpdater(ref::set, logger);
@@ -0,0 +1,146 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.http.netty;

import org.apache.http.Header;
import org.apache.http.HttpException;
import org.apache.http.HttpHeaders;
import org.apache.http.HttpResponseInterceptor;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.protocol.HttpContext;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.rest.client.http.HttpResponse;

import java.io.IOException;

@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 1, numClientNodes = 1)
public class NettyHttpCompressionIT extends ESIntegTestCase {
    private static final String GZIP_ENCODING = "gzip";

    private static final String SAMPLE_DOCUMENT = "{\n" +
            " \"name\": {\n" +
            " \"first name\": \"Steve\",\n" +
            " \"last name\": \"Jobs\"\n" +
            " }\n" +
            "}";

    @Override
    protected Settings nodeSettings(int nodeOrdinal) {
        return Settings.builder()
                .put(super.nodeSettings(nodeOrdinal))
                .put(NetworkModule.HTTP_ENABLED.getKey(), true)
                .put(HttpTransportSettings.SETTING_HTTP_COMPRESSION.getKey(), true)
                .build();
    }

    public void testCompressesResponseIfRequested() throws Exception {
        ensureGreen();

        // we need to intercept early, otherwise internal logic in HttpClient will just remove the header and we cannot verify it
        ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor();
        CloseableHttpClient internalClient = HttpClients.custom().addInterceptorFirst(headerExtractor).build();

        HttpResponse response = httpClient(internalClient).path("/").addHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING).execute();
        assertEquals(200, response.getStatusCode());
        assertTrue(headerExtractor.hasContentEncodingHeader());
        assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue());
    }

    public void testUncompressedResponseByDefault() throws Exception {
        ensureGreen();

        ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor();
        CloseableHttpClient internalClient = HttpClients
                .custom()
                .disableContentCompression()
                .addInterceptorFirst(headerExtractor)
                .build();

        HttpResponse response = httpClient(internalClient).path("/").execute();
        assertEquals(200, response.getStatusCode());
        assertFalse(headerExtractor.hasContentEncodingHeader());
    }

    public void testCanInterpretUncompressedRequest() throws Exception {
        ensureGreen();

        ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor();
        CloseableHttpClient internalClient = HttpClients
                .custom()
                // this disables content compression in both directions (request and response)
                .disableContentCompression()
                .addInterceptorFirst(headerExtractor)
                .build();

        HttpResponse response = httpClient(internalClient)
                .path("/company/employees/1")
                .method("POST")
                .body(SAMPLE_DOCUMENT)
                .execute();

        assertEquals(201, response.getStatusCode());
        assertFalse(headerExtractor.hasContentEncodingHeader());
    }

    public void testCanInterpretCompressedRequest() throws Exception {
        ensureGreen();

        ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor();
        // we don't call #disableContentCompression() hence the client will send the content compressed
        CloseableHttpClient internalClient = HttpClients.custom().addInterceptorFirst(headerExtractor).build();

        HttpResponse response = httpClient(internalClient)
                .path("/company/employees/2")
                .method("POST")
                .body(SAMPLE_DOCUMENT)
                .execute();

        assertEquals(201, response.getStatusCode());
        assertTrue(headerExtractor.hasContentEncodingHeader());
        assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue());
    }

    private static class ContentEncodingHeaderExtractor implements HttpResponseInterceptor {
        private Header contentEncodingHeader;

        @Override
        public void process(org.apache.http.HttpResponse response, HttpContext context) throws HttpException, IOException {
            final Header[] headers = response.getHeaders(HttpHeaders.CONTENT_ENCODING);
            if (headers.length == 1) {
                this.contentEncodingHeader = headers[0];
            } else if (headers.length > 1) {
                throw new AssertionError("Expected none or one content encoding header but got " + headers.length + " headers.");
            }
        }

        public boolean hasContentEncodingHeader() {
            return contentEncodingHeader != null;
        }

        public Header getContentEncodingHeader() {
            return contentEncodingHeader;
        }
    }
}
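For readers who want to reproduce by hand what NettyHttpCompressionIT verifies, here is a minimal standalone client along the same lines — a sketch, assuming a local node on the default port 9200 with HTTP compression enabled; the HttpClient calls mirror the ones the test itself uses:

import org.apache.http.HttpHeaders;
import org.apache.http.HttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

public class CompressionCheck {
    public static void main(String[] args) throws Exception {
        // Assumes a local node with http.compression enabled (host/port are placeholders).
        try (CloseableHttpClient client = HttpClients.custom()
                .disableContentCompression() // keep the raw Content-Encoding header visible
                .build()) {
            HttpGet get = new HttpGet("http://localhost:9200/");
            get.addHeader(HttpHeaders.ACCEPT_ENCODING, "gzip");
            HttpResponse response = client.execute(get);
            // With compression enabled the node should answer with Content-Encoding: gzip.
            System.out.println(response.getFirstHeader(HttpHeaders.CONTENT_ENCODING));
        }
    }
}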
@@ -110,6 +110,15 @@ public class IndexSettingsTests extends ESTestCase {
        assertTrue(ex.getMessage(), ex.getMessage().startsWith("version mismatch on settings update expected: "));
    }

    // use version number that is unknown
    metaData = newIndexMeta("index", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.fromId(999999))
            .build());
    settings = new IndexSettings(metaData, Settings.EMPTY);
    assertEquals(Version.fromId(999999), settings.getIndexVersionCreated());
    assertEquals("_na_", settings.getUUID());
    settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED,
            Version.fromId(999999)).put("index.test.setting.int", 42).build()));

    metaData = newIndexMeta("index", Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetaData.SETTING_INDEX_UUID, "0xdeadbeef").build());
    settings = new IndexSettings(metaData, Settings.EMPTY);
@@ -103,6 +103,22 @@ public class StringMappingUpgradeTests extends ESSingleNodeTestCase {
        assertEquals(IndexOptions.NONE, field.fieldType().indexOptions());
    }

    public void testIllegalIndexValue() throws IOException {
        IndexService indexService = createIndex("test");
        DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
                .startObject("properties")
                .startObject("field")
                .field("type", "string")
                .field("index", false)
                .endObject()
                .endObject().endObject().endObject().string();
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                () -> parser.parse("type", new CompressedXContent(mapping)));
        assertThat(e.getMessage(),
                containsString("Can't parse [index] value [false] for field [field], expected [no], [not_analyzed] or [analyzed]"));
    }

    public void testNotSupportedUpgrade() throws IOException {
        IndexService indexService = createIndex("test");
        DocumentMapperParser parser = indexService.mapperService().documentMapperParser();
@@ -24,7 +24,6 @@ import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.io.JsonStringEncoder;

import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanBoostQuery;
@@ -24,10 +24,12 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;

import java.io.IOException;
import java.util.Collection;

import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;

@@ -56,9 +58,9 @@ public class ExistsQueryBuilderTests extends AbstractQueryTestCase<ExistsQueryBuilder> {
        String fieldPattern = queryBuilder.fieldName();
        Collection<String> fields = context.simpleMatchToIndexNames(fieldPattern);
        if (getCurrentTypes().length == 0) {
            assertThat(query, instanceOf(BooleanQuery.class));
            BooleanQuery booleanQuery = (BooleanQuery) query;
            assertThat(booleanQuery.clauses().size(), equalTo(0));
            assertThat(query, instanceOf(MatchNoDocsQuery.class));
            MatchNoDocsQuery matchNoDocsQuery = (MatchNoDocsQuery) query;
            assertThat(matchNoDocsQuery.toString(null), containsString("Missing types in \"exists\" query."));
        } else {
            assertThat(query, instanceOf(ConstantScoreQuery.class));
            ConstantScoreQuery constantScoreQuery = (ConstantScoreQuery) query;

@@ -79,11 +81,11 @@ public class ExistsQueryBuilderTests extends AbstractQueryTestCase<ExistsQueryBuilder> {

    public void testFromJson() throws IOException {
        String json =
                "{\n" +
                " \"exists\" : {\n" +
                " \"field\" : \"user\",\n" +
                " \"boost\" : 42.0\n" +
                " }\n" +
                "{\n" +
                " \"exists\" : {\n" +
                " \"field\" : \"user\",\n" +
                " \"boost\" : 42.0\n" +
                " }\n" +
                "}";

        ExistsQueryBuilder parsed = (ExistsQueryBuilder) parseQuery(json);
@@ -42,7 +42,6 @@ import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.query.support.InnerHitBuilder;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.Script.ScriptParseException;
import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;

@@ -53,6 +52,8 @@ import org.junit.BeforeClass;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;

@@ -125,18 +126,24 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQueryBuilder> {
            assertEquals(queryBuilder.scoreMode(), lpq.getScoreMode()); // WTF is this why do we have two?
        }
        if (queryBuilder.innerHit() != null) {
            assertNotNull(SearchContext.current());
            SearchContext searchContext = SearchContext.current();
            assertNotNull(searchContext);
            if (query != null) {
                assertNotNull(SearchContext.current().innerHits());
                assertEquals(1, SearchContext.current().innerHits().getInnerHits().size());
                assertTrue(SearchContext.current().innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName()));
                Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
                InnerHitBuilder.extractInnerHits(queryBuilder, innerHitBuilders);
                for (InnerHitBuilder builder : innerHitBuilders.values()) {
                    builder.build(searchContext, searchContext.innerHits());
                }
                assertNotNull(searchContext.innerHits());
                assertEquals(1, searchContext.innerHits().getInnerHits().size());
                assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName()));
                InnerHitsContext.BaseInnerHits innerHits =
                        SearchContext.current().innerHits().getInnerHits().get(queryBuilder.innerHit().getName());
                        searchContext.innerHits().getInnerHits().get(queryBuilder.innerHit().getName());
                assertEquals(innerHits.size(), queryBuilder.innerHit().getSize());
                assertEquals(innerHits.sort().getSort().length, 1);
                assertEquals(innerHits.sort().getSort()[0].getField(), STRING_FIELD_NAME_2);
            } else {
                assertThat(SearchContext.current().innerHits().getInnerHits().size(), equalTo(0));
                assertThat(searchContext.innerHits().getInnerHits().size(), equalTo(0));
            }
        }
    }

@@ -188,7 +195,6 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQueryBuilder> {
                " \"boost\" : 2.0,\n" +
                " \"_name\" : \"WNzYMJKRwePuRBh\",\n" +
                " \"inner_hits\" : {\n" +
                " \"type\" : \"child\",\n" +
                " \"name\" : \"inner_hits_name\",\n" +
                " \"from\" : 0,\n" +
                " \"size\" : 100,\n" +

@@ -199,18 +205,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQueryBuilder> {
                " \"mapped_string\" : {\n" +
                " \"order\" : \"asc\"\n" +
                " }\n" +
                " } ],\n" +
                " \"query\" : {\n" +
                " \"range\" : {\n" +
                " \"mapped_string\" : {\n" +
                " \"from\" : \"agJhRET\",\n" +
                " \"to\" : \"zvqIq\",\n" +
                " \"include_lower\" : true,\n" +
                " \"include_upper\" : true,\n" +
                " \"boost\" : 1.0\n" +
                " }\n" +
                " }\n" +
                " }\n" +
                " } ]\n" +
                " }\n" +
                " }\n" +
                "}";

@@ -223,11 +218,11 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQueryBuilder> {
        assertEquals(query, queryBuilder.childType(), "child");
        assertEquals(query, queryBuilder.scoreMode(), ScoreMode.Avg);
        assertNotNull(query, queryBuilder.innerHit());
        assertEquals(query, queryBuilder.innerHit(), new InnerHitBuilder().setParentChildType("child")
        InnerHitBuilder expected = new InnerHitBuilder(new InnerHitBuilder(), queryBuilder.query(), "child")
                .setName("inner_hits_name")
                .setSize(100)
                .addSort(new FieldSortBuilder("mapped_string").order(SortOrder.ASC))
                .setQuery(queryBuilder.query()));
                .addSort(new FieldSortBuilder("mapped_string").order(SortOrder.ASC));
        assertEquals(query, queryBuilder.innerHit(), expected);

    }

    public void testToQueryInnerQueryType() throws IOException {
@@ -19,7 +19,6 @@

package org.elasticsearch.index.query;

import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.fasterxml.jackson.core.JsonParseException;

import org.apache.lucene.search.MatchNoDocsQuery;

@@ -34,7 +33,6 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.support.InnerHitBuilder;
import org.elasticsearch.script.Script.ScriptParseException;
import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
import org.elasticsearch.search.internal.SearchContext;
@@ -43,7 +41,8 @@ import org.elasticsearch.search.sort.SortOrder;
import org.junit.BeforeClass;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;

@@ -108,18 +107,24 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQueryBuilder> {
            assertEquals(queryBuilder.score() ? ScoreMode.Max : ScoreMode.None, lpq.getScoreMode());
        }
        if (queryBuilder.innerHit() != null) {
            assertNotNull(SearchContext.current());
            SearchContext searchContext = SearchContext.current();
            assertNotNull(searchContext);
            if (query != null) {
                assertNotNull(SearchContext.current().innerHits());
                assertEquals(1, SearchContext.current().innerHits().getInnerHits().size());
                assertTrue(SearchContext.current().innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName()));
                InnerHitsContext.BaseInnerHits innerHits = SearchContext.current().innerHits()
                Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
                InnerHitBuilder.extractInnerHits(queryBuilder, innerHitBuilders);
                for (InnerHitBuilder builder : innerHitBuilders.values()) {
                    builder.build(searchContext, searchContext.innerHits());
                }
                assertNotNull(searchContext.innerHits());
                assertEquals(1, searchContext.innerHits().getInnerHits().size());
                assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName()));
                InnerHitsContext.BaseInnerHits innerHits = searchContext.innerHits()
                        .getInnerHits().get(queryBuilder.innerHit().getName());
                assertEquals(innerHits.size(), queryBuilder.innerHit().getSize());
                assertEquals(innerHits.sort().getSort().length, 1);
                assertEquals(innerHits.sort().getSort()[0].getField(), STRING_FIELD_NAME_2);
            } else {
                assertThat(SearchContext.current().innerHits().getInnerHits().size(), equalTo(0));
                assertThat(searchContext.innerHits().getInnerHits().size(), equalTo(0));
            }
        }
    }
@@ -21,16 +21,15 @@ package org.elasticsearch.index.query;

import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.is;

@@ -88,8 +87,7 @@ public class IdsQueryBuilderTests extends AbstractQueryTestCase<IdsQueryBuilder>
    @Override
    protected void doAssertLuceneQuery(IdsQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {
        if (queryBuilder.ids().size() == 0) {
            assertThat(query, instanceOf(BooleanQuery.class));
            assertThat(((BooleanQuery) query).clauses().size(), equalTo(0));
            assertThat(query, instanceOf(MatchNoDocsQuery.class));
        } else {
            assertThat(query, instanceOf(TermsQuery.class));
        }
@@ -16,11 +16,13 @@
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.query.support;
package org.elasticsearch.index.query;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.sameInstance;
import static org.hamcrest.Matchers.nullValue;

import java.io.IOException;
import java.util.ArrayList;

@@ -29,6 +31,7 @@ import java.util.List;
import java.util.Map;
import java.util.function.Supplier;

import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;

@@ -41,8 +44,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;

@@ -87,7 +89,7 @@ public class InnerHitBuilderTests extends ESTestCase {

    public void testFromAndToXContent() throws Exception {
        for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
            InnerHitBuilder innerHit = randomInnerHits();
            InnerHitBuilder innerHit = randomInnerHits(true, false);
            XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
            if (randomBoolean()) {
                builder.prettyPrint();

@@ -111,7 +113,7 @@ public class InnerHitBuilderTests extends ESTestCase {
        assertTrue("inner hit is not equal to self", firstInnerHit.equals(firstInnerHit));
        assertThat("same inner hit's hashcode returns different values if called multiple times", firstInnerHit.hashCode(),
                equalTo(firstInnerHit.hashCode()));
        assertThat("different inner hits should not be equal", mutate(firstInnerHit), not(equalTo(firstInnerHit)));
        assertThat("different inner hits should not be equal", mutate(serializedCopy(firstInnerHit)), not(equalTo(firstInnerHit)));

        InnerHitBuilder secondBuilder = serializedCopy(firstInnerHit);
        assertTrue("inner hit is not equal to self", secondBuilder.equals(secondBuilder));

@@ -133,18 +135,83 @@ public class InnerHitBuilderTests extends ESTestCase {
        }
    }

    public static InnerHitBuilder randomInnerHits() {
        return randomInnerHits(true);
    public void testInlineLeafInnerHitsNestedQuery() throws Exception {
        InnerHitBuilder leafInnerHits = randomInnerHits();
        NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None);
        nestedQueryBuilder.innerHit(leafInnerHits);
        Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
        nestedQueryBuilder.extractInnerHitBuilders(innerHitBuilders);
        assertThat(innerHitBuilders.get(leafInnerHits.getName()), notNullValue());
    }

    public static InnerHitBuilder randomInnerHits(boolean recursive) {
        InnerHitBuilder innerHits = new InnerHitBuilder();
        if (randomBoolean()) {
            innerHits.setNestedPath(randomAsciiOfLengthBetween(1, 16));
        } else {
            innerHits.setParentChildType(randomAsciiOfLengthBetween(1, 16));
        }
    public void testInlineLeafInnerHitsHasChildQuery() throws Exception {
        InnerHitBuilder leafInnerHits = randomInnerHits();
        HasChildQueryBuilder hasChildQueryBuilder = new HasChildQueryBuilder("type", new MatchAllQueryBuilder(), ScoreMode.None)
                .innerHit(leafInnerHits);
        Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
        hasChildQueryBuilder.extractInnerHitBuilders(innerHitBuilders);
        assertThat(innerHitBuilders.get(leafInnerHits.getName()), notNullValue());
    }

    public void testInlineLeafInnerHitsHasParentQuery() throws Exception {
        InnerHitBuilder leafInnerHits = randomInnerHits();
        HasParentQueryBuilder hasParentQueryBuilder = new HasParentQueryBuilder("type", new MatchAllQueryBuilder(), false)
                .innerHit(leafInnerHits);
        Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
        hasParentQueryBuilder.extractInnerHitBuilders(innerHitBuilders);
        assertThat(innerHitBuilders.get(leafInnerHits.getName()), notNullValue());
    }

    public void testInlineLeafInnerHitsNestedQueryViaBoolQuery() {
        InnerHitBuilder leafInnerHits = randomInnerHits();
        NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None)
                .innerHit(leafInnerHits);
        BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder().should(nestedQueryBuilder);
        Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
        boolQueryBuilder.extractInnerHitBuilders(innerHitBuilders);
        assertThat(innerHitBuilders.get(leafInnerHits.getName()), notNullValue());
    }

    public void testInlineLeafInnerHitsNestedQueryViaConstantScoreQuery() {
        InnerHitBuilder leafInnerHits = randomInnerHits();
        NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None)
                .innerHit(leafInnerHits);
        ConstantScoreQueryBuilder constantScoreQueryBuilder = new ConstantScoreQueryBuilder(nestedQueryBuilder);
        Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
        constantScoreQueryBuilder.extractInnerHitBuilders(innerHitBuilders);
        assertThat(innerHitBuilders.get(leafInnerHits.getName()), notNullValue());
    }

    public void testInlineLeafInnerHitsNestedQueryViaBoostingQuery() {
        InnerHitBuilder leafInnerHits1 = randomInnerHits();
        NestedQueryBuilder nestedQueryBuilder1 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None)
                .innerHit(leafInnerHits1);
        InnerHitBuilder leafInnerHits2 = randomInnerHits();
        NestedQueryBuilder nestedQueryBuilder2 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None)
                .innerHit(leafInnerHits2);
        BoostingQueryBuilder constantScoreQueryBuilder = new BoostingQueryBuilder(nestedQueryBuilder1, nestedQueryBuilder2);
        Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
        constantScoreQueryBuilder.extractInnerHitBuilders(innerHitBuilders);
        assertThat(innerHitBuilders.get(leafInnerHits1.getName()), notNullValue());
        assertThat(innerHitBuilders.get(leafInnerHits2.getName()), notNullValue());
    }

    public void testInlineLeafInnerHitsNestedQueryViaFunctionScoreQuery() {
        InnerHitBuilder leafInnerHits = randomInnerHits();
        NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None)
                .innerHit(leafInnerHits);
        FunctionScoreQueryBuilder functionScoreQueryBuilder = new FunctionScoreQueryBuilder(nestedQueryBuilder);
        Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
        ((AbstractQueryBuilder) functionScoreQueryBuilder).extractInnerHitBuilders(innerHitBuilders);
        assertThat(innerHitBuilders.get(leafInnerHits.getName()), notNullValue());
    }

    public static InnerHitBuilder randomInnerHits() {
        return randomInnerHits(true, true);
    }

    public static InnerHitBuilder randomInnerHits(boolean recursive, boolean includeQueryTypeOrPath) {
        InnerHitBuilder innerHits = new InnerHitBuilder();
        innerHits.setName(randomAsciiOfLengthBetween(1, 16));
        innerHits.setFrom(randomIntBetween(0, 128));
        innerHits.setSize(randomIntBetween(0, 128));

@@ -170,54 +237,76 @@ public class InnerHitBuilderTests extends ESTestCase {
        );
        }
        innerHits.setHighlightBuilder(HighlightBuilderTests.randomHighlighterBuilder());
        if (randomBoolean()) {
            innerHits.setQuery(new MatchQueryBuilder(randomAsciiOfLengthBetween(1, 16), randomAsciiOfLengthBetween(1, 16)));
        }
        if (recursive && randomBoolean()) {
            InnerHitsBuilder innerHitsBuilder = new InnerHitsBuilder();
            int size = randomIntBetween(1, 16);
            for (int i = 0; i < size; i++) {
                innerHitsBuilder.addInnerHit(randomAsciiOfLengthBetween(1, 16), randomInnerHits(false));
                innerHits.addChildInnerHit(randomInnerHits(false, includeQueryTypeOrPath));
            }
            innerHits.setInnerHitsBuilder(innerHitsBuilder);
        }

        return innerHits;
        if (includeQueryTypeOrPath) {
            QueryBuilder query = new MatchQueryBuilder(randomAsciiOfLengthBetween(1, 16), randomAsciiOfLengthBetween(1, 16));
            if (randomBoolean()) {
                return new InnerHitBuilder(innerHits, randomAsciiOfLength(8), query);
            } else {
                return new InnerHitBuilder(innerHits, query, randomAsciiOfLength(8));
            }
        } else {
            return innerHits;
        }
    }

    static InnerHitBuilder mutate(InnerHitBuilder innerHits) throws IOException {
        InnerHitBuilder copy = serializedCopy(innerHits);
        int surprise = randomIntBetween(0, 10);
    public void testCopyConstructor() throws Exception {
        InnerHitBuilder original = randomInnerHits();
        InnerHitBuilder copy = original.getNestedPath() != null ?
                new InnerHitBuilder(original, original.getNestedPath(), original.getQuery()) :
                new InnerHitBuilder(original, original.getQuery(), original.getParentChildType());
        assertThat(copy, equalTo(original));
        copy = mutate(copy);
        assertThat(copy, not(equalTo(original)));
    }

    static InnerHitBuilder mutate(InnerHitBuilder instance) throws IOException {
        int surprise = randomIntBetween(0, 11);
        switch (surprise) {
            case 0:
                copy.setFrom(randomValueOtherThan(innerHits.getFrom(), () -> randomIntBetween(0, 128)));
                instance.setFrom(randomValueOtherThan(instance.getFrom(), () -> randomIntBetween(0, 128)));
                break;
            case 1:
                copy.setSize(randomValueOtherThan(innerHits.getSize(), () -> randomIntBetween(0, 128)));
                instance.setSize(randomValueOtherThan(instance.getSize(), () -> randomIntBetween(0, 128)));
                break;
            case 2:
                copy.setExplain(!copy.isExplain());
                instance.setExplain(!instance.isExplain());
                break;
            case 3:
                copy.setVersion(!copy.isVersion());
                instance.setVersion(!instance.isVersion());
                break;
            case 4:
                copy.setTrackScores(!copy.isTrackScores());
                instance.setTrackScores(!instance.isTrackScores());
                break;
            case 5:
                copy.setName(randomValueOtherThan(innerHits.getName(), () -> randomAsciiOfLengthBetween(1, 16)));
                instance.setName(randomValueOtherThan(instance.getName(), () -> randomAsciiOfLengthBetween(1, 16)));
                break;
            case 6:
                copy.setFieldDataFields(randomValueOtherThan(copy.getFieldDataFields(), () -> {
                    return randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16));
                }));
                if (randomBoolean()) {
                    instance.setFieldDataFields(randomValueOtherThan(instance.getFieldDataFields(), () -> {
                        return randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16));
                    }));
                } else {
                    instance.addFieldDataField(randomAsciiOfLengthBetween(1, 16));
                }
                break;
            case 7:
                copy.setScriptFields(randomValueOtherThan(copy.getScriptFields(), () -> {
                    return randomListStuff(16, InnerHitBuilderTests::randomScript);
                }));
                if (randomBoolean()) {
                    instance.setScriptFields(randomValueOtherThan(instance.getScriptFields(), () -> {
                        return randomListStuff(16, InnerHitBuilderTests::randomScript);
                    }));
                } else {
                    SearchSourceBuilder.ScriptField script = randomScript();
                    instance.addScriptField(script.fieldName(), script.script());
                }
                break;
            case 8:
                copy.setFetchSourceContext(randomValueOtherThan(copy.getFetchSourceContext(), () -> {
                instance.setFetchSourceContext(randomValueOtherThan(instance.getFetchSourceContext(), () -> {
                    FetchSourceContext randomFetchSourceContext;
                    if (randomBoolean()) {
                        randomFetchSourceContext = new FetchSourceContext(randomBoolean());

@@ -231,21 +320,34 @@ public class InnerHitBuilderTests extends ESTestCase {
                }));
                break;
            case 9:
                final List<SortBuilder<?>> sortBuilders = randomValueOtherThan(copy.getSorts(), () -> {
                    List<SortBuilder<?>> builders = randomListStuff(16,
                            () -> SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values())));
                    return builders;
                });
                copy.setSorts(sortBuilders);
                if (randomBoolean()) {
                    final List<SortBuilder<?>> sortBuilders = randomValueOtherThan(instance.getSorts(), () -> {
                        List<SortBuilder<?>> builders = randomListStuff(16,
                                () -> SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)).order(randomFrom(SortOrder.values())));
                        return builders;
                    });
                    instance.setSorts(sortBuilders);
                } else {
                    instance.addSort(SortBuilders.fieldSort(randomAsciiOfLengthBetween(5, 20)));
                }
                break;
            case 10:
                copy.setHighlightBuilder(randomValueOtherThan(copy.getHighlightBuilder(),
                instance.setHighlightBuilder(randomValueOtherThan(instance.getHighlightBuilder(),
                        HighlightBuilderTests::randomHighlighterBuilder));
                break;
            case 11:
                if (instance.getFieldNames() == null || randomBoolean()) {
                    instance.setFieldNames(randomValueOtherThan(instance.getFieldNames(), () -> {
                        return randomListStuff(16, () -> randomAsciiOfLengthBetween(1, 16));
                    }));
                } else {
                    instance.getFieldNames().add(randomAsciiOfLengthBetween(1, 16));
                }
                break;
            default:
                throw new IllegalStateException("unexpected surprise [" + surprise + "]");
        }
        return copy;
        return instance;
    }

    static SearchSourceBuilder.ScriptField randomScript() {
@@ -19,12 +19,11 @@

package org.elasticsearch.index.query;

import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;

import java.io.IOException;

import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;

public class MatchNoneQueryBuilderTests extends AbstractQueryTestCase<MatchNoneQueryBuilder> {

@@ -36,9 +35,7 @@ public class MatchNoneQueryBuilderTests extends AbstractQueryTestCase<MatchNoneQueryBuilder> {

    @Override
    protected void doAssertLuceneQuery(MatchNoneQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {
        assertThat(query, instanceOf(BooleanQuery.class));
        BooleanQuery booleanQuery = (BooleanQuery) query;
        assertThat(booleanQuery.clauses().size(), equalTo(0));
        assertThat(query, instanceOf(MatchNoDocsQuery.class));
    }

    public void testFromJson() throws IOException {
@@ -23,6 +23,7 @@ import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.PointRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import java.io.IOException;
import static org.hamcrest.CoreMatchers.either;

@@ -72,7 +73,7 @@ public class MatchPhrasePrefixQueryBuilderTests extends AbstractQueryTestCase<MatchPhrasePrefixQueryBuilder> {
        assertThat(query, notNullValue());
        assertThat(query,
                either(instanceOf(BooleanQuery.class)).or(instanceOf(MultiPhrasePrefixQuery.class))
                        .or(instanceOf(TermQuery.class)).or(instanceOf(PointRangeQuery.class)));
                        .or(instanceOf(TermQuery.class)).or(instanceOf(PointRangeQuery.class)).or(instanceOf(MatchNoDocsQuery.class)));
    }

    public void testIllegalValues() {
@@ -24,6 +24,7 @@ import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PointRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;

import java.io.IOException;

@@ -68,7 +69,7 @@ public class MatchPhraseQueryBuilderTests extends AbstractQueryTestCase<MatchPhraseQueryBuilder> {
    protected void doAssertLuceneQuery(MatchPhraseQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {
        assertThat(query, notNullValue());
        assertThat(query, either(instanceOf(BooleanQuery.class)).or(instanceOf(PhraseQuery.class))
                .or(instanceOf(TermQuery.class)).or(instanceOf(PointRangeQuery.class)));
                .or(instanceOf(TermQuery.class)).or(instanceOf(PointRangeQuery.class)).or(instanceOf(MatchNoDocsQuery.class)));
    }

    public void testIllegalValues() {
@@ -30,7 +30,7 @@ import org.apache.lucene.search.PointRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.unit.Fuzziness;

@@ -127,7 +127,7 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase<MatchQueryBuilder> {
        switch (queryBuilder.type()) {
            case BOOLEAN:
                assertThat(query, either(instanceOf(BooleanQuery.class)).or(instanceOf(ExtendedCommonTermsQuery.class))
                        .or(instanceOf(TermQuery.class)).or(instanceOf(FuzzyQuery.class))
                        .or(instanceOf(TermQuery.class)).or(instanceOf(FuzzyQuery.class)).or(instanceOf(MatchNoDocsQuery.class))
                        .or(instanceOf(LegacyNumericRangeQuery.class)).or(instanceOf(PointRangeQuery.class)));
                break;
            case PHRASE:
@@ -27,12 +27,12 @@ import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.LegacyNumericRangeQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PointRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.common.lucene.all.AllTermQuery;
import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.index.search.MatchQuery;
@@ -21,20 +21,25 @@ package org.elasticsearch.index.query;

import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.fasterxml.jackson.core.JsonParseException;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.search.join.ToParentBlockJoinQuery;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.support.InnerHitBuilder;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.SortOrder;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.equalTo;

@@ -66,11 +71,11 @@ public class NestedQueryBuilderTests extends AbstractQueryTestCase<NestedQueryBuilder> {
    protected NestedQueryBuilder doCreateTestQueryBuilder() {
        NestedQueryBuilder nqb = new NestedQueryBuilder("nested1", RandomQueryBuilder.createQuery(random()),
                RandomPicks.randomFrom(random(), ScoreMode.values()));
        if (SearchContext.current() != null) {
        if (randomBoolean()) {
            nqb.innerHit(new InnerHitBuilder()
                    .setName(randomAsciiOfLengthBetween(1, 10))
                    .setSize(randomIntBetween(0, 100))
                    .addSort(new FieldSortBuilder(STRING_FIELD_NAME).order(SortOrder.ASC)));
                    .addSort(new FieldSortBuilder(INT_FIELD_NAME).order(SortOrder.ASC)));
        }
        nqb.ignoreUnmapped(randomBoolean());
        return nqb;

@@ -87,17 +92,23 @@ public class NestedQueryBuilderTests extends AbstractQueryTestCase<NestedQueryBuilder> {
            //TODO how to assert this?
        }
        if (queryBuilder.innerHit() != null) {
            assertNotNull(SearchContext.current());
            SearchContext searchContext = SearchContext.current();
            assertNotNull(searchContext);
            if (query != null) {
                assertNotNull(SearchContext.current().innerHits());
                assertEquals(1, SearchContext.current().innerHits().getInnerHits().size());
                assertTrue(SearchContext.current().innerHits().getInnerHits().containsKey("inner_hits_name"));
                InnerHitsContext.BaseInnerHits innerHits = SearchContext.current().innerHits().getInnerHits().get("inner_hits_name");
                assertEquals(innerHits.size(), 100);
                Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
                InnerHitBuilder.extractInnerHits(queryBuilder, innerHitBuilders);
                for (InnerHitBuilder builder : innerHitBuilders.values()) {
                    builder.build(searchContext, searchContext.innerHits());
                }
                assertNotNull(searchContext.innerHits());
                assertEquals(1, searchContext.innerHits().getInnerHits().size());
                assertTrue(searchContext.innerHits().getInnerHits().containsKey(queryBuilder.innerHit().getName()));
                InnerHitsContext.BaseInnerHits innerHits = searchContext.innerHits().getInnerHits().get(queryBuilder.innerHit().getName());
                assertEquals(innerHits.size(), queryBuilder.innerHit().getSize());
                assertEquals(innerHits.sort().getSort().length, 1);
                assertEquals(innerHits.sort().getSort()[0].getField(), STRING_FIELD_NAME);
                assertEquals(innerHits.sort().getSort()[0].getField(), INT_FIELD_NAME);
            } else {
                assertThat(SearchContext.current().innerHits().getInnerHits().size(), equalTo(0));
                assertThat(searchContext.innerHits().getInnerHits().size(), equalTo(0));
            }
        }
    }

@@ -163,6 +174,36 @@ public class NestedQueryBuilderTests extends AbstractQueryTestCase<NestedQueryBuilder> {
        assertEquals(json, ScoreMode.Avg, parsed.scoreMode());
    }

    /**
     * override superclass test, because here we need to take care that mutation doesn't happen inside
     * `inner_hits` structure, because we don't parse them yet and so no exception will be triggered
     * for any mutation there.
     */
    @Override
    public void testUnknownObjectException() throws IOException {
        String validQuery = createTestQueryBuilder().toString();
        assertThat(validQuery, containsString("{"));
        int endPosition = validQuery.indexOf("inner_hits");
        if (endPosition == -1) {
            endPosition = validQuery.length() - 1;
        }
        for (int insertionPosition = 0; insertionPosition < endPosition; insertionPosition++) {
            if (validQuery.charAt(insertionPosition) == '{') {
                String testQuery = validQuery.substring(0, insertionPosition) + "{ \"newField\" : " +
                        validQuery.substring(insertionPosition) + "}";
                try {
                    parseQuery(testQuery);
                    fail("some parsing exception expected for query: " + testQuery);
                } catch (ParsingException | Script.ScriptParseException | ElasticsearchParseException e) {
                    // different kinds of exception wordings depending on location
                    // of mutation, so no simple asserts possible here
                } catch (JsonParseException e) {
                    // mutation produced invalid json
                }
            }
        }
    }

    public void testIgnoreUnmapped() throws IOException {
        final NestedQueryBuilder queryBuilder = new NestedQueryBuilder("unmapped", new MatchAllQueryBuilder(), ScoreMode.None);
        queryBuilder.ignoreUnmapped(true);
@@ -31,6 +31,7 @@ import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.lucene.search.MatchNoDocsQuery;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.get.GetResult;

@@ -93,41 +94,51 @@ public class TermsQueryBuilderTests extends AbstractQueryTestCase<TermsQueryBuilder> {

    @Override
    protected void doAssertLuceneQuery(TermsQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {
        assertThat(query, instanceOf(BooleanQuery.class));
        BooleanQuery booleanQuery = (BooleanQuery) query;

        // we only do the check below for string fields (otherwise we'd have to decode the values)
        if (queryBuilder.fieldName().equals(INT_FIELD_NAME) || queryBuilder.fieldName().equals(DOUBLE_FIELD_NAME)
                || queryBuilder.fieldName().equals(BOOLEAN_FIELD_NAME) || queryBuilder.fieldName().equals(DATE_FIELD_NAME)) {
            return;
        }

        // expected returned terms depending on whether we have a terms query or a terms lookup query
        List<Object> terms;
        if (queryBuilder.termsLookup() != null) {
            terms = randomTerms;
        if (queryBuilder.termsLookup() == null && (queryBuilder.values() == null || queryBuilder.values().isEmpty())) {
            assertThat(query, instanceOf(MatchNoDocsQuery.class));
            MatchNoDocsQuery matchNoDocsQuery = (MatchNoDocsQuery) query;
            assertThat(matchNoDocsQuery.toString(), containsString("No terms supplied for \"terms\" query."));
        } else if (queryBuilder.termsLookup() != null && randomTerms.size() == 0) {
            assertThat(query, instanceOf(MatchNoDocsQuery.class));
            MatchNoDocsQuery matchNoDocsQuery = (MatchNoDocsQuery) query;
            assertThat(matchNoDocsQuery.toString(), containsString("No terms supplied for \"terms\" query."));
        } else {
            terms = queryBuilder.values();
        }
            assertThat(query, instanceOf(BooleanQuery.class));
            BooleanQuery booleanQuery = (BooleanQuery) query;

        // compare whether we have the expected list of terms returned
        final List<Term> booleanTerms = new ArrayList<>();
        for (BooleanClause booleanClause : booleanQuery) {
            assertThat(booleanClause.getOccur(), equalTo(BooleanClause.Occur.SHOULD));
            assertThat(booleanClause.getQuery(), instanceOf(TermQuery.class));
            Term term = ((TermQuery) booleanClause.getQuery()).getTerm();
            booleanTerms.add(term);
        }
        CollectionUtil.timSort(booleanTerms);
        List<Term> expectedTerms = new ArrayList<>();
        for (Object term : terms) {
            if (term != null) { // terms lookup filters this out
                expectedTerms.add(new Term(queryBuilder.fieldName(), term.toString()));
            // we only do the check below for string fields (otherwise we'd have to decode the values)
            if (queryBuilder.fieldName().equals(INT_FIELD_NAME) || queryBuilder.fieldName().equals(DOUBLE_FIELD_NAME)
                    || queryBuilder.fieldName().equals(BOOLEAN_FIELD_NAME) || queryBuilder.fieldName().equals(DATE_FIELD_NAME)) {
                return;
            }

            // expected returned terms depending on whether we have a terms query or a terms lookup query
            List<Object> terms;
            if (queryBuilder.termsLookup() != null) {
                terms = randomTerms;
            } else {
                terms = queryBuilder.values();
            }

            // compare whether we have the expected list of terms returned
            final List<Term> booleanTerms = new ArrayList<>();
            for (BooleanClause booleanClause : booleanQuery) {
                assertThat(booleanClause.getOccur(), equalTo(BooleanClause.Occur.SHOULD));
                assertThat(booleanClause.getQuery(), instanceOf(TermQuery.class));
                Term term = ((TermQuery) booleanClause.getQuery()).getTerm();
                booleanTerms.add(term);
            }
            CollectionUtil.timSort(booleanTerms);
            List<Term> expectedTerms = new ArrayList<>();
            for (Object term : terms) {
                if (term != null) { // terms lookup filters this out
                    expectedTerms.add(new Term(queryBuilder.fieldName(), term.toString()));
                }
            }
            CollectionUtil.timSort(expectedTerms);
            assertEquals(expectedTerms + " vs. " + booleanTerms, expectedTerms.size(), booleanTerms.size());
            assertEquals(expectedTerms + " vs. " + booleanTerms, expectedTerms, booleanTerms);
        }
        CollectionUtil.timSort(expectedTerms);
        assertEquals(expectedTerms + " vs. " + booleanTerms, expectedTerms.size(), booleanTerms.size());
        assertEquals(expectedTerms + " vs. " + booleanTerms, expectedTerms, booleanTerms);
    }

    public void testEmtpyFieldName() {
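The rewritten assertions above encode a behavior change: an empty terms list, or a terms lookup that returns nothing, now produces a MatchNoDocsQuery instead of an empty BooleanQuery. A hedged sketch of that decision follows — the names are illustrative, not the Elasticsearch implementation; the test uses Elasticsearch's own org.elasticsearch.common.lucene.search.MatchNoDocsQuery, while the sketch substitutes Lucene's, whose reason-string constructor is an assumption implied by the toString assertions:

import java.util.List;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

class TermsQuerySketch {
    // Empty input (no inline values and no lookup results) short-circuits to a
    // match-no-docs query carrying the reason the tests assert on; otherwise a
    // disjunction of SHOULD term clauses is built, one per supplied value.
    static Query termsQuery(String field, List<Object> values) {
        if (values == null || values.isEmpty()) {
            return new MatchNoDocsQuery("No terms supplied for \"terms\" query.");
        }
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        for (Object value : values) {
            builder.add(new TermQuery(new Term(field, value.toString())), BooleanClause.Occur.SHOULD);
        }
        return builder.build();
    }
}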
@ -1,140 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.index.query.support;
|
||||
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.query.QueryParseContext;
|
||||
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
|
||||
import org.elasticsearch.search.SearchModule;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.hamcrest.Matchers.sameInstance;
|
||||
|
||||
public class InnerHitsBuilderTests extends ESTestCase {
|
||||
|
||||
private static final int NUMBER_OF_TESTBUILDERS = 20;
|
||||
private static NamedWriteableRegistry namedWriteableRegistry;
|
||||
private static IndicesQueriesRegistry indicesQueriesRegistry;
|
||||
|
||||
@BeforeClass
|
||||
public static void init() {
|
||||
namedWriteableRegistry = new NamedWriteableRegistry();
|
||||
indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).getQueryParserRegistry();
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
public static void afterClass() throws Exception {
|
||||
namedWriteableRegistry = null;
|
||||
indicesQueriesRegistry = null;
|
||||
}
|
||||
|
||||
public void testSerialization() throws Exception {
|
||||
for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
|
||||
InnerHitsBuilder original = randomInnerHits();
|
||||
InnerHitsBuilder deserialized = serializedCopy(original);
|
||||
assertEquals(deserialized, original);
|
||||
            assertEquals(deserialized.hashCode(), original.hashCode());
            assertNotSame(deserialized, original);
        }
    }

    public void testFromAndToXContent() throws Exception {
        for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
            InnerHitsBuilder innerHits = randomInnerHits();
            XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values()));
            if (randomBoolean()) {
                builder.prettyPrint();
            }
            innerHits.toXContent(builder, ToXContent.EMPTY_PARAMS);

            XContentParser parser = XContentHelper.createParser(builder.bytes());
            QueryParseContext context = new QueryParseContext(indicesQueriesRegistry, parser, ParseFieldMatcher.EMPTY);
            parser.nextToken();
            InnerHitsBuilder secondInnerHits = InnerHitsBuilder.fromXContent(context);
            assertThat(innerHits, not(sameInstance(secondInnerHits)));
            assertThat(innerHits, equalTo(secondInnerHits));
            assertThat(innerHits.hashCode(), equalTo(secondInnerHits.hashCode()));
        }
    }

    public void testEqualsAndHashcode() throws IOException {
        for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) {
            InnerHitsBuilder firstInnerHits = randomInnerHits();
            assertFalse("inner hit is equal to null", firstInnerHits.equals(null));
            assertFalse("inner hit is equal to incompatible type", firstInnerHits.equals(""));
            assertTrue("inner hit is not equal to self", firstInnerHits.equals(firstInnerHits));
            assertThat("same inner hit's hashcode returns different values if called multiple times", firstInnerHits.hashCode(),
                    equalTo(firstInnerHits.hashCode()));

            InnerHitsBuilder secondBuilder = serializedCopy(firstInnerHits);
            assertTrue("inner hit is not equal to self", secondBuilder.equals(secondBuilder));
            assertTrue("inner hit is not equal to its copy", firstInnerHits.equals(secondBuilder));
            assertTrue("equals is not symmetric", secondBuilder.equals(firstInnerHits));
            assertThat("inner hits copy's hashcode is different from original hashcode", secondBuilder.hashCode(),
                    equalTo(firstInnerHits.hashCode()));

            InnerHitsBuilder thirdBuilder = serializedCopy(secondBuilder);
            assertTrue("inner hit is not equal to self", thirdBuilder.equals(thirdBuilder));
            assertTrue("inner hit is not equal to its copy", secondBuilder.equals(thirdBuilder));
            assertThat("inner hit copy's hashcode is different from original hashcode", secondBuilder.hashCode(),
                    equalTo(thirdBuilder.hashCode()));
            assertTrue("equals is not transitive", firstInnerHits.equals(thirdBuilder));
            assertThat("inner hit copy's hashcode is different from original hashcode", firstInnerHits.hashCode(),
                    equalTo(thirdBuilder.hashCode()));
            assertTrue("equals is not symmetric", thirdBuilder.equals(secondBuilder));
            assertTrue("equals is not symmetric", thirdBuilder.equals(firstInnerHits));
        }
    }

    public static InnerHitsBuilder randomInnerHits() {
        InnerHitsBuilder innerHits = new InnerHitsBuilder();
        int numInnerHits = randomIntBetween(0, 12);
        for (int i = 0; i < numInnerHits; i++) {
            innerHits.addInnerHit(randomAsciiOfLength(5), InnerHitBuilderTests.randomInnerHits());
        }
        return innerHits;
    }

    private static InnerHitsBuilder serializedCopy(InnerHitsBuilder original) throws IOException {
        try (BytesStreamOutput output = new BytesStreamOutput()) {
            original.writeTo(output);
            try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
                return new InnerHitsBuilder(in);
            }
        }
    }

}

@@ -21,8 +21,10 @@ package org.elasticsearch.indices;
import org.apache.lucene.store.LockObtainFailedException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasAction;
import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;

@@ -283,6 +285,36 @@ public class IndicesServiceTests extends ESSingleNodeTestCase {
        indicesService.deleteIndex(test.index(), "finished with test");
    }

    /**
     * This test checks an edge case where, if a node had an index (let's call it A, with UUID 1), then
     * deleted it (so a tombstone entry for A will exist in the cluster state), then created
     * a new index A with UUID 2 and then shut down, then when the node comes back online it will look
     * at the tombstones for deletions, and it should proceed with trying to delete A with UUID 1 and not
     * throw any errors that the index still exists in the cluster state. This is a case of ensuring
     * that tombstones that have the same name as current valid indices don't cause confusion by
     * trying to delete an index that exists.
     * See https://github.com/elastic/elasticsearch/issues/18054
     */
    public void testIndexAndTombstoneWithSameNameOnStartup() throws Exception {
        final String indexName = "test";
        final Index index = new Index(indexName, UUIDs.randomBase64UUID());
        final IndicesService indicesService = getIndicesService();
        final Settings idxSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                .put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
                .build();
        final IndexMetaData indexMetaData = new IndexMetaData.Builder(index.getName())
                .settings(idxSettings)
                .numberOfShards(1)
                .numberOfReplicas(0)
                .build();
        final Index tombstonedIndex = new Index(indexName, UUIDs.randomBase64UUID());
        final IndexGraveyard graveyard = IndexGraveyard.builder().addTombstone(tombstonedIndex).build();
        final MetaData metaData = MetaData.builder().put(indexMetaData, true).indexGraveyard(graveyard).build();
        final ClusterState clusterState = new ClusterState.Builder(new ClusterName("testCluster")).metaData(metaData).build();
        // if all goes well, this won't throw an exception; otherwise, it will throw an IllegalStateException
        indicesService.verifyIndexIsDeleted(tombstonedIndex, clusterState);
    }
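    // Note: the tombstoned index and the live index above share the same name but have different
    // UUIDs; verifyIndexIsDeleted presumably keys off the UUID rather than the name, which is why
    // no IllegalStateException is expected from the call in the test above.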

    private static class DanglingListener implements LocalAllocateDangledIndices.Listener {
        final CountDownLatch latch = new CountDownLatch(1);

@@ -34,7 +34,6 @@ import java.util.Map;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.sameInstance;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;

@@ -45,6 +44,8 @@ public class ConfigurationUtilsTests extends ESTestCase {
    public void setConfig() {
        config = new HashMap<>();
        config.put("foo", "bar");
        config.put("boolVal", true);
        config.put("null", null);
        config.put("arr", Arrays.asList("1", "2", "3"));
        List<Integer> list = new ArrayList<>();
        list.add(2);

@@ -68,6 +69,24 @@ public class ConfigurationUtilsTests extends ESTestCase {
        }
    }

    public void testReadBooleanProperty() {
        Boolean val = ConfigurationUtils.readBooleanProperty(null, null, config, "boolVal", false);
        assertThat(val, equalTo(true));
    }

    public void testReadNullBooleanProperty() {
        Boolean val = ConfigurationUtils.readBooleanProperty(null, null, config, "null", false);
        assertThat(val, equalTo(false));
    }

    public void testReadBooleanPropertyInvalidType() {
        try {
            ConfigurationUtils.readBooleanProperty(null, null, config, "arr", true);
        } catch (ElasticsearchParseException e) {
            assertThat(e.getMessage(), equalTo("[arr] property isn't a boolean, but of type [java.util.Arrays$ArrayList]"));
        }
    }
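    // Note: if readBooleanProperty were ever to stop throwing for the list-typed "arr" value, the
    // test above would pass vacuously; a fail(...) call inside the try block would make the
    // expected exception explicit.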

    // TODO(talevy): Issue with generics. This test should fail, "int" is of type List<Integer>
    public void testOptional_InvalidType() {
        List<String> val = ConfigurationUtils.readList(null, null, config, "int");

@@ -199,7 +199,7 @@ public class IngestDocumentTests extends ESTestCase {

    public void testGetFieldValueNull() {
        try {
-           ingestDocument.getFieldValue(null, String.class);
+           ingestDocument.getFieldValue((String) null, String.class);
            fail("get field value should have failed");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), equalTo("path cannot be null nor empty"));

@@ -263,7 +263,7 @@ public class IngestDocumentTests extends ESTestCase {

    public void testHasFieldNull() {
        try {
-           ingestDocument.hasField(null);
+           ingestDocument.hasField((String) null);
            fail("has field should have failed");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), equalTo("path cannot be null nor empty"));

@@ -0,0 +1,99 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.ingest.processor;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;
import org.joda.time.DateTimeZone;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

public class DateIndexNameFactoryTests extends ESTestCase {

    public void testDefaults() throws Exception {
        DateIndexNameProcessor.Factory factory = new DateIndexNameProcessor.Factory();
        Map<String, Object> config = new HashMap<>();
        config.put("field", "_field");
        config.put("date_rounding", "y");

        DateIndexNameProcessor processor = factory.create(config);
        assertThat(processor.getDateFormats().size(), Matchers.equalTo(1));
        assertThat(processor.getField(), Matchers.equalTo("_field"));
        assertThat(processor.getIndexNamePrefix(), Matchers.equalTo(""));
        assertThat(processor.getDateRounding(), Matchers.equalTo("y"));
        assertThat(processor.getIndexNameFormat(), Matchers.equalTo("yyyy-MM-dd"));
        assertThat(processor.getTimezone(), Matchers.equalTo(DateTimeZone.UTC));
    }

    public void testSpecifyOptionalSettings() throws Exception {
        DateIndexNameProcessor.Factory factory = new DateIndexNameProcessor.Factory();
        Map<String, Object> config = new HashMap<>();
        config.put("field", "_field");
        config.put("index_name_prefix", "_prefix");
        config.put("date_rounding", "y");
        config.put("date_formats", Arrays.asList("UNIX", "UNIX_MS"));

        DateIndexNameProcessor processor = factory.create(config);
        assertThat(processor.getDateFormats().size(), Matchers.equalTo(2));

        config = new HashMap<>();
        config.put("field", "_field");
        config.put("index_name_prefix", "_prefix");
        config.put("date_rounding", "y");
        config.put("index_name_format", "yyyyMMdd");

        processor = factory.create(config);
        assertThat(processor.getIndexNameFormat(), Matchers.equalTo("yyyyMMdd"));

        config = new HashMap<>();
        config.put("field", "_field");
        config.put("index_name_prefix", "_prefix");
        config.put("date_rounding", "y");
        config.put("timezone", "+02:00");

        processor = factory.create(config);
        assertThat(processor.getTimezone(), Matchers.equalTo(DateTimeZone.forOffsetHours(2)));

        config = new HashMap<>();
        config.put("field", "_field");
        config.put("index_name_prefix", "_prefix");
        config.put("date_rounding", "y");

        processor = factory.create(config);
        assertThat(processor.getIndexNamePrefix(), Matchers.equalTo("_prefix"));
    }

    public void testRequiredFields() throws Exception {
        DateIndexNameProcessor.Factory factory = new DateIndexNameProcessor.Factory();
        Map<String, Object> config = new HashMap<>();
        config.put("date_rounding", "y");
        ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> factory.create(config));
        assertThat(e.getMessage(), Matchers.equalTo("[field] required property is missing"));

        config.clear();
        config.put("field", "_field");
        e = expectThrows(ElasticsearchParseException.class, () -> factory.create(config));
        assertThat(e.getMessage(), Matchers.equalTo("[date_rounding] required property is missing"));
    }

}

@@ -0,0 +1,77 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.ingest.processor;

import org.elasticsearch.ingest.core.IngestDocument;
import org.elasticsearch.test.ESTestCase;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

import java.util.Collections;
import java.util.Locale;
import java.util.function.Function;

import static org.hamcrest.CoreMatchers.equalTo;

public class DateIndexNameProcessorTests extends ESTestCase {

    public void testJodaPattern() throws Exception {
        Function<String, DateTime> function = DateFormat.Joda.getFunction("yyyy-MM-dd'T'HH:mm:ss.SSSZ", DateTimeZone.UTC, Locale.ROOT);
        DateIndexNameProcessor processor = new DateIndexNameProcessor(
                "_tag", "_field", Collections.singletonList(function), DateTimeZone.UTC,
                "events-", "y", "yyyyMMdd"
        );

        IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null,
                Collections.singletonMap("_field", "2016-04-25T12:24:20.101Z"));
        processor.execute(document);
        assertThat(document.getSourceAndMetadata().get("_index"), equalTo("<events-{20160425||/y{yyyyMMdd|UTC}}>"));
    }
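    // The string asserted above is a date-math index expression of the form
    // <prefix{rounded-date||/rounding{format|timezone}}>; Elasticsearch resolves it to a concrete
    // index name at indexing time rather than using it literally.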

    public void testTAI64N() throws Exception {
        Function<String, DateTime> function = DateFormat.Tai64n.getFunction(null, DateTimeZone.UTC, null);
        DateIndexNameProcessor dateProcessor = new DateIndexNameProcessor("_tag", "_field", Collections.singletonList(function),
                DateTimeZone.UTC, "events-", "m", "yyyyMMdd");
        IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null,
                Collections.singletonMap("_field", (randomBoolean() ? "@" : "") + "4000000050d506482dbdf024"));
        dateProcessor.execute(document);
        assertThat(document.getSourceAndMetadata().get("_index"), equalTo("<events-{20121222||/m{yyyyMMdd|UTC}}>"));
    }

    public void testUnixMs() throws Exception {
        Function<String, DateTime> function = DateFormat.UnixMs.getFunction(null, DateTimeZone.UTC, null);
        DateIndexNameProcessor dateProcessor = new DateIndexNameProcessor("_tag", "_field", Collections.singletonList(function),
                DateTimeZone.UTC, "events-", "m", "yyyyMMdd");
        IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null,
                Collections.singletonMap("_field", "1000500"));
        dateProcessor.execute(document);
        assertThat(document.getSourceAndMetadata().get("_index"), equalTo("<events-{19700101||/m{yyyyMMdd|UTC}}>"));
    }

    public void testUnix() throws Exception {
        Function<String, DateTime> function = DateFormat.Unix.getFunction(null, DateTimeZone.UTC, null);
        DateIndexNameProcessor dateProcessor = new DateIndexNameProcessor("_tag", "_field", Collections.singletonList(function),
                DateTimeZone.UTC, "events-", "m", "yyyyMMdd");
        IngestDocument document = new IngestDocument("_index", "_type", "_id", null, null, null, null,
                Collections.singletonMap("_field", "1000.5"));
        dateProcessor.execute(document);
        assertThat(document.getSourceAndMetadata().get("_index"), equalTo("<events-{19700101||/m{yyyyMMdd|UTC}}>"));
    }

}

@@ -22,7 +22,6 @@ package org.elasticsearch.ingest.processor;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ingest.TestTemplateService;
import org.elasticsearch.ingest.core.AbstractProcessorFactory;
import org.elasticsearch.ingest.core.Processor;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;

@@ -51,6 +50,22 @@ public class SetProcessorFactoryTests extends ESTestCase {
        assertThat(setProcessor.getTag(), equalTo(processorTag));
        assertThat(setProcessor.getField().execute(Collections.emptyMap()), equalTo("field1"));
        assertThat(setProcessor.getValue().copyAndResolve(Collections.emptyMap()), equalTo("value1"));
        assertThat(setProcessor.isOverrideEnabled(), equalTo(true));
    }
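    // No "override" key is present in the config above, so the factory evidently defaults
    // override to true.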

    public void testCreateWithOverride() throws Exception {
        boolean overrideEnabled = randomBoolean();
        Map<String, Object> config = new HashMap<>();
        config.put("field", "field1");
        config.put("value", "value1");
        config.put("override", overrideEnabled);
        String processorTag = randomAsciiOfLength(10);
        config.put(AbstractProcessorFactory.TAG_KEY, processorTag);
        SetProcessor setProcessor = factory.create(config);
        assertThat(setProcessor.getTag(), equalTo(processorTag));
        assertThat(setProcessor.getField().execute(Collections.emptyMap()), equalTo("field1"));
        assertThat(setProcessor.getValue().copyAndResolve(Collections.emptyMap()), equalTo("value1"));
        assertThat(setProcessor.isOverrideEnabled(), equalTo(overrideEnabled));
    }

    public void testCreateNoFieldPresent() throws Exception {

@@ -38,7 +38,7 @@ public class SetProcessorTests extends ESTestCase {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
        String fieldName = RandomDocumentPicks.randomExistingFieldName(random(), ingestDocument);
        Object fieldValue = RandomDocumentPicks.randomFieldValue(random());
-       Processor processor = createSetProcessor(fieldName, fieldValue);
+       Processor processor = createSetProcessor(fieldName, fieldValue, true);
        processor.execute(ingestDocument);
        assertThat(ingestDocument.hasField(fieldName), equalTo(true));
        assertThat(ingestDocument.getFieldValue(fieldName, Object.class), equalTo(fieldValue));

@@ -50,7 +50,7 @@ public class SetProcessorTests extends ESTestCase {
        IngestDocument testIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
        Object fieldValue = RandomDocumentPicks.randomFieldValue(random());
        String fieldName = RandomDocumentPicks.addRandomField(random(), testIngestDocument, fieldValue);
-       Processor processor = createSetProcessor(fieldName, fieldValue);
+       Processor processor = createSetProcessor(fieldName, fieldValue, true);
        processor.execute(ingestDocument);
        assertThat(ingestDocument.hasField(fieldName), equalTo(true));
        assertThat(ingestDocument.getFieldValue(fieldName, Object.class), equalTo(fieldValue));

@@ -59,7 +59,7 @@ public class SetProcessorTests extends ESTestCase {
    public void testSetFieldsTypeMismatch() throws Exception {
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>());
        ingestDocument.setFieldValue("field", "value");
-       Processor processor = createSetProcessor("field.inner", "value");
+       Processor processor = createSetProcessor("field.inner", "value", true);
        try {
            processor.execute(ingestDocument);
            fail("processor execute should have failed");

@@ -68,16 +68,47 @@ public class SetProcessorTests extends ESTestCase {
        }
    }

    public void testSetNewFieldWithOverrideDisabled() throws Exception {
        IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>());
        String fieldName = RandomDocumentPicks.randomFieldName(random());
        Object fieldValue = RandomDocumentPicks.randomFieldValue(random());
        Processor processor = createSetProcessor(fieldName, fieldValue, false);
        processor.execute(ingestDocument);
        assertThat(ingestDocument.hasField(fieldName), equalTo(true));
        assertThat(ingestDocument.getFieldValue(fieldName, Object.class), equalTo(fieldValue));
    }
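    // The next two tests pin down the override=false contract as exercised here: an existing
    // non-null value is left untouched, while a field whose current value is null is still set.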

    public void testSetExistingFieldWithOverrideDisabled() throws Exception {
        IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>());
        Object fieldValue = "foo";
        String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
        Processor processor = createSetProcessor(fieldName, "bar", false);
        processor.execute(ingestDocument);
        assertThat(ingestDocument.hasField(fieldName), equalTo(true));
        assertThat(ingestDocument.getFieldValue(fieldName, Object.class), equalTo(fieldValue));
    }

    public void testSetExistingNullFieldWithOverrideDisabled() throws Exception {
        IngestDocument ingestDocument = new IngestDocument(new HashMap<>(), new HashMap<>());
        Object fieldValue = null;
        Object newValue = "bar";
        String fieldName = RandomDocumentPicks.addRandomField(random(), ingestDocument, fieldValue);
        Processor processor = createSetProcessor(fieldName, newValue, false);
        processor.execute(ingestDocument);
        assertThat(ingestDocument.hasField(fieldName), equalTo(true));
        assertThat(ingestDocument.getFieldValue(fieldName, Object.class), equalTo(newValue));
    }

    public void testSetMetadata() throws Exception {
        IngestDocument.MetaData randomMetaData = randomFrom(IngestDocument.MetaData.values());
-       Processor processor = createSetProcessor(randomMetaData.getFieldName(), "_value");
+       Processor processor = createSetProcessor(randomMetaData.getFieldName(), "_value", true);
        IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random());
        processor.execute(ingestDocument);
        assertThat(ingestDocument.getFieldValue(randomMetaData.getFieldName(), String.class), Matchers.equalTo("_value"));
    }

-   private static Processor createSetProcessor(String fieldName, Object fieldValue) {
+   private static Processor createSetProcessor(String fieldName, Object fieldValue, boolean overrideEnabled) {
        TemplateService templateService = TestTemplateService.instance();
-       return new SetProcessor(randomAsciiOfLength(10), templateService.compile(fieldName), ValueSource.wrap(fieldValue, templateService));
+       return new SetProcessor(randomAsciiOfLength(10), templateService.compile(fieldName), ValueSource.wrap(fieldValue, templateService), overrideEnabled);
    }
}

@@ -43,7 +43,7 @@ import org.elasticsearch.index.query.Operator;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.query.functionscore.WeightBuilder;
-import org.elasticsearch.index.query.support.InnerHitBuilder;
+import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.search.highlight.HighlightBuilder;
import org.elasticsearch.test.ESIntegTestCase;

@@ -1827,35 +1827,6 @@ public class PercolatorIT extends ESIntegTestCase {
        assertThat(response1.getMatches()[0].getId().string(), equalTo("1"));
    }

    public void testFailNicelyWithInnerHits() throws Exception {
        XContentBuilder mapping = XContentFactory.jsonBuilder().startObject()
                .startObject("mapping")
                .startObject("properties")
                .startObject("nested")
                .field("type", "nested")
                .startObject("properties")
                .startObject("name")
                .field("type", "text")
                .endObject()
                .endObject()
                .endObject()
                .endObject()
                .endObject();

        assertAcked(prepareCreate(INDEX_NAME)
                .addMapping(TYPE_NAME, "query", "type=percolator")
                .addMapping("mapping", mapping));
        try {
            client().prepareIndex(INDEX_NAME, TYPE_NAME, "1")
                    .setSource(jsonBuilder().startObject().field("query", nestedQuery("nested", matchQuery("nested.name", "value"), ScoreMode.Avg).innerHit(new InnerHitBuilder())).endObject())
                    .execute().actionGet();
            fail("Expected a parse error, because inner_hits isn't supported in the percolate api");
        } catch (Exception e) {
            assertThat(e.getCause(), instanceOf(QueryShardException.class));
            assertThat(e.getCause().getMessage(), containsString("inner_hits unsupported"));
        }
    }

    public void testParentChild() throws Exception {
        // We don't fail p/c queries, but those queries are unusable because only a single document can be provided in
        // the percolate api

@@ -52,8 +52,6 @@ import org.elasticsearch.index.query.AbstractQueryTestCase;
import org.elasticsearch.index.query.EmptyQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryParseContext;
-import org.elasticsearch.index.query.support.InnerHitBuilderTests;
-import org.elasticsearch.index.query.support.InnerHitsBuilder;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;

@@ -410,14 +408,6 @@ public class SearchSourceBuilderTests extends ESTestCase {
        if (randomBoolean()) {
            builder.suggest(SuggestBuilderTests.randomSuggestBuilder());
        }
-       if (randomBoolean()) {
-           InnerHitsBuilder innerHitsBuilder = new InnerHitsBuilder();
-           int num = randomIntBetween(0, 3);
-           for (int i = 0; i < num; i++) {
-               innerHitsBuilder.addInnerHit(randomAsciiOfLengthBetween(5, 20), InnerHitBuilderTests.randomInnerHits());
-           }
-           builder.innerHits(innerHitsBuilder);
-       }
        if (randomBoolean()) {
            int numRescores = randomIntBetween(1, 5);
            for (int i = 0; i < numRescores; i++) {

@@ -22,14 +22,11 @@ package org.elasticsearch.search.innerhits;
import org.apache.lucene.search.join.ScoreMode;
import org.apache.lucene.util.ArrayUtil;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
-import org.elasticsearch.index.query.support.InnerHitBuilder;
-import org.elasticsearch.index.query.support.InnerHitsBuilder;
+import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.MockScriptEngine;
import org.elasticsearch.script.Script;

@@ -68,8 +65,6 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

-/**
- */
public class InnerHitsIT extends ESIntegTestCase {
    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {

@@ -112,105 +107,62 @@ public class InnerHitsIT extends ESIntegTestCase {
                .endObject()));
        indexRandom(true, requests);

        InnerHitsBuilder innerHitsBuilder = new InnerHitsBuilder();
        innerHitsBuilder.addInnerHit("comment", new InnerHitBuilder()
                .setNestedPath("comments")
                .setQuery(matchQuery("comments.message", "fox"))
        );
        // Inner hits can be defined in two ways: 1) with the query 2) as separate inner_hit definition
        SearchRequest[] searchRequests = new SearchRequest[]{
                client().prepareSearch("articles").setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(
                        new InnerHitBuilder().setName("comment"))).request(),
                client().prepareSearch("articles").setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg))
                        .innerHits(innerHitsBuilder).request()
        };
        for (SearchRequest searchRequest : searchRequests) {
            SearchResponse response = client().search(searchRequest).actionGet();
            assertNoFailures(response);
            assertHitCount(response, 1);
            assertSearchHit(response, 1, hasId("1"));
            assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
            SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
            assertThat(innerHits.totalHits(), equalTo(2L));
            assertThat(innerHits.getHits().length, equalTo(2));
            assertThat(innerHits.getAt(0).getId(), equalTo("1"));
            assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
            assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
            assertThat(innerHits.getAt(1).getId(), equalTo("1"));
            assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
            assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1));
        }
        SearchResponse response = client().prepareSearch("articles")
                .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg)
                        .innerHit(new InnerHitBuilder().setName("comment"))
                ).get();
        assertNoFailures(response);
        assertHitCount(response, 1);
        assertSearchHit(response, 1, hasId("1"));
        assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
        SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
        assertThat(innerHits.totalHits(), equalTo(2L));
        assertThat(innerHits.getHits().length, equalTo(2));
        assertThat(innerHits.getAt(0).getId(), equalTo("1"));
        assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
        assertThat(innerHits.getAt(1).getId(), equalTo("1"));
        assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1));

        innerHitsBuilder = new InnerHitsBuilder();
        innerHitsBuilder.addInnerHit("comment", new InnerHitBuilder()
                .setQuery(matchQuery("comments.message", "elephant")).setNestedPath("comments")
        );
        // Inner hits can be defined in two ways: 1) with the query 2) as
        // separate inner_hit definition
        searchRequests = new SearchRequest[] {
                client().prepareSearch("articles")
                        .setQuery(nestedQuery("comments", matchQuery("comments.message", "elephant"), ScoreMode.Avg))
                        .innerHits(innerHitsBuilder).request(),
                client().prepareSearch("articles")
                        .setQuery(nestedQuery("comments", matchQuery("comments.message", "elephant"), ScoreMode.Avg).innerHit(new InnerHitBuilder().setName("comment"))).request(),
                client().prepareSearch("articles")
                        .setQuery(nestedQuery("comments", matchQuery("comments.message", "elephant"), ScoreMode.Avg).innerHit(new InnerHitBuilder().setName("comment").addSort(new FieldSortBuilder("_doc").order(SortOrder.DESC)))).request()
        };
        for (SearchRequest searchRequest : searchRequests) {
            SearchResponse response = client().search(searchRequest).actionGet();
            assertNoFailures(response);
            assertHitCount(response, 1);
            assertSearchHit(response, 1, hasId("2"));
            assertThat(response.getHits().getAt(0).getShard(), notNullValue());
            assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
            SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
            assertThat(innerHits.totalHits(), equalTo(3L));
            assertThat(innerHits.getHits().length, equalTo(3));
            assertThat(innerHits.getAt(0).getId(), equalTo("2"));
            assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
            assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
            assertThat(innerHits.getAt(1).getId(), equalTo("2"));
            assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
            assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1));
            assertThat(innerHits.getAt(2).getId(), equalTo("2"));
            assertThat(innerHits.getAt(2).getNestedIdentity().getField().string(), equalTo("comments"));
            assertThat(innerHits.getAt(2).getNestedIdentity().getOffset(), equalTo(2));
        }
        InnerHitBuilder innerHit = new InnerHitBuilder();
        innerHit.setNestedPath("comments");
        innerHit.setQuery(matchQuery("comments.message", "fox"));
        innerHit.setHighlightBuilder(new HighlightBuilder().field("comments.message"));
        innerHit.setExplain(true);
        innerHit.addFieldDataField("comments.message");
        innerHit.addScriptField("script", new Script("5", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, Collections.emptyMap()));
        innerHit.setSize(1);
        innerHitsBuilder = new InnerHitsBuilder();
        innerHitsBuilder.addInnerHit("comments", innerHit);
        searchRequests = new SearchRequest[] {
                client().prepareSearch("articles")
                        .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg))
                        .innerHits(innerHitsBuilder).request(),
                client().prepareSearch("articles")
                        .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(
                                new InnerHitBuilder().setHighlightBuilder(new HighlightBuilder().field("comments.message"))
                                        .setExplain(true)
                                        .addFieldDataField("comments.message")
                                        .addScriptField("script", new Script("5", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, Collections.emptyMap()))
                                        .setSize(1)
                        )).request()
        };
        response = client().prepareSearch("articles")
                .setQuery(nestedQuery("comments", matchQuery("comments.message", "elephant"), ScoreMode.Avg)
                        .innerHit(new InnerHitBuilder().setName("comment"))
                ).get();
        assertNoFailures(response);
        assertHitCount(response, 1);
        assertSearchHit(response, 1, hasId("2"));
        assertThat(response.getHits().getAt(0).getShard(), notNullValue());
        assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
        innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
        assertThat(innerHits.totalHits(), equalTo(3L));
        assertThat(innerHits.getHits().length, equalTo(3));
        assertThat(innerHits.getAt(0).getId(), equalTo("2"));
        assertThat(innerHits.getAt(0).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(innerHits.getAt(0).getNestedIdentity().getOffset(), equalTo(0));
        assertThat(innerHits.getAt(1).getId(), equalTo("2"));
        assertThat(innerHits.getAt(1).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(innerHits.getAt(1).getNestedIdentity().getOffset(), equalTo(1));
        assertThat(innerHits.getAt(2).getId(), equalTo("2"));
        assertThat(innerHits.getAt(2).getNestedIdentity().getField().string(), equalTo("comments"));
        assertThat(innerHits.getAt(2).getNestedIdentity().getOffset(), equalTo(2));

        for (SearchRequest searchRequest : searchRequests) {
            SearchResponse response = client().search(searchRequest).actionGet();
            assertNoFailures(response);
            SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments");
            assertThat(innerHits.getTotalHits(), equalTo(2L));
            assertThat(innerHits.getHits().length, equalTo(1));
            assertThat(innerHits.getAt(0).getHighlightFields().get("comments.message").getFragments()[0].string(), equalTo("<em>fox</em> eat quick"));
            assertThat(innerHits.getAt(0).explanation().toString(), containsString("weight(comments.message:fox in"));
            assertThat(innerHits.getAt(0).getFields().get("comments.message").getValue().toString(), equalTo("eat"));
            assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("5"));
        }
        response = client().prepareSearch("articles")
                .setQuery(nestedQuery("comments", matchQuery("comments.message", "fox"), ScoreMode.Avg).innerHit(
                        new InnerHitBuilder().setHighlightBuilder(new HighlightBuilder().field("comments.message"))
                                .setExplain(true)
                                .addFieldDataField("comments.message")
                                .addScriptField("script", new Script("5", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, Collections.emptyMap()))
                                .setSize(1)
                )).get();
        assertNoFailures(response);
        innerHits = response.getHits().getAt(0).getInnerHits().get("comments");
        assertThat(innerHits.getTotalHits(), equalTo(2L));
        assertThat(innerHits.getHits().length, equalTo(1));
        assertThat(innerHits.getAt(0).getHighlightFields().get("comments.message").getFragments()[0].string(), equalTo("<em>fox</em> eat quick"));
        assertThat(innerHits.getAt(0).explanation().toString(), containsString("weight(comments.message:fox in"));
        assertThat(innerHits.getAt(0).getFields().get("comments.message").getValue().toString(), equalTo("eat"));
        assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("5"));
    }

    public void testRandomNested() throws Exception {

@@ -237,38 +189,16 @@ public class InnerHitsIT extends ESIntegTestCase {
        indexRandom(true, requestBuilders);

        int size = randomIntBetween(0, numDocs);
        SearchResponse searchResponse;
        if (randomBoolean()) {
            InnerHitsBuilder innerHitsBuilder = new InnerHitsBuilder();
            innerHitsBuilder.addInnerHit("a", new InnerHitBuilder().setNestedPath("field1")
                    // Sort order is DESC, because we reverse the inner objects during indexing!
                    .addSort(new FieldSortBuilder("_doc").order(SortOrder.DESC)).setSize(size));
            innerHitsBuilder.addInnerHit("b", new InnerHitBuilder().setNestedPath("field2")
                    .addSort(new FieldSortBuilder("_doc").order(SortOrder.DESC)).setSize(size));
            searchResponse = client().prepareSearch("idx")
                    .setSize(numDocs)
                    .addSort("_uid", SortOrder.ASC)
                    .innerHits(innerHitsBuilder)
                    .get();
        } else {
            BoolQueryBuilder boolQuery = new BoolQueryBuilder();
            if (randomBoolean()) {
                boolQuery.should(nestedQuery("field1", matchAllQuery(), ScoreMode.Avg).innerHit(new InnerHitBuilder().setName("a").setSize(size)
                        .addSort(new FieldSortBuilder("_doc").order(SortOrder.DESC))));
                boolQuery.should(nestedQuery("field2", matchAllQuery(), ScoreMode.Avg).innerHit(new InnerHitBuilder().setName("b")
                        .addSort(new FieldSortBuilder("_doc").order(SortOrder.DESC)).setSize(size)));
            } else {
                boolQuery.should(constantScoreQuery(nestedQuery("field1", matchAllQuery(), ScoreMode.Avg).innerHit(new InnerHitBuilder().setName("a")
                        .setSize(size).addSort(new FieldSortBuilder("_doc").order(SortOrder.DESC)))));
                boolQuery.should(constantScoreQuery(nestedQuery("field2", matchAllQuery(), ScoreMode.Avg).innerHit(new InnerHitBuilder().setName("b")
                        .setSize(size).addSort(new FieldSortBuilder("_doc").order(SortOrder.DESC)))));
            }
            searchResponse = client().prepareSearch("idx")
                    .setQuery(boolQuery)
                    .setSize(numDocs)
                    .addSort("_uid", SortOrder.ASC)
                    .get();
        }
        BoolQueryBuilder boolQuery = new BoolQueryBuilder();
        boolQuery.should(nestedQuery("field1", matchAllQuery(), ScoreMode.Avg).innerHit(new InnerHitBuilder().setName("a").setSize(size)
                .addSort(new FieldSortBuilder("_doc").order(SortOrder.DESC))));
        boolQuery.should(nestedQuery("field2", matchAllQuery(), ScoreMode.Avg).innerHit(new InnerHitBuilder().setName("b")
                .addSort(new FieldSortBuilder("_doc").order(SortOrder.DESC)).setSize(size)));
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setQuery(boolQuery)
                .setSize(numDocs)
                .addSort("_uid", SortOrder.ASC)
                .get();

        assertNoFailures(searchResponse);
        assertHitCount(searchResponse, numDocs);

@@ -313,102 +243,59 @@ public class InnerHitsIT extends ESIntegTestCase {
        requests.add(client().prepareIndex("articles", "comment", "6").setParent("2").setSource("message", "elephant scared by mice x y"));
        indexRandom(true, requests);

        InnerHitsBuilder innerHitsBuilder = new InnerHitsBuilder();
        innerHitsBuilder.addInnerHit("comment", new InnerHitBuilder().setParentChildType("comment")
                .setQuery(matchQuery("message", "fox")));
        SearchRequest[] searchRequests = new SearchRequest[]{
                client().prepareSearch("articles")
                        .setQuery(hasChildQuery("comment", matchQuery("message", "fox"), ScoreMode.None))
                        .innerHits(innerHitsBuilder)
                        .request(),
                client().prepareSearch("articles")
                        .setQuery(hasChildQuery("comment", matchQuery("message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder().setName("comment")))
                        .request()
        };
        for (SearchRequest searchRequest : searchRequests) {
            SearchResponse response = client().search(searchRequest).actionGet();
            assertNoFailures(response);
            assertHitCount(response, 1);
            assertSearchHit(response, 1, hasId("1"));
            assertThat(response.getHits().getAt(0).getShard(), notNullValue());
        SearchResponse response = client().prepareSearch("articles")
                .setQuery(hasChildQuery("comment", matchQuery("message", "fox"), ScoreMode.None).innerHit(new InnerHitBuilder()))
                .get();
        assertNoFailures(response);
        assertHitCount(response, 1);
        assertSearchHit(response, 1, hasId("1"));
        assertThat(response.getHits().getAt(0).getShard(), notNullValue());

            assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
            SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
            assertThat(innerHits.totalHits(), equalTo(2L));
        assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
        SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
        assertThat(innerHits.totalHits(), equalTo(2L));

            assertThat(innerHits.getAt(0).getId(), equalTo("1"));
            assertThat(innerHits.getAt(0).type(), equalTo("comment"));
            assertThat(innerHits.getAt(1).getId(), equalTo("2"));
            assertThat(innerHits.getAt(1).type(), equalTo("comment"));
        }
        assertThat(innerHits.getAt(0).getId(), equalTo("1"));
        assertThat(innerHits.getAt(0).type(), equalTo("comment"));
        assertThat(innerHits.getAt(1).getId(), equalTo("2"));
        assertThat(innerHits.getAt(1).type(), equalTo("comment"));

        innerHitsBuilder = new InnerHitsBuilder();
        innerHitsBuilder.addInnerHit("comment", new InnerHitBuilder().setParentChildType("comment")
                .setQuery(matchQuery("message", "elephant")));
        searchRequests = new SearchRequest[] {
                client().prepareSearch("articles")
                        .setQuery(hasChildQuery("comment", matchQuery("message", "elephant"), ScoreMode.None))
                        .innerHits(innerHitsBuilder)
                        .request(),
                client().prepareSearch("articles")
                        .setQuery(hasChildQuery("comment", matchQuery("message", "elephant"), ScoreMode.None).innerHit(new InnerHitBuilder()))
                        .request()
        };
        for (SearchRequest searchRequest : searchRequests) {
            SearchResponse response = client().search(searchRequest).actionGet();
            assertNoFailures(response);
            assertHitCount(response, 1);
            assertSearchHit(response, 1, hasId("2"));
        response = client().prepareSearch("articles")
                .setQuery(hasChildQuery("comment", matchQuery("message", "elephant"), ScoreMode.None).innerHit(new InnerHitBuilder()))
                .get();
        assertNoFailures(response);
        assertHitCount(response, 1);
        assertSearchHit(response, 1, hasId("2"));

            assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
            SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
            assertThat(innerHits.totalHits(), equalTo(3L));
        assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
        innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
        assertThat(innerHits.totalHits(), equalTo(3L));

            assertThat(innerHits.getAt(0).getId(), equalTo("4"));
            assertThat(innerHits.getAt(0).type(), equalTo("comment"));
            assertThat(innerHits.getAt(1).getId(), equalTo("5"));
            assertThat(innerHits.getAt(1).type(), equalTo("comment"));
            assertThat(innerHits.getAt(2).getId(), equalTo("6"));
            assertThat(innerHits.getAt(2).type(), equalTo("comment"));
        }
        InnerHitBuilder innerHit = new InnerHitBuilder();
        innerHit.setQuery(matchQuery("message", "fox"));
        innerHit.setParentChildType("comment");
        innerHit.setHighlightBuilder(new HighlightBuilder().field("message"));
        innerHit.setExplain(true);
        innerHit.addFieldDataField("message");
        innerHit.addScriptField("script", new Script("5", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, Collections.emptyMap()));
        innerHit.setSize(1);
        innerHitsBuilder = new InnerHitsBuilder();
        innerHitsBuilder.addInnerHit("comment", innerHit);
        searchRequests = new SearchRequest[] {
                client().prepareSearch("articles")
                        .setQuery(hasChildQuery("comment", matchQuery("message", "fox"), ScoreMode.None))
                        .innerHits(innerHitsBuilder)
                        .request(),
        assertThat(innerHits.getAt(0).getId(), equalTo("4"));
        assertThat(innerHits.getAt(0).type(), equalTo("comment"));
        assertThat(innerHits.getAt(1).getId(), equalTo("5"));
        assertThat(innerHits.getAt(1).type(), equalTo("comment"));
        assertThat(innerHits.getAt(2).getId(), equalTo("6"));
        assertThat(innerHits.getAt(2).type(), equalTo("comment"));

                client().prepareSearch("articles")
                        .setQuery(
                                hasChildQuery("comment", matchQuery("message", "fox"), ScoreMode.None).innerHit(
                                        new InnerHitBuilder()
                                                .addFieldDataField("message")
                                                .setHighlightBuilder(new HighlightBuilder().field("message"))
                                                .setExplain(true).setSize(1)
                                                .addScriptField("script", new Script("5", ScriptService.ScriptType.INLINE,
                                                        MockScriptEngine.NAME, Collections.emptyMap()))
                                )
                        ).request() };

        for (SearchRequest searchRequest : searchRequests) {
            SearchResponse response = client().search(searchRequest).actionGet();
            assertNoFailures(response);
            SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
            assertThat(innerHits.getHits().length, equalTo(1));
            assertThat(innerHits.getAt(0).getHighlightFields().get("message").getFragments()[0].string(), equalTo("<em>fox</em> eat quick"));
            assertThat(innerHits.getAt(0).explanation().toString(), containsString("weight(message:fox"));
            assertThat(innerHits.getAt(0).getFields().get("message").getValue().toString(), equalTo("eat"));
            assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("5"));
        }
        response = client().prepareSearch("articles")
                .setQuery(
                        hasChildQuery("comment", matchQuery("message", "fox"), ScoreMode.None).innerHit(
                                new InnerHitBuilder()
                                        .addFieldDataField("message")
                                        .setHighlightBuilder(new HighlightBuilder().field("message"))
                                        .setExplain(true).setSize(1)
                                        .addScriptField("script", new Script("5", ScriptService.ScriptType.INLINE,
                                                MockScriptEngine.NAME, Collections.emptyMap()))
                        )
                ).get();
        assertNoFailures(response);
        innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
        assertThat(innerHits.getHits().length, equalTo(1));
        assertThat(innerHits.getAt(0).getHighlightFields().get("message").getFragments()[0].string(), equalTo("<em>fox</em> eat quick"));
        assertThat(innerHits.getAt(0).explanation().toString(), containsString("weight(message:fox"));
        assertThat(innerHits.getAt(0).getFields().get("message").getValue().toString(), equalTo("eat"));
        assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("5"));
    }

    public void testRandomParentChild() throws Exception {

@@ -442,33 +329,17 @@ public class InnerHitsIT extends ESIntegTestCase {
        indexRandom(true, requestBuilders);

        int size = randomIntBetween(0, numDocs);
        InnerHitsBuilder innerHitsBuilder = new InnerHitsBuilder();
        innerHitsBuilder.addInnerHit("a", new InnerHitBuilder().setParentChildType("child1").addSort(new FieldSortBuilder("_uid").order(SortOrder.ASC)).setSize(size));
        innerHitsBuilder.addInnerHit("b", new InnerHitBuilder().setParentChildType("child2").addSort(new FieldSortBuilder("_uid").order(SortOrder.ASC)).setSize(size));
        SearchResponse searchResponse;
        if (randomBoolean()) {
            searchResponse = client().prepareSearch("idx")
                    .setSize(numDocs)
                    .setTypes("parent")
                    .addSort("_uid", SortOrder.ASC)
                    .innerHits(innerHitsBuilder)
                    .get();
        } else {
            BoolQueryBuilder boolQuery = new BoolQueryBuilder();
            if (randomBoolean()) {
                boolQuery.should(hasChildQuery("child1", matchAllQuery(), ScoreMode.None).innerHit(new InnerHitBuilder().setName("a").addSort(new FieldSortBuilder("_uid").order(SortOrder.ASC)).setSize(size)));
                boolQuery.should(hasChildQuery("child2", matchAllQuery(), ScoreMode.None).innerHit(new InnerHitBuilder().setName("b").addSort(new FieldSortBuilder("_uid").order(SortOrder.ASC)).setSize(size)));
            } else {
                boolQuery.should(constantScoreQuery(hasChildQuery("child1", matchAllQuery(), ScoreMode.None).innerHit(new InnerHitBuilder().setName("a").addSort(new FieldSortBuilder("_uid").order(SortOrder.ASC)).setSize(size))));
                boolQuery.should(constantScoreQuery(hasChildQuery("child2", matchAllQuery(), ScoreMode.None).innerHit(new InnerHitBuilder().setName("b").addSort(new FieldSortBuilder("_uid").order(SortOrder.ASC)).setSize(size))));
            }
            searchResponse = client().prepareSearch("idx")
                    .setSize(numDocs)
                    .setTypes("parent")
                    .addSort("_uid", SortOrder.ASC)
                    .setQuery(boolQuery)
                    .get();
        }
        BoolQueryBuilder boolQuery = new BoolQueryBuilder();
        boolQuery.should(constantScoreQuery(hasChildQuery("child1", matchAllQuery(), ScoreMode.None)
                .innerHit(new InnerHitBuilder().setName("a").addSort(new FieldSortBuilder("_uid").order(SortOrder.ASC)).setSize(size))));
        boolQuery.should(constantScoreQuery(hasChildQuery("child2", matchAllQuery(), ScoreMode.None)
                .innerHit(new InnerHitBuilder().setName("b").addSort(new FieldSortBuilder("_uid").order(SortOrder.ASC)).setSize(size))));
        SearchResponse searchResponse = client().prepareSearch("idx")
                .setSize(numDocs)
                .setTypes("parent")
                .addSort("_uid", SortOrder.ASC)
                .setQuery(boolQuery)
                .get();

        assertNoFailures(searchResponse);
        assertHitCount(searchResponse, numDocs);

@@ -560,19 +431,10 @@ public class InnerHitsIT extends ESIntegTestCase {
        requests.add(client().prepareIndex("articles", "remark", "2").setParent("2").setRouting("2").setSource("message", "bad"));
        indexRandom(true, requests);

        InnerHitsBuilder innerInnerHitsBuilder = new InnerHitsBuilder();
        innerInnerHitsBuilder.addInnerHit("remark", new InnerHitBuilder()
                .setParentChildType("remark")
                .setQuery(matchQuery("message", "good"))
        );
        InnerHitsBuilder innerHitsBuilder = new InnerHitsBuilder();
        innerHitsBuilder.addInnerHit("comment", new InnerHitBuilder()
                .setParentChildType("comment")
                .setQuery(hasChildQuery("remark", matchQuery("message", "good"), ScoreMode.None))
                .setInnerHitsBuilder(innerInnerHitsBuilder));
        SearchResponse response = client().prepareSearch("articles")
                .setQuery(hasChildQuery("comment", hasChildQuery("remark", matchQuery("message", "good"), ScoreMode.None), ScoreMode.None))
                .innerHits(innerHitsBuilder)
                .setQuery(hasChildQuery("comment",
                        hasChildQuery("remark", matchQuery("message", "good"), ScoreMode.None).innerHit(new InnerHitBuilder()),
                        ScoreMode.None).innerHit(new InnerHitBuilder()))
                .get();

        assertNoFailures(response);

@@ -590,18 +452,10 @@ public class InnerHitsIT extends ESIntegTestCase {
        assertThat(innerHits.getAt(0).getId(), equalTo("1"));
        assertThat(innerHits.getAt(0).type(), equalTo("remark"));

        innerInnerHitsBuilder = new InnerHitsBuilder();
        innerInnerHitsBuilder.addInnerHit("remark", new InnerHitBuilder()
                .setParentChildType("remark")
                .setQuery(matchQuery("message", "bad")));
        innerHitsBuilder = new InnerHitsBuilder();
        innerHitsBuilder.addInnerHit("comment", new InnerHitBuilder()
                .setParentChildType("comment")
                .setQuery(hasChildQuery("remark", matchQuery("message", "bad"), ScoreMode.None))
                .setInnerHitsBuilder(innerInnerHitsBuilder));
        response = client().prepareSearch("articles")
                .setQuery(hasChildQuery("comment", hasChildQuery("remark", matchQuery("message", "bad"), ScoreMode.None), ScoreMode.None))
                .innerHits(innerHitsBuilder)
                .setQuery(hasChildQuery("comment",
                        hasChildQuery("remark", matchQuery("message", "bad"), ScoreMode.None).innerHit(new InnerHitBuilder()),
                        ScoreMode.None).innerHit(new InnerHitBuilder()))
                .get();

        assertNoFailures(response);

@@ -662,24 +516,18 @@ public class InnerHitsIT extends ESIntegTestCase {
                .endObject()));
        indexRandom(true, requests);

        InnerHitsBuilder innerInnerHitsBuilder = new InnerHitsBuilder();
        innerInnerHitsBuilder.addInnerHit("remark", new InnerHitBuilder()
                .setNestedPath("comments.remarks")
                .setQuery(matchQuery("comments.remarks.message", "good")));
        InnerHitsBuilder innerHitsBuilder = new InnerHitsBuilder();
        innerHitsBuilder.addInnerHit("comment", new InnerHitBuilder()
                .setNestedPath("comments")
                .setQuery(nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg))
                .setInnerHitsBuilder(innerInnerHitsBuilder)
        );
        SearchResponse response = client().prepareSearch("articles")
                .setQuery(nestedQuery("comments", nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg), ScoreMode.Avg))
                .innerHits(innerHitsBuilder).get();
                .setQuery(
                        nestedQuery("comments",
                                nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "good"), ScoreMode.Avg)
                                        .innerHit(new InnerHitBuilder().setName("remark")),
                                ScoreMode.Avg).innerHit(new InnerHitBuilder())
                ).get();
        assertNoFailures(response);
        assertHitCount(response, 1);
        assertSearchHit(response, 1, hasId("1"));
        assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
-       SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+       SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comments");
        assertThat(innerHits.totalHits(), equalTo(1L));
        assertThat(innerHits.getHits().length, equalTo(1));
        assertThat(innerHits.getAt(0).getId(), equalTo("1"));

@@ -711,24 +559,18 @@ public class InnerHitsIT extends ESIntegTestCase {
        assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("remarks"));
        assertThat(innerHits.getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0));

        innerInnerHitsBuilder = new InnerHitsBuilder();
        innerInnerHitsBuilder.addInnerHit("remark", new InnerHitBuilder()
                .setNestedPath("comments.remarks")
                .setQuery(matchQuery("comments.remarks.message", "bad")));
        innerHitsBuilder = new InnerHitsBuilder();
        innerHitsBuilder.addInnerHit("comment", new InnerHitBuilder()
                .setNestedPath("comments")
                .setQuery(nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"), ScoreMode.Avg))
                .setInnerHitsBuilder(innerInnerHitsBuilder));
        response = client().prepareSearch("articles")
                .setQuery(nestedQuery("comments", nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"), ScoreMode.Avg), ScoreMode.Avg))
                .innerHits(innerHitsBuilder)
                .get();
                .setQuery(
                        nestedQuery("comments",
                                nestedQuery("comments.remarks", matchQuery("comments.remarks.message", "bad"), ScoreMode.Avg)
                                        .innerHit(new InnerHitBuilder().setName("remark")),
                                ScoreMode.Avg).innerHit(new InnerHitBuilder())
                ).get();
        assertNoFailures(response);
        assertHitCount(response, 1);
        assertSearchHit(response, 1, hasId("2"));
        assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(1));
-       innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
+       innerHits = response.getHits().getAt(0).getInnerHits().get("comments");
        assertThat(innerHits.totalHits(), equalTo(1L));
        assertThat(innerHits.getHits().length, equalTo(1));
        assertThat(innerHits.getAt(0).getId(), equalTo("2"));

@@ -863,22 +705,21 @@ public class InnerHitsIT extends ESIntegTestCase {
requests.add(client().prepareIndex("royals", "baron", "baron4").setParent("earl4").setRouting("king").setSource("{}"));
indexRandom(true, requests);

InnerHitsBuilder innerInnerHitsBuilder = new InnerHitsBuilder();
innerInnerHitsBuilder.addInnerHit("barons", new InnerHitBuilder().setParentChildType("baron"));
InnerHitsBuilder innerHitsBuilder = new InnerHitsBuilder();
innerHitsBuilder.addInnerHit("earls", new InnerHitBuilder()
        .setParentChildType("earl")
        .addSort(SortBuilders.fieldSort("_uid").order(SortOrder.ASC))
        .setSize(4)
        .setInnerHitsBuilder(innerInnerHitsBuilder)
);
innerInnerHitsBuilder = new InnerHitsBuilder();
innerInnerHitsBuilder.addInnerHit("kings", new InnerHitBuilder().setParentChildType("king"));
innerHitsBuilder.addInnerHit("princes", new InnerHitBuilder().setParentChildType("prince")
        .setInnerHitsBuilder(innerInnerHitsBuilder));
SearchResponse response = client().prepareSearch("royals")
        .setTypes("duke")
        .innerHits(innerHitsBuilder)
        .setQuery(boolQuery()
                .filter(hasParentQuery("prince",
                        hasParentQuery("king", matchAllQuery(), false).innerHit(new InnerHitBuilder().setName("kings")),
                        false).innerHit(new InnerHitBuilder().setName("princes"))
                )
                .filter(hasChildQuery("earl",
                        hasChildQuery("baron", matchAllQuery(), ScoreMode.None).innerHit(new InnerHitBuilder().setName("barons")),
                        ScoreMode.None).innerHit(new InnerHitBuilder()
                                .addSort(SortBuilders.fieldSort("_uid").order(SortOrder.ASC))
                                .setName("earls")
                                .setSize(4))
                )
        )
        .get();
assertHitCount(response, 1);
assertThat(response.getHits().getAt(0).getId(), equalTo("duke"));
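The same inlining applies to the parent/child queries in this hunk; since `has_parent` nests inside `has_parent` (and `has_child` inside `has_child`), each level gets an explicit `setName(...)` so the result keys the assertions expect ("kings", "princes") survive instead of the default type-derived names. A trimmed sketch of just that naming rule, under the same assumptions as the previous snippet:

[source,java]
--------------------------------------------------
import static org.elasticsearch.index.query.QueryBuilders.hasParentQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;

import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.index.query.QueryBuilder;

class ParentChildNaming {
    // Two has_parent levels; explicit names preserve the keys the test
    // looks up ("kings", "princes") rather than the default type names.
    static QueryBuilder princesWithKings() {
        return hasParentQuery("prince",
                hasParentQuery("king", matchAllQuery(), false)
                        .innerHit(new InnerHitBuilder().setName("kings")),
                false).innerHit(new InnerHitBuilder().setName("princes"));
    }
}
--------------------------------------------------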
@@ -1086,25 +927,4 @@ public class InnerHitsIT extends ESIntegTestCase {
    assertHitCount(response, 1);
}

public void testTopLevelInnerHitsWithQueryInnerHits() throws Exception {
    // top level inner hits shouldn't overwrite query inner hits definitions

    assertAcked(prepareCreate("index1").addMapping("child", "_parent", "type=parent"));
    List<IndexRequestBuilder> requests = new ArrayList<>();
    requests.add(client().prepareIndex("index1", "parent", "1").setSource("{}"));
    requests.add(client().prepareIndex("index1", "child", "2").setParent("1").setSource("{}"));
    indexRandom(true, requests);

    InnerHitsBuilder innerHitsBuilder = new InnerHitsBuilder();
    innerHitsBuilder.addInnerHit("my-inner-hit", new InnerHitBuilder().setParentChildType("child"));
    SearchResponse response = client().prepareSearch("index1")
            .setQuery(hasChildQuery("child", new MatchAllQueryBuilder(), ScoreMode.None).innerHit(new InnerHitBuilder()))
            .innerHits(innerHitsBuilder)
            .get();
    assertHitCount(response, 1);
    assertThat(response.getHits().getAt(0).getInnerHits().size(), equalTo(2));
    assertThat(response.getHits().getAt(0).getInnerHits().get("child").getAt(0).getId(), equalTo("2"));
    assertThat(response.getHits().getAt(0).getInnerHits().get("my-inner-hit").getAt(0).getId(), equalTo("2"));
}

}
@@ -32,6 +32,7 @@ public class SnapshotUtilsTests extends ESTestCase {
public void testIndexNameFiltering() {
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{}, new String[]{"foo", "bar", "baz"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"*"}, new String[]{"foo", "bar", "baz"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"_all"}, new String[]{"foo", "bar", "baz"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo", "bar", "baz"}, new String[]{"foo", "bar", "baz"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"foo"}, new String[]{"foo"});
    assertIndexNameFiltering(new String[]{"foo", "bar", "baz"}, new String[]{"baz", "not_available"}, new String[]{"baz"});
@@ -32,7 +32,8 @@ public abstract class ESThreadPoolTestCase extends ESTestCase {
            return info;
        }
    }
    throw new IllegalArgumentException(name);
    assert "same".equals(name);
    return null;
}

protected final ThreadPoolStats.Stats stats(final ThreadPool threadPool, final String name) {
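For context, this helper resolves a pool's `ThreadPool.Info` by name, and the `same` executor is the one pool without an info entry; the change swaps the exception for an assertion (the test runner enables `-ea`) plus a `null` return. A sketch of the helper's assumed overall shape; the loop is implied by the context lines above rather than quoted from the source:

[source,java]
--------------------------------------------------
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.ThreadPool;

public abstract class ESThreadPoolTestCaseSketch extends ESTestCase {
    protected final ThreadPool.Info info(final ThreadPool threadPool, final String name) {
        for (final ThreadPool.Info info : threadPool.info()) {
            if (info.getName().equals(name)) {
                return info;
            }
        }
        // only the "same" pool legitimately has no info entry; anything else is a test bug
        assert "same".equals(name);
        return null;
    }
}
--------------------------------------------------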
@@ -193,7 +193,6 @@ public class ScalingThreadPoolTests extends ESThreadPoolTestCase {
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        assertThat(stats(threadPool, threadPoolName).getCompleted(), equalTo(128L));
    }));
}
@@ -111,9 +111,9 @@ def wait_for_node_startup(es_dir, timeout=60, header={}):
  conn = None
  try:
    time.sleep(1)
    host, port = get_host_from_ports_file(es_dir)
    conn = HTTPConnection(host=host, port=port, timeout=timeout)
    conn.request('GET', '', headers=header)
    host = get_host_from_ports_file(es_dir)
    conn = HTTPConnection(host, timeout=1)
    conn.request('GET', '/', headers=header)
    res = conn.getresponse()
    if res.status == 200:
      return True

@@ -160,7 +160,7 @@ def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS)
    # way we keep the executing host unmodified since we don't have to import the key into the default keystore
    gpg_home_dir = os.path.join(current_artifact_dir, "gpg_home_dir")
    os.makedirs(gpg_home_dir, 0o700)
    run('gpg --homedir %s --keyserver pgp.mit.edu --recv-key D88E42B4' % gpg_home_dir)
    run('gpg --homedir %s --keyserver pool.sks-keyservers.net --recv-key D88E42B4' % gpg_home_dir)
    run('cd %s && gpg --homedir %s --verify %s' % (current_artifact_dir, gpg_home_dir, os.path.basename(gpg_file)))
    print(' ' + '*' * 80)
    print()

@@ -170,9 +170,7 @@ def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS)
  shutil.rmtree(tmp_dir)

def get_host_from_ports_file(es_dir):
  first_host_with_port = read_fully(os.path.join(es_dir, 'logs/http.ports')).splitlines()[0]
  host = urlparse('http://%s' % first_host_with_port)
  return host.hostname, host.port
  return read_fully(os.path.join(es_dir, 'logs/http.ports')).splitlines()[0]

def smoke_test_release(release, files, expected_hash, plugins):
  for release_file in files:

@@ -199,7 +197,7 @@ def smoke_test_release(release, files, expected_hash, plugins):
      headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") }
      es_shield_path = os.path.join(es_dir, 'bin/x-pack/users')
      print("  Install dummy shield user")
      run('%s; %s useradd es_admin -r admin -p foobar' % (java_exe(), es_shield_path))
      run('%s; %s useradd es_admin -r superuser -p foobar' % (java_exe(), es_shield_path))
    else:
      headers = {}
    print('  Starting elasticsearch daemon from [%s]' % es_dir)

@@ -214,9 +212,9 @@ def smoke_test_release(release, files, expected_hash, plugins):
        print('*' * 80)
        raise RuntimeError('server didn\'t start up')
      try: # we now get / and /_nodes to fetch basic info like hashes etc and the installed plugins
        host, port = get_host_from_ports_file(es_dir)
        conn = HTTPConnection(host=host, port=port, timeout=20)
        conn.request('GET', '', headers=headers)
        host = get_host_from_ports_file(es_dir)
        conn = HTTPConnection(host, timeout=20)
        conn.request('GET', '/', headers=headers)
        res = conn.getresponse()
        if res.status == 200:
          version = json.loads(res.read().decode("utf-8"))['version']
@@ -51,7 +51,7 @@ Note that you have to set the cluster name if you use one different than

[source,java]
--------------------------------------------------
Settings settings = Settings.settingsBuilder()
Settings settings = Settings.builder()
        .put("cluster.name", "myClusterName").build();
Client client = TransportClient.builder().settings(settings).build();
//Add transport addresses and do something with the client...
@@ -361,12 +361,14 @@ are:
`m`::      Minute
`s`::      Second
`ms`::     Milli-second
`micros`:: Micro-second
`nanos`::  Nano-second

[[size-units]]
[[byte-units]]
[float]
=== Data size units
=== Byte size units

Whenever the size of data needs to be specified, eg when setting a buffer size
Whenever the byte size of data needs to be specified, eg when setting a buffer size
parameter, the value must specify the unit, like `10kb` for 10 kilobytes. The
supported units are:

@@ -378,6 +380,23 @@ supported units are:
`tb`:: Terabytes
`pb`:: Petabytes

[[size-units]]
[float]
=== Unit-less quantities

Unit-less quantities are quantities that don't have a "unit" like "bytes" or "Hertz" or "meter" or "long tonne".

If one of these quantities is large we'll print it out like 10m for 10,000,000 or 7k for 7,000. We'll still print 87
when we mean 87 though. These are the supported multipliers:

[horizontal]
``::  Single
`k`:: Kilo
`m`:: Mega
`g`:: Giga
`t`:: Tera
`p`:: Peta

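To make the multiplier behavior above concrete, here is a small illustrative formatter; it is not the Elasticsearch implementation, and the threshold and one-decimal precision are assumptions, but it reproduces the documented examples:

[source,java]
--------------------------------------------------
public final class Multipliers {
    private static final String[] SUFFIXES = {"", "k", "m", "g", "t", "p"};

    // Divide by 1000 until the value fits, then append the matching suffix.
    static String format(long value) {
        double v = value;
        int i = 0;
        while (v >= 1000 && i < SUFFIXES.length - 1) {
            v /= 1000;
            i++;
        }
        return (v == Math.floor(v) ? String.valueOf((long) v) : String.format("%.1f", v)) + SUFFIXES[i];
    }

    public static void main(String[] args) {
        System.out.println(format(87));         // 87
        System.out.println(format(7_000));      // 7k
        System.out.println(format(10_000_000)); // 10m
    }
}
--------------------------------------------------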
[[distance-units]]
[float]
=== Distance Units
@@ -74,8 +74,8 @@ with `bulk.`.

[[numeric-formats]]
=== Numeric formats

Many commands provide a few types of numeric output, either a byte
value or a time value. By default, these types are human-formatted,
Many commands provide a few types of numeric output, either a byte, size
or a time value. By default, these types are human-formatted,
for example, `3.5mb` instead of `3763212`. The human values are not
sortable numerically, so in order to operate on these values where
order is important, you can change it.

@@ -95,6 +95,12 @@ green wiki1 3 0 10000 413 103776272 103776272
green foo   1 0   227   0   2065131   2065131
--------------------------------------------------

If you want to change the <<time-units,time units>>, use the `time` parameter.

If you want to change the <<size-units,size units>>, use the `size` parameter.

If you want to change the <<byte-units,byte units>>, use the `bytes` parameter.
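For instance, forcing raw byte counts makes the columns sort numerically. A hypothetical request in plain Java; the `_cat/indices` endpoint and `bytes` parameter are as documented, while the host and port are assumptions:

[source,java]
--------------------------------------------------
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public final class CatIndicesBytes {
    public static void main(String[] args) throws Exception {
        // bytes=b prints exact byte counts instead of values like "3.5mb"
        URL url = new URL("http://localhost:9200/_cat/indices?bytes=b");
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(url.openStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line);
            }
        }
    }
}
--------------------------------------------------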

[float]
=== Response as text, json, smile, yaml or cbor

@@ -34,15 +34,21 @@ The response looks like:
    "reason" : "INDEX_CREATED",                       <2>
    "at" : "2016-03-22T20:04:23.620Z"
  },
  "nodes" : {                                         <3>
  "allocation_delay_ms" : 0,                          <3>
  "remaining_delay_ms" : 0,                           <4>
  "nodes" : {
    "V-Spi0AyRZ6ZvKbaI3691w" : {
      "node_name" : "node1",
      "node_attributes" : {                           <4>
      "node_attributes" : {                           <5>
        "bar" : "baz"
      },
      "final_decision" : "NO",                        <5>
      "weight" : 0.06666675,                          <6>
      "decisions" : [ {                               <7>
      "store" : {
        "shard_copy" : "NONE"                         <6>
      },
      "final_decision" : "NO",                        <7>
      "final_explanation" : "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
      "weight" : 0.06666675,                          <8>
      "decisions" : [ {                               <9>
        "decider" : "filter",
        "decision" : "NO",
        "explanation" : "node does not match index include filters [foo:\"bar\"]"

@@ -54,7 +60,11 @@ The response looks like:
        "bar" : "baz",
        "foo" : "bar"
      },
      "store" : {
        "shard_copy" : "AVAILABLE"
      },
      "final_decision" : "NO",
      "final_explanation" : "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
      "weight" : -1.3833332,
      "decisions" : [ {
        "decider" : "same_shard",

@@ -65,7 +75,11 @@ The response looks like:
    "PzdyMZGXQdGhqTJHF_hGgA" : {
      "node_name" : "node3",
      "node_attributes" : { },
      "store" : {
        "shard_copy" : "NONE"
      },
      "final_decision" : "NO",
      "final_explanation" : "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
      "weight" : 2.3166666,
      "decisions" : [ {
        "decider" : "filter",

@@ -78,11 +92,13 @@ The response looks like:
--------------------------------------------------
<1> Whether the shard is assigned or unassigned
<2> Reason for the shard originally becoming unassigned
<3> List of node decisions about the shard
<4> User-added attributes the node has
<5> Final decision for whether the shard is allowed to be allocated to this node
<6> Weight for how much the allocator would like to allocate the shard to this node
<7> List of decisions factoring into final decision
<3> Configured delay before the shard can be allocated
<4> Remaining delay before the shard can be allocated
<5> User-added attributes the node has
<6> The shard copy information for this node and error (if applicable)
<7> Final decision and explanation of whether the shard can be allocated to this node
<8> Weight for how much the allocator would like to allocate the shard to this node
<9> List of node decisions factoring into final decision about the shard
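The endpoint behind these examples takes a small JSON body naming the shard. A hypothetical standalone call in plain Java, shown only to make the request shape concrete; a POST is used because `HttpURLConnection` handles GET-with-body poorly, and the index, shard, and primary values are illustrative:

[source,java]
--------------------------------------------------
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public final class AllocationExplain {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:9200/_cluster/allocation/explain");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        conn.setRequestProperty("Content-Type", "application/json");
        byte[] body = "{\"index\":\"wiki1\",\"shard\":0,\"primary\":false}"
                .getBytes(StandardCharsets.UTF_8);
        try (OutputStream out = conn.getOutputStream()) {
            out.write(body);
        }
        // Print the explanation JSON, shaped like the examples in this section.
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line);
            }
        }
    }
}
--------------------------------------------------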

For a shard that is already assigned, the output looks similar to:

@@ -97,13 +113,19 @@ For a shard that is already assigned, the output looks similar to:
  },
  "assigned" : true,
  "assigned_node_id" : "Qc6VL8c5RWaw1qXZ0Rg57g",      <1>
  "allocation_delay_ms" : 0,
  "remaining_delay_ms" : 0,
  "nodes" : {
    "V-Spi0AyRZ6ZvKbaI3691w" : {
      "node_name" : "Susan Storm",
      "node_attributes" : {
        "bar" : "baz"
      },
      "store" : {
        "shard_copy" : "NONE"
      },
      "final_decision" : "NO",
      "final_explanation" : "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
      "weight" : 1.4499999,
      "decisions" : [ {
        "decider" : "filter",

@@ -117,7 +139,11 @@ For a shard that is already assigned, the output looks similar to:
        "bar" : "baz",
        "foo" : "bar"
      },
      "final_decision" : "CURRENTLY_ASSIGNED",        <2>
      "store" : {
        "shard_copy" : "AVAILABLE"
      },
      "final_decision" : "ALREADY_ASSIGNED",          <2>
      "final_explanation" : "the shard is already assigned to this node",
      "weight" : 0.0,
      "decisions" : [ {
        "decider" : "same_shard",

@@ -128,7 +154,11 @@ For a shard that is already assigned, the output looks similar to:
    "PzdyMZGXQdGhqTJHF_hGgA" : {
      "node_name" : "The Symbiote",
      "node_attributes" : { },
      "store" : {
        "shard_copy" : "NONE"
      },
      "final_decision" : "NO",
      "final_explanation" : "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision",
      "weight" : 3.6999998,
      "decisions" : [ {
        "decider" : "filter",

@@ -140,7 +170,7 @@ For a shard that is already assigned, the output looks similar to:
}
--------------------------------------------------
<1> Node the shard is currently assigned to
<2> The decision is "CURRENTLY_ASSIGNED" because the shard is currently assigned to this node
<2> The decision is "ALREADY_ASSIGNED" because the shard is currently assigned to this node

You can also have Elasticsearch explain the allocation of the first unassigned
shard it finds by sending an empty body, such as:
@@ -8,9 +8,8 @@ The store module allows you to control how index data is stored and accessed on
=== File system storage types

There are different file system implementations or _storage types_. The best
one for the operating environment will be automatically chosen: `mmapfs` on
Windows 64bit, `simplefs` on Windows 32bit, and `default` (hybrid `niofs` and
`mmapfs`) for the rest.
one for the operating environment will be automatically chosen: `simplefs` on
Windows 32bit, `niofs` on other 32bit systems and `mmapfs` on 64bit systems.

This can be overridden for all indices by adding this to the
`config/elasticsearch.yml` file:

@@ -61,12 +60,13 @@ process equal to the size of the file being mapped. Before using this
class, be sure you have allowed plenty of
<<vm-max-map-count,virtual address space>>.

[[default_fs]]`default_fs`::
[[default_fs]]`default_fs` deprecated[5.0.0, The `default_fs` store type is deprecated - use `mmapfs` instead]::

The `default` type is a hybrid of NIO FS and MMapFS, which chooses the best
file system for each type of file. Currently only the Lucene term dictionary
and doc values files are memory mapped to reduce the impact on the operating
system. All other files are opened using Lucene `NIOFSDirectory`. Address
space settings (<<vm-max-map-count>>) might also apply if your term
dictionaries are large.
file system for each type of file. Currently only the Lucene term dictionary,
doc values and points files are memory mapped to reduce the impact on the
operating system. All other files are opened using Lucene `NIOFSDirectory`.
Address space settings (<<vm-max-map-count>>) might also apply if your term
dictionaries are large, if you index many fields that use points (numerics, dates
and ip addresses) or if you have many fields with doc values.
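The hybrid described here can be approximated with Lucene's stock `FileSwitchDirectory`. A sketch of the idea; the extension set (`tim`/`tip` term dictionary, `dvd`/`dvm` doc values, `dii`/`dim` points) is chosen for illustration and is not quoted from the Elasticsearch source:

[source,java]
--------------------------------------------------
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FileSwitchDirectory;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.store.NIOFSDirectory;

public final class HybridStore {
    // Files with the listed extensions are memory mapped; everything else
    // goes through NIO, mirroring the "default" hybrid described above.
    public static Directory open(Path indexPath) throws Exception {
        Set<String> mmapExtensions = new HashSet<>(
                Arrays.asList("tim", "tip", "dvd", "dvm", "dii", "dim"));
        return new FileSwitchDirectory(mmapExtensions,
                new MMapDirectory(indexPath), new NIOFSDirectory(indexPath), true);
    }

    public static void main(String[] args) throws Exception {
        try (Directory dir = open(Paths.get("/tmp/index"))) {
            System.out.println(Arrays.toString(dir.listAll()));
        }
    }
}
--------------------------------------------------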
@@ -1,7 +1,7 @@
[[elasticsearch-reference]]
= Elasticsearch Reference

:version:       5.0.0-alpha1
:version:       5.0.0-alpha2
:major-version: 5.x
:branch:        master
:jdk:           1.8.0_73
@@ -14,13 +14,14 @@ associated with it.

[source,js]
--------------------------------------------------
$ curl -XPUT 'http://localhost:9200/twitter/'

$ curl -XPUT 'http://localhost:9200/twitter/' -d '
index :
    number_of_shards : 3 <1>
    number_of_replicas : 2 <2>
'
$ curl -XPUT 'http://localhost:9200/twitter/' -d '{
    "settings" : {
        "index" : {
            "number_of_shards" : 3, <1>
            "number_of_replicas" : 2 <2>
        }
    }
}'
--------------------------------------------------
<1> Default for `number_of_shards` is 5
<2> Default for `number_of_replicas` is 1 (ie one replica for each primary shard)
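The same request through the 5.x Java admin client, assuming a connected `Client`; the settings keys mirror the JSON body above:

[source,java]
--------------------------------------------------
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

public final class CreateTwitterIndex {
    // Mirrors the curl example: 3 primary shards, 2 replicas per primary.
    static void create(Client client) {
        client.admin().indices().prepareCreate("twitter")
                .setSettings(Settings.builder()
                        .put("index.number_of_shards", 3)
                        .put("index.number_of_replicas", 2))
                .get();
    }
}
--------------------------------------------------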
@@ -739,6 +739,67 @@ Here is an example that adds the parsed date to the `timestamp` field based on t
}
--------------------------------------------------

[[date-index-name-processor]]
=== Date Index Name Processor

The purpose of this processor is to point documents to the right time-based index, based
on a date or timestamp field in a document, by using the <<date-math-index-names, date math index name support>>.

The processor sets the `_index` meta field with a date math index name expression based on the provided index name
prefix, a date or timestamp field in the documents being processed and the provided date rounding.

First this processor fetches the date or timestamp from a field in the document being processed. Optionally,
date formatting can be configured to control how the field's value is parsed into a date. Then this date,
the provided index name prefix and the provided date rounding get formatted into a date math index name expression.
Here, too, date formatting can optionally be specified to control how the date is formatted into the date math
index name expression.

An example pipeline that points documents to a monthly index that starts with a `myindex-` prefix based on a
date in the `date1` field:

[source,js]
--------------------------------------------------
PUT _ingest/pipeline/1
{
  "processors" : [
    {
      "date_index_name" : {
        "field" : "date1",
        "index_name_prefix" : "myindex-",
        "date_rounding" : "M"
      }
    }
  ]
}
--------------------------------------------------

Using that pipeline for an index request:

[source,js]
--------------------------------------------------
PUT /myindex/type/1?pipeline=1
{
  "date1" : "2016-04-25T12:02:01.789Z"
}
--------------------------------------------------

The above request will not index this document into the `myindex` index, but into the `myindex-2016-04-01` index.
This is because the date is being rounded by month.

[[date-index-name-options]]
.Date index name options
[options="header"]
|======
| Name                | Required | Default                      | Description
| `field`             | yes      | -                            | The field to get the date or timestamp from.
| `index_name_prefix` | no       | -                            | A prefix of the index name to be prepended before the printed date.
| `date_rounding`     | yes      | -                            | How to round the date when formatting the date into the index name. Valid values are: `y` (year), `M` (month), `w` (week), `d` (day), `h` (hour), `m` (minute) and `s` (second).
| `date_formats`      | no       | yyyy-MM-dd'T'HH:mm:ss.SSSZ   | An array of the expected date formats for parsing dates / timestamps in the document being preprocessed. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N.
| `timezone`          | no       | UTC                          | The timezone to use when parsing the date and when date math index name support resolves expressions into concrete index names.
| `locale`            | no       | ENGLISH                      | The locale to use when parsing the date from the document being preprocessed, relevant when parsing month names or week days.
| `index_name_format` | no       | yyyy-MM-dd                   | The format to be used when printing the parsed date into the index name. A valid Joda pattern is expected here.
|======

[[fail-processor]]
=== Fail Processor
Raises an exception. This is useful for when

@@ -1179,6 +1240,7 @@ its value will be replaced with the provided one.
| Name       | Required | Default | Description
| `field`    | yes      | -       | The field to insert, upsert, or update
| `value`    | yes      | -       | The value to be set for the field
| `override` | no       | true    | If `true`, the processor updates fields that already hold a non-null value. When set to `false`, such fields are left untouched.
|======

[source,js]
@@ -6,9 +6,10 @@ Each document indexed is associated with a <<mapping-type-field,`_type`>> (see
indexed as its value can be derived automatically from the
<<mapping-uid-field,`_uid`>> field.

The value of the `_id` field is accessible in queries and scripts, but _not_
in aggregations or when sorting, where the <<mapping-uid-field,`_uid`>> field
should be used instead:
The value of the `_id` field is accessible in certain queries (`term`,
`terms`, `match`, `query_string`, `simple_query_string`) and scripts, but
_not_ in aggregations or when sorting, where the <<mapping-uid-field,`_uid`>>
field should be used instead:

[source,js]
--------------------------
@@ -30,7 +30,7 @@ PUT my_index
--------------------------------
// AUTOSENSE

<1> The `title` and `content` fields with be included in the `_all` field.
<1> The `title` and `content` fields will be included in the `_all` field.
<2> The `date` field will not be included in the `_all` field.

TIP: The `include_in_all` setting is allowed to have different settings for