Merge branch 'master' into settings_prototype

Simon Willnauer 2015-12-18 09:15:58 +01:00
commit eca2435838
273 changed files with 4662 additions and 900 deletions
TESTING.asciidoc
build.gradle
buildSrc/src/main/groovy/org/elasticsearch/gradle
core
build.gradle
src
main/java/org/elasticsearch
Version.java
action
cluster
common
env
gateway
index
monitor/jvm
percolator
search
test

@@ -290,14 +290,14 @@ The REST tests are run automatically when executing the "gradle check" command.
 REST tests use the following command:
 ---------------------------------------------------------------------------
-gradle :distribution:tar:integTest \
+gradle :distribution:integ-test-zip:integTest \
   -Dtests.class=org.elasticsearch.test.rest.RestIT
 ---------------------------------------------------------------------------
 A specific test case can be run with
 ---------------------------------------------------------------------------
-gradle :distribution:tar:integTest \
+gradle :distribution:integ-test-zip:integTest \
   -Dtests.class=org.elasticsearch.test.rest.RestIT \
   -Dtests.method="test {p0=cat.shards/10_basic/Help}"
 ---------------------------------------------------------------------------

@@ -109,7 +109,7 @@ subprojects {
   ext.projectSubstitutions = [
     "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
     "org.elasticsearch:elasticsearch:${version}": ':core',
-    "org.elasticsearch:test-framework:${version}": ':test-framework',
+    "org.elasticsearch.test:framework:${version}": ':test:framework',
     "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip',
     "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip',
     "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar',
@@ -141,8 +141,8 @@ subprojects {
   // the dependency is added.
   gradle.projectsEvaluated {
     allprojects {
-      if (project.path == ':test-framework') {
-        // :test-framework:test cannot run before and after :core:test
+      if (project.path == ':test:framework') {
+        // :test:framework:test cannot run before and after :core:test
         return
       }
       configurations.all {

@@ -202,7 +202,7 @@ class BuildPlugin implements Plugin<Project> {
     // force all dependencies added directly to compile/testCompile to be non-transitive, except for ES itself
     Closure disableTransitiveDeps = { ModuleDependency dep ->
-      if (!(dep instanceof ProjectDependency) && dep.getGroup() != 'org.elasticsearch') {
+      if (!(dep instanceof ProjectDependency) && dep.group.startsWith('org.elasticsearch') == false) {
         dep.transitive = false
         // also create a configuration just for this dependency version, so that later
@@ -302,6 +302,7 @@ class BuildPlugin implements Plugin<Project> {
         options.compilerArgs << '-profile' << project.compactProfile
       }
       options.encoding = 'UTF-8'
+      //options.incremental = true
     }
   }
 }

@@ -60,7 +60,7 @@ public class PluginBuildPlugin extends BuildPlugin {
   private static void configureDependencies(Project project) {
     project.dependencies {
       provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
-      testCompile "org.elasticsearch:test-framework:${project.versions.elasticsearch}"
+      testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}"
       // we "upgrade" these optional deps to provided for plugins, since they will run
       // with a full elasticsearch server that includes optional deps
       provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"

@@ -98,7 +98,7 @@ public class ThirdPartyAuditTask extends DefaultTask {
     // we only want third party dependencies.
     FileCollection jars = project.configurations.testCompile.fileCollection({ dependency ->
-      dependency.group != "org.elasticsearch"
+      dependency.group.startsWith("org.elasticsearch") == false
     })
     // we don't want provided dependencies, which we have already scanned. e.g. don't

@@ -42,7 +42,7 @@ public class StandaloneTestBasePlugin implements Plugin<Project> {
     // only setup tests to build
     project.sourceSets.create('test')
-    project.dependencies.add('testCompile', "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}")
+    project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}")
     project.eclipse.classpath.sourceSets = [project.sourceSets.test]
     project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime]

@@ -82,7 +82,7 @@ dependencies {
   compile "net.java.dev.jna:jna:${versions.jna}", optional
   if (isEclipse == false || project.path == ":core-tests") {
-    testCompile("org.elasticsearch:test-framework:${version}") {
+    testCompile("org.elasticsearch.test:framework:${version}") {
       // tests use the locally compiled version of core
       exclude group: 'org.elasticsearch', module: 'elasticsearch'
    }

@@ -268,11 +268,15 @@ public class Version {
     public static final int V_2_0_1_ID = 2000199;
     public static final Version V_2_0_1 = new Version(V_2_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_0_2_ID = 2000299;
-    public static final Version V_2_0_2 = new Version(V_2_0_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
+    public static final Version V_2_0_2 = new Version(V_2_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
+    public static final int V_2_0_3_ID = 2000399;
+    public static final Version V_2_0_3 = new Version(V_2_0_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_1_0_ID = 2010099;
     public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_1_1_ID = 2010199;
-    public static final Version V_2_1_1 = new Version(V_2_1_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
+    public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
+    public static final int V_2_1_2_ID = 2010299;
+    public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_2_0_ID = 2020099;
     public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
     public static final int V_3_0_0_ID = 3000099;
@@ -293,10 +297,14 @@ public class Version {
                 return V_3_0_0;
             case V_2_2_0_ID:
                 return V_2_2_0;
+            case V_2_1_2_ID:
+                return V_2_1_2;
             case V_2_1_1_ID:
                 return V_2_1_1;
             case V_2_1_0_ID:
                 return V_2_1_0;
+            case V_2_0_3_ID:
+                return V_2_0_3;
             case V_2_0_2_ID:
                 return V_2_0_2;
             case V_2_0_1_ID:
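Editor's note: the constants above follow Elasticsearch's numeric version-id scheme, where an id packs major/minor/revision/build digits and a trailing build of 99 denotes a GA release; the boolean constructor argument appears to mark whether the version is still a snapshot, which is why this hunk flips it to false for the released 2.0.2/2.1.1 and introduces 2.0.3/2.1.2 with true. A minimal sketch (not Elasticsearch code) of that encoding:

```java
// Minimal sketch of the version-id scheme used by the constants above:
// id = major * 1000000 + minor * 10000 + revision * 100 + build,
// with build 99 marking a GA release.
public final class VersionIdDemo {
    static int id(int major, int minor, int revision, int build) {
        return major * 1000000 + minor * 10000 + revision * 100 + build;
    }

    public static void main(String[] args) {
        assert id(2, 0, 3, 99) == 2000399; // V_2_0_3_ID
        assert id(2, 1, 2, 99) == 2010299; // V_2_1_2_ID
        System.out.println(id(2, 1, 2, 99));
    }
}
```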

@@ -56,13 +56,14 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
    public static class StoreStatus implements Streamable, ToXContent, Comparable<StoreStatus> {
        private DiscoveryNode node;
        private long version;
+       private String allocationId;
        private Throwable storeException;
-       private Allocation allocation;
+       private AllocationStatus allocationStatus;

        /**
         * The status of the shard store with respect to the cluster
         */
-       public enum Allocation {
+       public enum AllocationStatus {

            /**
             * Allocated as primary
@@ -81,16 +82,16 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
            private final byte id;

-           Allocation(byte id) {
+           AllocationStatus(byte id) {
                this.id = id;
            }

-           private static Allocation fromId(byte id) {
+           private static AllocationStatus fromId(byte id) {
                switch (id) {
                    case 0: return PRIMARY;
                    case 1: return REPLICA;
                    case 2: return UNUSED;
-                   default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]");
+                   default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]");
                }
            }
@@ -99,11 +100,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
                    case 0: return "primary";
                    case 1: return "replica";
                    case 2: return "unused";
-                   default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]");
+                   default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]");
                }
            }

-           private static Allocation readFrom(StreamInput in) throws IOException {
+           private static AllocationStatus readFrom(StreamInput in) throws IOException {
                return fromId(in.readByte());
            }
@@ -115,10 +116,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        private StoreStatus() {
        }

-       public StoreStatus(DiscoveryNode node, long version, Allocation allocation, Throwable storeException) {
+       public StoreStatus(DiscoveryNode node, long version, String allocationId, AllocationStatus allocationStatus, Throwable storeException) {
            this.node = node;
            this.version = version;
-           this.allocation = allocation;
+           this.allocationId = allocationId;
+           this.allocationStatus = allocationStatus;
            this.storeException = storeException;
        }
@@ -130,13 +132,20 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        }

        /**
-        * Version of the store, used to select the store that will be
-        * used as a primary.
+        * Version of the store
         */
        public long getVersion() {
            return version;
        }

+       /**
+        * AllocationStatus id of the store, used to select the store that will be
+        * used as a primary.
+        */
+       public String getAllocationId() {
+           return allocationId;
+       }
+
        /**
         * Exception while trying to open the
         * shard index or from when the shard failed
@@ -146,13 +155,13 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        }

        /**
-        * The allocation status of the store.
-        * {@link Allocation#PRIMARY} indicates a primary shard copy
-        * {@link Allocation#REPLICA} indicates a replica shard copy
-        * {@link Allocation#UNUSED} indicates an unused shard copy
+        * The allocationStatus status of the store.
+        * {@link AllocationStatus#PRIMARY} indicates a primary shard copy
+        * {@link AllocationStatus#REPLICA} indicates a replica shard copy
+        * {@link AllocationStatus#UNUSED} indicates an unused shard copy
         */
-       public Allocation getAllocation() {
-           return allocation;
+       public AllocationStatus getAllocationStatus() {
+           return allocationStatus;
        }

        static StoreStatus readStoreStatus(StreamInput in) throws IOException {
@@ -165,7 +174,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        public void readFrom(StreamInput in) throws IOException {
            node = DiscoveryNode.readNode(in);
            version = in.readLong();
-           allocation = Allocation.readFrom(in);
+           allocationId = in.readOptionalString();
+           allocationStatus = AllocationStatus.readFrom(in);
            if (in.readBoolean()) {
                storeException = in.readThrowable();
            }
@@ -175,7 +185,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        public void writeTo(StreamOutput out) throws IOException {
            node.writeTo(out);
            out.writeLong(version);
-           allocation.writeTo(out);
+           out.writeOptionalString(allocationId);
+           allocationStatus.writeTo(out);
            if (storeException != null) {
                out.writeBoolean(true);
                out.writeThrowable(storeException);
@@ -188,7 +199,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            node.toXContent(builder, params);
            builder.field(Fields.VERSION, version);
-           builder.field(Fields.ALLOCATED, allocation.value());
+           builder.field(Fields.ALLOCATION_ID, allocationId);
+           builder.field(Fields.ALLOCATED, allocationStatus.value());
            if (storeException != null) {
                builder.startObject(Fields.STORE_EXCEPTION);
                ElasticsearchException.toXContent(builder, params, storeException);
@@ -206,7 +218,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
            } else {
                int compare = Long.compare(other.version, version);
                if (compare == 0) {
-                   return Integer.compare(allocation.id, other.allocation.id);
+                   return Integer.compare(allocationStatus.id, other.allocationStatus.id);
                }
                return compare;
            }
@@ -379,6 +391,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        static final XContentBuilderString STORES = new XContentBuilderString("stores");
        // StoreStatus fields
        static final XContentBuilderString VERSION = new XContentBuilderString("version");
+       static final XContentBuilderString ALLOCATION_ID = new XContentBuilderString("allocation_id");
        static final XContentBuilderString STORE_EXCEPTION = new XContentBuilderString("store_exception");
        static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation");
    }
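Editor's note: after this change each store entry in the shard-stores response carries both the renamed AllocationStatus and the store's allocation id. A hedged consumer-side sketch (ES 2.x-era Java client, method names from memory; "my-index" is hypothetical) using only the accessors defined in the hunk above:

```java
// Hedged sketch: listing store statuses, including the new allocation id.
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.StoreStatus;
import org.elasticsearch.client.Client;

import java.util.List;

public class ShardStoresDemo {
    public static void printStores(Client client, String index) {
        IndicesShardStoresResponse rsp = client.admin().indices()
                .prepareShardStores(index)   // assumed 2.x admin-client builder
                .setShardStatuses("all")     // also list stores of healthy shards
                .get();
        rsp.getStoreStatuses().get(index).forEach(cursor -> {
            List<StoreStatus> stores = cursor.value; // stores for shard id cursor.key
            for (StoreStatus store : stores) {
                System.out.printf("shard %d on %s: status=%s allocationId=%s%n",
                        cursor.key, store.getNode().getName(),
                        store.getAllocationStatus(), store.getAllocationId());
            }
        });
    }
}
```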

@@ -179,8 +179,8 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
            }
            for (NodeGatewayStartedShards response : fetchResponse.responses) {
                if (shardExistsInNode(response)) {
-                   IndicesShardStoresResponse.StoreStatus.Allocation allocation = getAllocation(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode());
-                   storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), allocation, response.storeException()));
+                   IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode());
+                   storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), response.allocationId(), allocationStatus, response.storeException()));
                }
            }
            CollectionUtil.timSort(storeStatuses);
@@ -193,27 +193,27 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
            listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder)));
        }

-       private IndicesShardStoresResponse.StoreStatus.Allocation getAllocation(String index, int shardID, DiscoveryNode node) {
+       private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) {
            for (ShardRouting shardRouting : routingNodes.node(node.id())) {
                ShardId shardId = shardRouting.shardId();
                if (shardId.id() == shardID && shardId.getIndex().equals(index)) {
                    if (shardRouting.primary()) {
-                       return IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY;
+                       return IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY;
                    } else if (shardRouting.assignedToNode()) {
-                       return IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA;
+                       return IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA;
                    } else {
-                       return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED;
+                       return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED;
                    }
                }
            }
-           return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED;
+           return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED;
        }

        /**
         * A shard exists/existed in a node only if shard state file exists in the node
         */
        private boolean shardExistsInNode(final NodeGatewayStartedShards response) {
-           return response.storeException() != null || response.version() != -1;
+           return response.storeException() != null || response.version() != -1 || response.allocationId() != null;
        }

        @Override

@@ -78,7 +78,7 @@ public class BulkProcessor implements Closeable {
        private int bulkActions = 1000;
        private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB);
        private TimeValue flushInterval = null;
-       private BackoffPolicy backoffPolicy = BackoffPolicy.noBackoff();
+       private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff();

        /**
         * Creates a builder of bulk processor with the client to use and the listener that will be used
@@ -140,7 +140,9 @@ public class BulkProcessor implements Closeable {
         * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally
         * in case they have failed due to resource constraints (i.e. a thread pool was full).
         *
-        * The default is to not back off, i.e. failing immediately.
+        * The default is to back off exponentially.
+        *
+        * @see org.elasticsearch.action.bulk.BackoffPolicy#exponentialBackoff()
         */
        public Builder setBackoffPolicy(BackoffPolicy backoffPolicy) {
            if (backoffPolicy == null) {
@@ -162,7 +164,7 @@ public class BulkProcessor implements Closeable {
            if (client == null) {
                throw new NullPointerException("The client you specified while building a BulkProcessor is null");
            }
            return new Builder(client, listener);
        }
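Editor's note: this hunk flips the BulkProcessor default from fail-fast to exponential backoff on rejected bulk requests. A hedged usage sketch (ES 2.2-era Java API, names from memory) showing how a caller would opt back into the old behavior:

```java
// Hedged sketch: building a BulkProcessor with an explicit backoff policy
// now that exponential backoff is the default.
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.client.Client;

public class BulkBackoffDemo {
    public static BulkProcessor build(Client client) {
        return BulkProcessor.builder(client, new BulkProcessor.Listener() {
            @Override public void beforeBulk(long executionId, BulkRequest request) { }
            @Override public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { }
            @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) { }
        })
        .setBulkActions(1000)                        // matches the Builder default above
        .setBackoffPolicy(BackoffPolicy.noBackoff()) // restore the pre-change fail-fast default
        .build();
    }
}
```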

@@ -20,7 +20,6 @@
 package org.elasticsearch.action.percolate;

 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.IndicesRequest;
@@ -37,8 +36,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.StringText;
-import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.percolator.PercolatorService;
 import org.elasticsearch.threadpool.ThreadPool;

@@ -473,6 +473,14 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
        return this;
    }

+   /**
+    * Should the query be profiled. Defaults to <code>false</code>
+    */
+   public SearchRequestBuilder setProfile(boolean profile) {
+       sourceBuilder().profile(profile);
+       return this;
+   }
+
    @Override
    public String toString() {
        if (request.source() != null) {
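Editor's note: this setter pairs with the getProfileResults() accessor added to SearchResponse in the next hunk. A hedged end-to-end sketch (2.x-era Java client; "my-index" and the "body" field are hypothetical):

```java
// Hedged sketch: enable profiling on the request, read per-shard results
// from the response (null when profiling was off).
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.profile.ProfileShardResult;

import java.util.List;
import java.util.Map;

public class ProfileDemo {
    public static void profile(Client client) {
        SearchResponse response = client.prepareSearch("my-index")
                .setQuery(QueryBuilders.matchQuery("body", "elasticsearch"))
                .setProfile(true) // the new flag
                .get();
        Map<String, List<ProfileShardResult>> profiles = response.getProfileResults();
        if (profiles != null) {
            profiles.forEach((shard, results) ->
                    System.out.println(shard + " -> " + results.size() + " profiled queries"));
        }
    }
}
```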

@@ -20,6 +20,7 @@
 package org.elasticsearch.action.search;

 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
@@ -32,9 +33,12 @@ import org.elasticsearch.rest.action.support.RestActions;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.profile.ProfileShardResult;
 import org.elasticsearch.search.suggest.Suggest;

 import java.io.IOException;
+import java.util.List;
+import java.util.Map;

 import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;
 import static org.elasticsearch.search.internal.InternalSearchResponse.readInternalSearchResponse;
@@ -160,6 +164,16 @@ public class SearchResponse extends ActionResponse implements StatusToXContent {
        this.scrollId = scrollId;
    }

+   /**
+    * If profiling was enabled, this returns an object containing the profile results from
+    * each shard. If profiling was not enabled, this will return null
+    *
+    * @return The profile results or null
+    */
+   public @Nullable Map<String, List<ProfileShardResult>> getProfileResults() {
+       return internalResponse.profile();
+   }
+
    static final class Fields {
        static final XContentBuilderString _SCROLL_ID = new XContentBuilderString("_scroll_id");
        static final XContentBuilderString TOOK = new XContentBuilderString("took");

@@ -621,7 +621,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
        public int numberOfReplicas() {
            return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
        }

        public Builder creationDate(long creationDate) {
            settings = settingsBuilder().put(settings).put(SETTING_CREATION_DATE, creationDate).build();
            return this;

@@ -47,7 +47,6 @@ import org.elasticsearch.rest.RestStatus;

 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Locale;

 /**
  * Service responsible for submitting open/close index requests
@@ -92,14 +91,6 @@ public class MetaDataIndexStateService extends AbstractComponent {
            }

            if (indexMetaData.getState() != IndexMetaData.State.CLOSE) {
-               IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index);
-               for (IndexShardRoutingTable shard : indexRoutingTable) {
-                   for (ShardRouting shardRouting : shard) {
-                       if (shardRouting.primary() == true && shardRouting.allocatedPostIndexCreate() == false) {
-                           throw new IndexPrimaryShardNotAllocatedException(new Index(index));
-                       }
-                   }
-               }
                indicesToClose.add(index);
            }
        }

@@ -236,8 +236,8 @@ public class MetaDataMappingService extends AbstractComponent {
    }

    private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
-       Map<String, DocumentMapper> newMappers = new HashMap<>();
-       Map<String, DocumentMapper> existingMappers = new HashMap<>();
+       String mappingType = request.type();
+       CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
        for (String index : request.indices()) {
            IndexService indexService = indicesService.indexServiceSafe(index);
            // try and parse it (no need to add it here) so we can bail early in case of parsing exception
@@ -245,9 +245,9 @@ public class MetaDataMappingService extends AbstractComponent {
            DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
            if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
                // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
-               newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
+               newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, false);
            } else {
-               newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
+               newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, existingMapper == null);
                if (existingMapper != null) {
                    // first, simulate
                    // this will just throw exceptions in case of problems
@@ -270,36 +270,31 @@ public class MetaDataMappingService extends AbstractComponent {
                    }
                }
            }
-           newMappers.put(index, newMapper);
-           if (existingMapper != null) {
-               existingMappers.put(index, existingMapper);
+           if (mappingType == null) {
+               mappingType = newMapper.type();
+           } else if (mappingType.equals(newMapper.type()) == false) {
+               throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
            }
        }
+       assert mappingType != null;

-       String mappingType = request.type();
-       if (mappingType == null) {
-           mappingType = newMappers.values().iterator().next().type();
-       } else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
-           throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
-       }
        if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
            throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
        }
        final Map<String, MappingMetaData> mappings = new HashMap<>();
-       for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
-           String index = entry.getKey();
+       for (String index : request.indices()) {
            // do the actual merge here on the master, and update the mapping source
-           DocumentMapper newMapper = entry.getValue();
            IndexService indexService = indicesService.indexService(index);
            if (indexService == null) {
                continue;
            }

            CompressedXContent existingSource = null;
-           if (existingMappers.containsKey(entry.getKey())) {
-               existingSource = existingMappers.get(entry.getKey()).mappingSource();
+           DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType);
+           if (existingMapper != null) {
+               existingSource = existingMapper.mappingSource();
            }
-           DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
+           DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, true, request.updateAllTypes());
            CompressedXContent updatedSource = mergedMapper.mappingSource();

            if (existingSource != null) {
@@ -318,9 +313,9 @@ public class MetaDataMappingService extends AbstractComponent {
            } else {
                mappings.put(index, new MappingMetaData(mergedMapper));
                if (logger.isDebugEnabled()) {
-                   logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
+                   logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
                } else if (logger.isInfoEnabled()) {
-                   logger.info("[{}] create_mapping [{}]", index, newMapper.type());
+                   logger.info("[{}] create_mapping [{}]", index, mappingType);
                }
            }
        }

@@ -19,6 +19,8 @@

 package org.elasticsearch.cluster.routing;

+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -267,7 +269,7 @@ public final class ShardRouting implements Streamable, ToXContent {
        return shardIdentifier;
    }

-   public boolean allocatedPostIndexCreate() {
+   public boolean allocatedPostIndexCreate(IndexMetaData indexMetaData) {
        if (active()) {
            return true;
        }
@@ -279,6 +281,11 @@ public final class ShardRouting implements Streamable, ToXContent {
            return false;
        }

+       if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_3_0_0)) {
+           // when no shards with this id have ever been active for this index
+           return false;
+       }
+
        return true;
    }

@@ -22,13 +22,13 @@ package org.elasticsearch.cluster.routing.allocation.decider;

 import com.carrotsearch.hppc.ObjectLookupContainer;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.DiskUsage;
 import org.elasticsearch.cluster.EmptyClusterInfoService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
@@ -330,7 +330,8 @@ public class DiskThresholdDecider extends AllocationDecider {
        }

        // a flag for whether the primary shard has been previously allocated
-       boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate();
+       IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
+       boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData);

        // checks for exact byte comparisons
        if (freeBytes < freeBytesThresholdLow.bytes()) {

@@ -19,6 +19,7 @@

 package org.elasticsearch.cluster.routing.allocation.decider;

+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@@ -91,8 +92,8 @@ public class EnableAllocationDecider extends AllocationDecider {
            return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored");
        }

-       Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings();
-       String enableIndexValue = indexSettings.get(INDEX_ROUTING_ALLOCATION_ENABLE);
+       IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
+       String enableIndexValue = indexMetaData.getSettings().get(INDEX_ROUTING_ALLOCATION_ENABLE);
        final Allocation enable;
        if (enableIndexValue != null) {
            enable = Allocation.parse(enableIndexValue);
@@ -105,7 +106,7 @@ public class EnableAllocationDecider extends AllocationDecider {
            case NONE:
                return allocation.decision(Decision.NO, NAME, "no allocations are allowed");
            case NEW_PRIMARIES:
-               if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate() == false) {
+               if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData) == false) {
                    return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed");
                } else {
                    return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden");

@@ -52,7 +52,7 @@ import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@@ -357,7 +357,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
                timeInQueue = 0;
            }

-           pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new StringText(source), timeInQueue, pending.executing));
+           pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new Text(source), timeInQueue, pending.executing));
        }
        return pendingClusterTasks;
    }

@@ -33,7 +33,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.geo.GeoPoint;
-import org.elasticsearch.common.text.StringAndBytesText;
+import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
@@ -256,13 +256,13 @@ public abstract class StreamInput extends InputStream {
        if (length == -1) {
            return null;
        }
-       return new StringAndBytesText(readBytesReference(length));
+       return new Text(readBytesReference(length));
    }

    public Text readText() throws IOException {
        // use StringAndBytes so we can cache the string if its ever converted to it
        int length = readInt();
-       return new StringAndBytesText(readBytesReference(length));
+       return new Text(readBytesReference(length));
    }

    @Nullable

@@ -149,6 +149,10 @@ public final class AllTermQuery extends Query {
            return null;
        }
        final TermState state = termStates.get(context.ord);
+       if (state == null) {
+           // Term does not exist in this segment
+           return null;
+       }
        termsEnum.seekExact(term.bytes(), state);
        PostingsEnum docs = termsEnum.postings(null, PostingsEnum.PAYLOADS);
        assert docs != null;

@@ -1,82 +0,0 @@ org/elasticsearch/common/text/BytesText.java (file deleted)
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.text;
import java.nio.charset.StandardCharsets;
import org.elasticsearch.common.bytes.BytesReference;
/**
* A {@link BytesReference} representation of the text, will always convert on the fly to a {@link String}.
*/
public class BytesText implements Text {
private BytesReference bytes;
private int hash;
public BytesText(BytesReference bytes) {
this.bytes = bytes;
}
@Override
public boolean hasBytes() {
return true;
}
@Override
public BytesReference bytes() {
return bytes;
}
@Override
public boolean hasString() {
return false;
}
@Override
public String string() {
// TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil
if (!bytes.hasArray()) {
bytes = bytes.toBytesArray();
}
return new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8);
}
@Override
public String toString() {
return string();
}
@Override
public int hashCode() {
if (hash == 0) {
hash = bytes.hashCode();
}
return hash;
}
@Override
public boolean equals(Object obj) {
return bytes().equals(((Text) obj).bytes());
}
@Override
public int compareTo(Text text) {
return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
}
}

@@ -1,111 +0,0 @@ org/elasticsearch/common/text/StringAndBytesText.java (file deleted)
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.text;
import java.nio.charset.StandardCharsets;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
/**
* Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if
* the other is requests, caches the other one in a local reference so no additional conversion will be needed.
*/
public class StringAndBytesText implements Text {
public static final Text[] EMPTY_ARRAY = new Text[0];
public static Text[] convertFromStringArray(String[] strings) {
if (strings.length == 0) {
return EMPTY_ARRAY;
}
Text[] texts = new Text[strings.length];
for (int i = 0; i < strings.length; i++) {
texts[i] = new StringAndBytesText(strings[i]);
}
return texts;
}
private BytesReference bytes;
private String text;
private int hash;
public StringAndBytesText(BytesReference bytes) {
this.bytes = bytes;
}
public StringAndBytesText(String text) {
this.text = text;
}
@Override
public boolean hasBytes() {
return bytes != null;
}
@Override
public BytesReference bytes() {
if (bytes == null) {
bytes = new BytesArray(text.getBytes(StandardCharsets.UTF_8));
}
return bytes;
}
@Override
public boolean hasString() {
return text != null;
}
@Override
public String string() {
// TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil
if (text == null) {
if (!bytes.hasArray()) {
bytes = bytes.toBytesArray();
}
text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8);
}
return text;
}
@Override
public String toString() {
return string();
}
@Override
public int hashCode() {
if (hash == 0) {
hash = bytes().hashCode();
}
return hash;
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
return bytes().equals(((Text) obj).bytes());
}
@Override
public int compareTo(Text text) {
return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
}
}

@@ -1,94 +0,0 @@ org/elasticsearch/common/text/StringText.java (file deleted)
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.text;
import java.nio.charset.StandardCharsets;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
/**
* A {@link String} only representation of the text. Will always convert to bytes on the fly.
*/
public class StringText implements Text {
public static final Text[] EMPTY_ARRAY = new Text[0];
public static Text[] convertFromStringArray(String[] strings) {
if (strings.length == 0) {
return EMPTY_ARRAY;
}
Text[] texts = new Text[strings.length];
for (int i = 0; i < strings.length; i++) {
texts[i] = new StringText(strings[i]);
}
return texts;
}
private final String text;
private int hash;
public StringText(String text) {
this.text = text;
}
@Override
public boolean hasBytes() {
return false;
}
@Override
public BytesReference bytes() {
return new BytesArray(text.getBytes(StandardCharsets.UTF_8));
}
@Override
public boolean hasString() {
return true;
}
@Override
public String string() {
return text;
}
@Override
public String toString() {
return string();
}
@Override
public int hashCode() {
// we use bytes here so we can be consistent with other text implementations
if (hash == 0) {
hash = bytes().hashCode();
}
return hash;
}
@Override
public boolean equals(Object obj) {
// we use bytes here so we can be consistent with other text implementations
return bytes().equals(((Text) obj).bytes());
}
@Override
public int compareTo(Text text) {
return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
}
}

@@ -18,39 +18,101 @@
  */

 package org.elasticsearch.common.text;

+import java.nio.charset.StandardCharsets;
+import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;

 /**
- * Text represents a (usually) long text data. We use this abstraction instead of {@link String}
- * so we can represent it in a more optimized manner in memory as well as serializing it over the
- * network as well as converting it to json format.
+ * Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if
+ * the other is requests, caches the other one in a local reference so no additional conversion will be needed.
  */
-public interface Text extends Comparable<Text> {
+public final class Text implements Comparable<Text> {
+
+    public static final Text[] EMPTY_ARRAY = new Text[0];
+
+    public static Text[] convertFromStringArray(String[] strings) {
+        if (strings.length == 0) {
+            return EMPTY_ARRAY;
+        }
+        Text[] texts = new Text[strings.length];
+        for (int i = 0; i < strings.length; i++) {
+            texts[i] = new Text(strings[i]);
+        }
+        return texts;
+    }
+
+    private BytesReference bytes;
+    private String text;
+    private int hash;
+
+    public Text(BytesReference bytes) {
+        this.bytes = bytes;
+    }
+
+    public Text(String text) {
+        this.text = text;
+    }
     /**
-     * Are bytes available without the need to be converted into bytes when calling {@link #bytes()}.
+     * Whether a {@link BytesReference} view of the data is already materialized.
      */
-    boolean hasBytes();
+    public boolean hasBytes() {
+        return bytes != null;
+    }

     /**
-     * The UTF8 bytes representing the the text, might be converted on the fly, see {@link #hasBytes()}
+     * Returns a {@link BytesReference} view of the data.
      */
-    BytesReference bytes();
+    public BytesReference bytes() {
+        if (bytes == null) {
+            bytes = new BytesArray(text.getBytes(StandardCharsets.UTF_8));
+        }
+        return bytes;
+    }

     /**
-     * Is there a {@link String} representation of the text. If not, then it {@link #hasBytes()}.
+     * Whether a {@link String} view of the data is already materialized.
      */
-    boolean hasString();
+    public boolean hasString() {
+        return text != null;
+    }

     /**
-     * Returns the string representation of the text, might be converted to a string on the fly.
+     * Returns a {@link String} view of the data.
      */
-    String string();
+    public String string() {
+        if (text == null) {
+            if (!bytes.hasArray()) {
+                bytes = bytes.toBytesArray();
+            }
+            text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8);
+        }
+        return text;
+    }

+    /**
+     * Returns the string representation of the text, might be converted to a string on the fly.
+     */
     @Override
-    String toString();
+    public String toString() {
+        return string();
+    }
+
+    @Override
+    public int hashCode() {
+        if (hash == 0) {
+            hash = bytes().hashCode();
+        }
+        return hash;
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (obj == null) {
+            return false;
+        }
+        return bytes().equals(((Text) obj).bytes());
+    }
+
+    @Override
+    public int compareTo(Text text) {
+        return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
+    }
 }
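Editor's note: the three deleted implementations above (BytesText, StringAndBytesText, StringText) collapse into this single class that holds both views lazily. An illustration of the caching contract, using only the methods shown in the hunk:

```java
// Demonstrates the lazy two-view caching of the consolidated Text class.
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.text.Text;

import java.nio.charset.StandardCharsets;

public class TextDemo {
    public static void main(String[] args) {
        Text fromString = new Text("hello");
        assert fromString.hasString();
        assert fromString.hasBytes() == false;     // no byte view yet
        fromString.bytes();                        // materializes and caches UTF-8 bytes
        assert fromString.hasBytes();

        Text fromBytes = new Text(new BytesArray("hello".getBytes(StandardCharsets.UTF_8)));
        assert fromBytes.hasString() == false;     // no String view yet
        assert fromBytes.string().equals("hello"); // decodes and caches the String
        assert fromBytes.hasString();
    }
}
```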

@@ -296,10 +296,10 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
    }

    private void maybeLogHeapDetails() {
-       ByteSizeValue maxHeapSize = JvmInfo.jvmInfo().getMem().getHeapMax();
-       Boolean usingCompressedOops = JvmInfo.jvmInfo().usingCompressedOops();
-       String usingCompressedOopsStatus = usingCompressedOops == null ? "unknown" : Boolean.toString(usingCompressedOops);
-       logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, usingCompressedOopsStatus);
+       JvmInfo jvmInfo = JvmInfo.jvmInfo();
+       ByteSizeValue maxHeapSize = jvmInfo.getMem().getHeapMax();
+       String useCompressedOops = jvmInfo.useCompressedOops();
+       logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops);
    }

    private static String toString(Collection<String> items) {

@@ -20,6 +20,7 @@
 package org.elasticsearch.gateway;

 import org.apache.lucene.util.CollectionUtil;
+import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -30,8 +31,10 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexSettings;

 import java.util.*;
+import java.util.stream.Collectors;

 /**
  * The primary shard allocator allocates primary shard that were not created as
@@ -39,6 +42,7 @@ import java.util.*;
  */
 public abstract class PrimaryShardAllocator extends AbstractComponent {

+    @Deprecated
     public static final String INDEX_RECOVERY_INITIAL_SHARDS = "index.recovery.initial_shards";

     private final String initialShards;
@@ -56,13 +60,21 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
        while (unassignedIterator.hasNext()) {
-           ShardRouting shard = unassignedIterator.next();
-           if (needToFindPrimaryCopy(shard) == false) {
+           final ShardRouting shard = unassignedIterator.next();
+           if (shard.primary() == false) {
                continue;
            }

-           AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
+           final IndexMetaData indexMetaData = metaData.index(shard.getIndex());
+           final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings, Collections.emptyList());
+
+           if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
+               // when we create a fresh index
+               continue;
+           }
+
+           final AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
            if (shardState.hasData() == false) {
                logger.trace("{}: ignoring allocation, still fetching shard started state", shard);
                allocation.setHasPendingAsyncFetch();
@ -70,25 +82,50 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
continue; continue;
} }
IndexMetaData indexMetaData = metaData.index(shard.getIndex()); final Set<String> lastActiveAllocationIds = indexMetaData.activeAllocationIds(shard.id());
Settings indexSettings = Settings.builder().put(settings).put(indexMetaData.getSettings()).build(); final boolean snapshotRestore = shard.restoreSource() != null;
final boolean recoverOnAnyNode = recoverOnAnyNode(indexSettings);
NodesAndVersions nodesAndVersions = buildNodesAndVersions(shard, recoverOnAnyNode(indexSettings), allocation.getIgnoreNodes(shard.shardId()), shardState); final NodesAndVersions nodesAndVersions;
logger.debug("[{}][{}] found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion); final boolean enoughAllocationsFound;
if (isEnoughAllocationsFound(shard, indexMetaData, nodesAndVersions) == false) { if (lastActiveAllocationIds.isEmpty()) {
// if we are restoring this shard we still can allocate assert indexSettings.getIndexVersionCreated().before(Version.V_3_0_0) : "trying to allocated a primary with an empty allocation id set, but index is new";
if (shard.restoreSource() == null) { // when we load an old index (after upgrading cluster) or restore a snapshot of an old index
// fall back to old version-based allocation mode
// Note that once the shard has been active, lastActiveAllocationIds will be non-empty
nodesAndVersions = buildNodesAndVersions(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState);
if (snapshotRestore || recoverOnAnyNode) {
enoughAllocationsFound = nodesAndVersions.allocationsFound > 0;
} else {
enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(shard, indexMetaData, nodesAndVersions);
}
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), Version.V_3_0_0, nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion);
} else {
assert lastActiveAllocationIds.isEmpty() == false;
// use allocation ids to select nodes
nodesAndVersions = buildAllocationIdBasedNodes(shard, snapshotRestore || recoverOnAnyNode,
allocation.getIgnoreNodes(shard.shardId()), lastActiveAllocationIds, shardState);
enoughAllocationsFound = nodesAndVersions.allocationsFound > 0;
logger.debug("[{}][{}]: found {} allocations of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, lastActiveAllocationIds);
}
if (enoughAllocationsFound == false){
if (snapshotRestore) {
// let BalancedShardsAllocator take care of allocating this shard
logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource());
} else if (recoverOnAnyNode) {
// let BalancedShardsAllocator take care of allocating this shard
logger.debug("[{}][{}]: missing local data, recover from any node", shard.index(), shard.id());
} else {
// we can't really allocate, so ignore it and continue // we can't really allocate, so ignore it and continue
unassignedIterator.removeAndIgnore(); unassignedIterator.removeAndIgnore();
logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound); logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound);
} else {
logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource());
} }
continue; continue;
} }
NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions); final NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions.nodes);
if (nodesToAllocate.yesNodes.isEmpty() == false) { if (nodesToAllocate.yesNodes.isEmpty() == false) {
DiscoveryNode node = nodesToAllocate.yesNodes.get(0); DiscoveryNode node = nodesToAllocate.yesNodes.get(0);
logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node); logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node);
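The control flow above is easier to see stripped of the surrounding plumbing: allocation-id bookkeeping is authoritative, and shard-version comparison survives only for indices that predate allocation ids. A condensed sketch of that decision under that reading (illustrative names, not the Elasticsearch types):

---------------------------------------------------------------------------
import java.util.Set;

// Condensed model of the primary-allocation decision above. Names are
// hypothetical; the real code operates on ShardRouting/IndexMetaData.
class PrimaryAllocationSketch {
    static boolean enoughCopiesFound(Set<String> lastActiveAllocationIds,
                                     int copiesFound, boolean snapshotRestore,
                                     boolean recoverOnAnyNode, int versionBasedQuorum) {
        if (lastActiveAllocationIds.isEmpty()) {
            // pre-3.0 index: fall back to version-based allocation
            return (snapshotRestore || recoverOnAnyNode)
                    ? copiesFound > 0
                    : copiesFound >= versionBasedQuorum;
        }
        // allocation-id based: any copy whose id is in the active set will do
        return copiesFound > 0;
    }
}
---------------------------------------------------------------------------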
@@ -109,63 +146,99 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
     }

     /**
-     * Does the shard need to find a primary copy?
+     * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have an allocation id matching
+     * lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but
+     * entries with matching allocation id are always at the front of the list.
      */
-    boolean needToFindPrimaryCopy(ShardRouting shard) {
-        if (shard.primary() == false) {
-            return false;
-        }
-
-        // this is an API allocation, ignore since we know there is no data...
-        if (shard.allocatedPostIndexCreate() == false) {
-            return false;
-        }
-
-        return true;
+    protected NodesAndVersions buildAllocationIdBasedNodes(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
+                                                           Set<String> lastActiveAllocationIds, AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) {
+        List<DiscoveryNode> matchingNodes = new ArrayList<>();
+        List<DiscoveryNode> nonMatchingNodes = new ArrayList<>();
+        long highestVersion = -1;
+        for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) {
+            DiscoveryNode node = nodeShardState.getNode();
+            String allocationId = nodeShardState.allocationId();
+
+            if (ignoreNodes.contains(node.id())) {
+                continue;
+            }
+
+            if (nodeShardState.storeException() == null) {
+                if (allocationId == null && nodeShardState.version() != -1) {
+                    // old shard with no allocation id, assign dummy value so that it gets added below in case of matchAnyShard
+                    allocationId = "_n/a_";
+                }
+                logger.trace("[{}] on node [{}] has allocation id [{}] of shard", shard, nodeShardState.getNode(), allocationId);
+            } else {
+                logger.trace("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", nodeShardState.storeException(), shard, nodeShardState.getNode(), allocationId);
+                allocationId = null;
+            }
+
+            if (allocationId != null) {
+                if (lastActiveAllocationIds.contains(allocationId)) {
+                    matchingNodes.add(node);
+                    highestVersion = Math.max(highestVersion, nodeShardState.version());
+                } else if (matchAnyShard) {
+                    nonMatchingNodes.add(node);
+                    highestVersion = Math.max(highestVersion, nodeShardState.version());
+                }
+            }
+        }
+
+        List<DiscoveryNode> nodes = new ArrayList<>();
+        nodes.addAll(matchingNodes);
+        nodes.addAll(nonMatchingNodes);
+
+        if (logger.isTraceEnabled()) {
+            logger.trace("{} candidates for allocation: {}", shard, nodes.stream().map(DiscoveryNode::name).collect(Collectors.joining(", ")));
+        }
+
+        return new NodesAndVersions(nodes, nodes.size(), highestVersion);
     }
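The ordering contract of the new method (copies with a matching allocation id always sort ahead of any other copies admitted by matchAnyShard) can be shown in isolation. A sketch, not the production method:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

// Sketch of the candidate ordering in buildAllocationIdBasedNodes: entries
// whose allocation id is in the last-active set stay at the front of the list.
class CandidateOrdering {
    static List<String> order(List<String> allocationIds, Set<String> lastActive, boolean matchAnyShard) {
        List<String> matching = new ArrayList<>();
        List<String> nonMatching = new ArrayList<>();
        for (String allocationId : allocationIds) {
            if (lastActive.contains(allocationId)) {
                matching.add(allocationId);
            } else if (matchAnyShard) {
                nonMatching.add(allocationId);
            }
        }
        matching.addAll(nonMatching); // matching entries remain first
        return matching;
    }
}
---------------------------------------------------------------------------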
-    private boolean isEnoughAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) {
+    /**
+     * used by the old version-based allocation
+     */
+    private boolean isEnoughVersionBasedAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) {
         // check if the count meets the minimum set
         int requiredAllocation = 1;
         // if we restore from a repository one copy is more than enough
-        if (shard.restoreSource() == null) {
         try {
             String initialShards = indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards));
             if ("quorum".equals(initialShards)) {
                 if (indexMetaData.getNumberOfReplicas() > 1) {
                     requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1;
                 }
             } else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) {
                 if (indexMetaData.getNumberOfReplicas() > 2) {
                     requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2);
                 }
             } else if ("one".equals(initialShards)) {
                 requiredAllocation = 1;
             } else if ("full".equals(initialShards) || "all".equals(initialShards)) {
                 requiredAllocation = indexMetaData.getNumberOfReplicas() + 1;
             } else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) {
                 if (indexMetaData.getNumberOfReplicas() > 1) {
                     requiredAllocation = indexMetaData.getNumberOfReplicas();
                 }
             } else {
                 requiredAllocation = Integer.parseInt(initialShards);
             }
         } catch (Exception e) {
             logger.warn("[{}][{}] failed to derive initial_shards from value {}, ignoring allocation for {}", shard.index(), shard.id(), initialShards, shard);
         }
-        }

         return nodesAndVersions.allocationsFound >= requiredAllocation;
     }
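The initial_shards arithmetic is easy to check by hand; for example, with number_of_replicas = 2 the "quorum" setting requires (1 + 2) / 2 + 1 = 2 started copies. A tiny worked sketch of just that branch:

---------------------------------------------------------------------------
// Worked example of the "quorum" branch above (integer division).
class QuorumMath {
    static int quorum(int numberOfReplicas) {
        // mirrors: requiredAllocation = ((1 + replicas) / 2) + 1 when replicas > 1
        return numberOfReplicas > 1 ? ((1 + numberOfReplicas) / 2) + 1 : 1;
    }

    public static void main(String[] args) {
        System.out.println(quorum(2)); // 2 copies out of 3 total
        System.out.println(quorum(4)); // 3 copies out of 5 total
    }
}
---------------------------------------------------------------------------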
     /**
-     * Based on the nodes and versions, build the list of yes/no/throttle nodes that the shard applies to.
+     * Splits the list of nodes into yes/no/throttle lists based on the allocation deciders
      */
-    private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, NodesAndVersions nodesAndVersions) {
+    private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, List<DiscoveryNode> nodes) {
         List<DiscoveryNode> yesNodes = new ArrayList<>();
         List<DiscoveryNode> throttledNodes = new ArrayList<>();
         List<DiscoveryNode> noNodes = new ArrayList<>();
-        for (DiscoveryNode discoNode : nodesAndVersions.nodes) {
+        for (DiscoveryNode discoNode : nodes) {
             RoutingNode node = allocation.routingNodes().node(discoNode.id());
             if (node == null) {
                 continue;
@@ -184,9 +257,11 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
     }

     /**
-     * Builds a list of nodes and version
+     * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have the highest shard version
+     * are added to the list. Otherwise, any node that has a shard is added to the list, but entries with highest
+     * version are always at the front of the list.
      */
-    NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean recoveryOnAnyNode, Set<String> ignoreNodes,
+    NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
                                            AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> shardState) {
         final Map<DiscoveryNode, Long> nodesWithVersion = new HashMap<>();
         int numberOfAllocationsFound = 0;
@@ -208,20 +283,15 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
                 version = -1;
             }

-            if (recoveryOnAnyNode) {
-                numberOfAllocationsFound++;
-                if (version > highestVersion) {
-                    highestVersion = version;
-                }
-                // We always put the node without clearing the map
-                nodesWithVersion.put(node, version);
-            } else if (version != -1) {
+            if (version != -1) {
                 numberOfAllocationsFound++;
                 // If we've found a new "best" candidate, clear the
                 // current candidates and add it
                 if (version > highestVersion) {
                     highestVersion = version;
-                    nodesWithVersion.clear();
+                    if (matchAnyShard == false) {
+                        nodesWithVersion.clear();
+                    }
                     nodesWithVersion.put(node, version);
                 } else if (version == highestVersion) {
                     // If the candidate is the same, add it to the
@@ -258,9 +328,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
      * Return {@code true} if the index is configured to allow shards to be
      * recovered on any node
      */
-    private boolean recoverOnAnyNode(Settings idxSettings) {
-        return IndexMetaData.isOnSharedFilesystem(idxSettings) &&
-                idxSettings.getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false);
+    private boolean recoverOnAnyNode(IndexSettings indexSettings) {
+        return indexSettings.isOnSharedFilesystem()
+            && indexSettings.getSettings().getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false);
     }

     protected abstract AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);

@@ -24,6 +24,8 @@ import com.carrotsearch.hppc.ObjectLongMap;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectLongCursor;
 import org.elasticsearch.cluster.ClusterChangedEvent;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingNodes;
@@ -56,6 +58,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
      */
     public boolean processExistingRecoveries(RoutingAllocation allocation) {
         boolean changed = false;
+        MetaData metaData = allocation.metaData();
         for (RoutingNodes.RoutingNodesIterator nodes = allocation.routingNodes().nodes(); nodes.hasNext(); ) {
             nodes.next();
             for (RoutingNodes.RoutingNodeIterator it = nodes.nodeShards(); it.hasNext(); ) {
@@ -69,8 +72,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
                 if (shard.relocatingNodeId() != null) {
                     continue;
                 }

                 // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
-                if (shard.allocatedPostIndexCreate() == false) {
+                IndexMetaData indexMetaData = metaData.index(shard.getIndex());
+                if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
                     continue;
                 }
@@ -114,6 +119,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
         boolean changed = false;
         final RoutingNodes routingNodes = allocation.routingNodes();
         final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
+        MetaData metaData = allocation.metaData();
         while (unassignedIterator.hasNext()) {
             ShardRouting shard = unassignedIterator.next();
             if (shard.primary()) {
@@ -121,7 +127,8 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
             }

             // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
-            if (shard.allocatedPostIndexCreate() == false) {
+            IndexMetaData indexMetaData = metaData.index(shard.getIndex());
+            if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
                 continue;
             }
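The guard in both hunks has the same rationale: a replica that is unassigned because its index was just created cannot have an on-disk copy anywhere, so store fetching is pointless. A simplified model of that check (hypothetical names; the real allocatedPostIndexCreate also consults the index metadata):

---------------------------------------------------------------------------
// Sketch: skip store-data fetching for replicas of freshly created indices.
class ReplicaSkipSketch {
    enum UnassignedReason { INDEX_CREATED, NODE_LEFT, REROUTE }

    static boolean worthFetchingStoreData(boolean primary, UnassignedReason reason) {
        // only non-primary copies of pre-existing indices can have data on disk
        return primary == false && reason != UnassignedReason.INDEX_CREATED;
    }
}
---------------------------------------------------------------------------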

@@ -139,7 +139,8 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
                     Store.tryOpenIndex(shardPath.resolveIndex());
                 } catch (Exception exception) {
                     logger.trace("{} can't open index for shard [{}] in path [{}]", exception, shardId, shardStateMetaData, (shardPath != null) ? shardPath.resolveIndex() : "");
-                    return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, exception);
+                    String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null;
+                    return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId, exception);
                 }
             }
             // old shard metadata doesn't have the actual index UUID so we need to check if the actual uuid in the metadata
// old shard metadata doesn't have the actual index UUID so we need to check if the actual uuid in the metadata // old shard metadata doesn't have the actual index UUID so we need to check if the actual uuid in the metadata
@ -149,11 +150,12 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
logger.warn("{} shard state info found but indexUUID didn't match expected [{}] actual [{}]", shardId, indexUUID, shardStateMetaData.indexUUID); logger.warn("{} shard state info found but indexUUID didn't match expected [{}] actual [{}]", shardId, indexUUID, shardStateMetaData.indexUUID);
} else { } else {
logger.debug("{} shard state info found: [{}]", shardId, shardStateMetaData); logger.debug("{} shard state info found: [{}]", shardId, shardStateMetaData);
return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version); String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null;
return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId);
} }
} }
logger.trace("{} no local shard info found", shardId); logger.trace("{} no local shard info found", shardId);
return new NodeGatewayStartedShards(clusterService.localNode(), -1); return new NodeGatewayStartedShards(clusterService.localNode(), -1, null);
} catch (Exception e) { } catch (Exception e) {
throw new ElasticsearchException("failed to load started shards", e); throw new ElasticsearchException("failed to load started shards", e);
} }
@@ -277,17 +279,19 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
     public static class NodeGatewayStartedShards extends BaseNodeResponse {

         private long version = -1;
+        private String allocationId = null;
         private Throwable storeException = null;

         public NodeGatewayStartedShards() {
         }

-        public NodeGatewayStartedShards(DiscoveryNode node, long version) {
-            this(node, version, null);
+        public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId) {
+            this(node, version, allocationId, null);
         }

-        public NodeGatewayStartedShards(DiscoveryNode node, long version, Throwable storeException) {
+        public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId, Throwable storeException) {
             super(node);
             this.version = version;
+            this.allocationId = allocationId;
             this.storeException = storeException;
         }
@@ -295,6 +299,10 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
             return this.version;
         }

+        public String allocationId() {
+            return this.allocationId;
+        }
+
         public Throwable storeException() {
             return this.storeException;
         }
@@ -303,16 +311,17 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
             version = in.readLong();
+            allocationId = in.readOptionalString();
             if (in.readBoolean()) {
                 storeException = in.readThrowable();
             }
         }

         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
             out.writeLong(version);
+            out.writeOptionalString(allocationId);
             if (storeException != null) {
                 out.writeBoolean(true);
                 out.writeThrowable(storeException);
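writeOptionalString keeps the new allocationId field nullable on the wire by prefixing the value with a presence flag. A stand-alone sketch of that encoding with plain java.io streams (an analogy to, not the actual, StreamOutput/StreamInput implementation):

---------------------------------------------------------------------------
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Presence-flag encoding for a nullable string, in the spirit of
// StreamOutput#writeOptionalString / StreamInput#readOptionalString.
class OptionalStringCodec {
    static void writeOptionalString(DataOutputStream out, String s) throws IOException {
        out.writeBoolean(s != null); // presence flag first
        if (s != null) {
            out.writeUTF(s);
        }
    }

    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeOptionalString(new DataOutputStream(bytes), null); // null round-trips safely
        System.out.println(readOptionalString(
                new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))));
    }
}
---------------------------------------------------------------------------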

@@ -29,7 +29,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.StringAndBytesText;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.util.concurrent.ReleasableLock;
 import org.elasticsearch.common.xcontent.ToXContent;
@@ -114,7 +114,7 @@ public class DocumentMapper implements ToXContent {
     private final MapperService mapperService;

     private final String type;
-    private final StringAndBytesText typeText;
+    private final Text typeText;

     private volatile CompressedXContent mappingSource;
@@ -138,7 +138,7 @@ public class DocumentMapper implements ToXContent {
                           ReentrantReadWriteLock mappingLock) {
         this.mapperService = mapperService;
         this.type = rootObjectMapper.name();
-        this.typeText = new StringAndBytesText(this.type);
+        this.typeText = new Text(this.type);
         this.mapping = new Mapping(
                 Version.indexCreated(indexSettings),
                 rootObjectMapper,

@@ -198,6 +198,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     public DocumentMapper merge(String type, CompressedXContent mappingSource, boolean applyDefault, boolean updateAllTypes) {
         if (DEFAULT_MAPPING.equals(type)) {
             // verify we can parse it
+            // NOTE: never apply the default here
             DocumentMapper mapper = documentParser.parseCompressed(type, mappingSource);
             // still add it as a document mapper so we have it registered and, for example, persisted back into
             // the cluster meta data if needed, or checked for existence
@@ -211,68 +212,70 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
             }
             return mapper;
         } else {
-            return merge(parse(type, mappingSource, applyDefault), updateAllTypes);
+            try (ReleasableLock lock = mappingWriteLock.acquire()) {
+                // only apply the default mapping if we don't have the type yet
+                applyDefault &= mappers.containsKey(type) == false;
+                return merge(parse(type, mappingSource, applyDefault), updateAllTypes);
+            }
         }
     }
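The try-with-resources form works because acquire() returns an object whose close() unlocks. A sketch of that idiom over a plain ReentrantReadWriteLock (simplified from, and not identical to, Elasticsearch's ReleasableLock helper):

---------------------------------------------------------------------------
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// A lock wrapper usable in try-with-resources, in the spirit of
// org.elasticsearch.common.util.concurrent.ReleasableLock.
class AutoLock implements AutoCloseable {
    private final Lock lock;

    AutoLock(Lock lock) {
        this.lock = lock;
    }

    AutoLock acquire() {
        lock.lock();
        return this;
    }

    @Override
    public void close() {
        lock.unlock();
    }

    public static void main(String[] args) {
        ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
        AutoLock writeLock = new AutoLock(rwLock.writeLock());
        try (AutoLock ignored = writeLock.acquire()) {
            // mutate shared mapping state while holding the write lock
        } // released here, even if the body throws
    }
}
---------------------------------------------------------------------------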
     // never expose this to the outside world, we need to reparse the doc mapper so we get fresh
     // instances of field mappers to properly remove existing doc mapper
     private DocumentMapper merge(DocumentMapper mapper, boolean updateAllTypes) {
-        try (ReleasableLock lock = mappingWriteLock.acquire()) {
         if (mapper.type().length() == 0) {
             throw new InvalidTypeNameException("mapping type name is empty");
         }
         if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) {
             throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
         }
         if (mapper.type().charAt(0) == '_') {
             throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
         }
         if (mapper.type().contains("#")) {
             throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it");
         }
         if (mapper.type().contains(",")) {
             throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it");
         }
         if (mapper.type().equals(mapper.parentFieldMapper().type())) {
             throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
         }
         if (typeNameStartsWithIllegalDot(mapper)) {
             if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) {
                 throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'");
             } else {
                 logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type());
             }
         }
         // we can add new field/object mappers while the old ones are there
         // since we get new instances of those, and when we remove, we remove
         // by instance equality
         DocumentMapper oldMapper = mappers.get(mapper.type());

         if (oldMapper != null) {
             oldMapper.merge(mapper.mapping(), false, updateAllTypes);
             return oldMapper;
         } else {
             Tuple<Collection<ObjectMapper>, Collection<FieldMapper>> newMappers = checkMappersCompatibility(
                     mapper.type(), mapper.mapping(), updateAllTypes);
             Collection<ObjectMapper> newObjectMappers = newMappers.v1();
             Collection<FieldMapper> newFieldMappers = newMappers.v2();
             addMappers(mapper.type(), newObjectMappers, newFieldMappers);

             for (DocumentTypeListener typeListener : typeListeners) {
                 typeListener.beforeCreate(mapper);
             }
             mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map();
             if (mapper.parentFieldMapper().active()) {
                 Set<String> newParentTypes = new HashSet<>(parentTypes.size() + 1);
                 newParentTypes.addAll(parentTypes);
                 newParentTypes.add(mapper.parentFieldMapper().type());
                 parentTypes = unmodifiableSet(newParentTypes);
             }
             assert assertSerialization(mapper);
             return mapper;
         }
-        }
     }

     private boolean typeNameStartsWithIllegalDot(DocumentMapper mapper) {
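The validation rules the method enforces are self-contained enough to restate as a checklist; a compact sketch (hypothetical helper, covering only the 2.0+ rules):

---------------------------------------------------------------------------
// Sketch of the mapping type-name rules enforced above for indices created
// on or after 2.0: non-empty, at most 255 chars, no leading '_' or '.',
// and no '#' or ',' anywhere.
class TypeNameRules {
    static void validate(String type) {
        if (type.isEmpty()) {
            throw new IllegalArgumentException("mapping type name is empty");
        }
        if (type.length() > 255) {
            throw new IllegalArgumentException("mapping type name [" + type + "] is too long; limit is length 255");
        }
        if (type.charAt(0) == '_' || type.charAt(0) == '.') {
            throw new IllegalArgumentException("mapping type name [" + type + "] can't start with '" + type.charAt(0) + "'");
        }
        if (type.contains("#") || type.contains(",")) {
            throw new IllegalArgumentException("mapping type name [" + type + "] must not contain '#' or ','");
        }
    }
}
---------------------------------------------------------------------------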

@@ -19,7 +19,11 @@
 package org.elasticsearch.index.shard;

-import org.apache.lucene.index.*;
+import org.apache.lucene.index.CheckIndex;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
+import org.apache.lucene.index.SnapshotDeletionPolicy;
+import org.apache.lucene.index.Term;
 import org.apache.lucene.search.QueryCachingPolicy;
 import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
 import org.apache.lucene.store.AlreadyClosedException;
@@ -61,7 +65,16 @@ import org.elasticsearch.index.cache.bitset.ShardBitsetFilterCache;
 import org.elasticsearch.index.cache.query.QueryCacheStats;
 import org.elasticsearch.index.cache.request.ShardRequestCache;
 import org.elasticsearch.index.codec.CodecService;
-import org.elasticsearch.index.engine.*;
+import org.elasticsearch.index.engine.CommitStats;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.engine.EngineClosedException;
+import org.elasticsearch.index.engine.EngineConfig;
+import org.elasticsearch.index.engine.EngineException;
+import org.elasticsearch.index.engine.EngineFactory;
+import org.elasticsearch.index.engine.InternalEngineFactory;
+import org.elasticsearch.index.engine.RefreshFailedEngineException;
+import org.elasticsearch.index.engine.Segment;
+import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.index.fielddata.FieldDataStats;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.fielddata.ShardFieldData;
@@ -70,7 +83,12 @@ import org.elasticsearch.index.get.GetStats;
 import org.elasticsearch.index.get.ShardGetService;
 import org.elasticsearch.index.indexing.IndexingStats;
 import org.elasticsearch.index.indexing.ShardIndexingService;
-import org.elasticsearch.index.mapper.*;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperForType;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.mapper.ParsedDocument;
+import org.elasticsearch.index.mapper.SourceToParse;
+import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.merge.MergeStats;
 import org.elasticsearch.index.percolator.PercolateStats;
 import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
@@ -108,7 +126,12 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.channels.ClosedByInterruptException;
 import java.nio.charset.StandardCharsets;
-import java.util.*;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
@@ -151,7 +174,6 @@ public class IndexShard extends AbstractIndexShardComponent {

     private TimeValue refreshInterval;

     private volatile ScheduledFuture<?> refreshScheduledFuture;
-    private volatile ScheduledFuture<?> mergeScheduleFuture;
     protected volatile ShardRouting shardRouting;
     protected volatile IndexShardState state;
     protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();
@@ -766,8 +788,6 @@ public class IndexShard extends AbstractIndexShardComponent {
                 if (state != IndexShardState.CLOSED) {
                     FutureUtils.cancel(refreshScheduledFuture);
                     refreshScheduledFuture = null;
-                    FutureUtils.cancel(mergeScheduleFuture);
-                    mergeScheduleFuture = null;
                 }
                 changeState(IndexShardState.CLOSED, reason);
                 indexShardOperationCounter.decRef();
@@ -1099,7 +1119,8 @@ public class IndexShard extends AbstractIndexShardComponent {
             // we are the first primary, recover from the gateway
             // if it's a post-API allocation, the index should exist
             assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
-            final boolean shouldExist = shardRouting.allocatedPostIndexCreate();
+            boolean shouldExist = shardRouting.allocatedPostIndexCreate(idxSettings.getIndexMetaData());
             StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
             return storeRecovery.recoverFromStore(this, shouldExist, localNode);
         }

@@ -116,11 +116,10 @@ public class JvmInfo implements Streamable, ToXContent {
             Method vmOptionMethod = clazz.getMethod("getVMOption", String.class);
             Object useCompressedOopsVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseCompressedOops");
             Method valueMethod = vmOptionClazz.getMethod("getValue");
-            String value = (String)valueMethod.invoke(useCompressedOopsVmOption);
-            info.usingCompressedOops = Boolean.parseBoolean(value);
+            info.useCompressedOops = (String)valueMethod.invoke(useCompressedOopsVmOption);
         } catch (Throwable t) {
             // unable to deduce the state of compressed oops
-            // usingCompressedOops will hold its default value of null
+            info.useCompressedOops = "unknown";
         }

         INSTANCE = info;
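The reflective dance above exists because the com.sun.management types are not guaranteed to be present on every JVM. The same flag can also be read through JMX without importing those types at all; a stand-alone sketch (assumptions: the standard "com.sun.management:type=HotSpotDiagnostic" MXBean name, and that the VMOption return value maps to CompositeData with a "value" key):

---------------------------------------------------------------------------
import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.openmbean.CompositeData;

// Reads the UseCompressedOops VM option via the HotSpotDiagnostic MXBean,
// falling back to "unknown" on any failure, like the code above.
class CompressedOopsProbe {
    static String useCompressedOops() {
        try {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            ObjectName name = new ObjectName("com.sun.management:type=HotSpotDiagnostic");
            CompositeData option = (CompositeData) server.invoke(
                    name, "getVMOption", new Object[]{"UseCompressedOops"},
                    new String[]{String.class.getName()});
            return (String) option.get("value");
        } catch (Throwable t) {
            return "unknown";
        }
    }

    public static void main(String[] args) {
        System.out.println("compressed oops: " + useCompressedOops());
    }
}
---------------------------------------------------------------------------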
@@ -157,7 +156,7 @@ public class JvmInfo implements Streamable, ToXContent {
     String[] gcCollectors = Strings.EMPTY_ARRAY;
     String[] memoryPools = Strings.EMPTY_ARRAY;

-    private Boolean usingCompressedOops;
+    private String useCompressedOops;

     private JvmInfo() {
     }
@@ -282,8 +281,16 @@ public class JvmInfo implements Streamable, ToXContent {
         return this.systemProperties;
     }

-    public Boolean usingCompressedOops() {
-        return this.usingCompressedOops;
+    /**
+     * The value of the JVM flag UseCompressedOops, if available, otherwise
+     * "unknown". The value "unknown" indicates that an attempt was
+     * made to obtain the value of the flag on this JVM and the attempt
+     * failed.
+     *
+     * @return the value of the JVM flag UseCompressedOops or "unknown"
+     */
+    public String useCompressedOops() {
+        return this.useCompressedOops;
     }
@Override @Override
@ -307,7 +314,7 @@ public class JvmInfo implements Streamable, ToXContent {
builder.field(Fields.GC_COLLECTORS, gcCollectors); builder.field(Fields.GC_COLLECTORS, gcCollectors);
builder.field(Fields.MEMORY_POOLS, memoryPools); builder.field(Fields.MEMORY_POOLS, memoryPools);
builder.field(Fields.USING_COMPRESSED_OOPS, usingCompressedOops == null ? "unknown" : Boolean.toString(usingCompressedOops)); builder.field(Fields.USING_COMPRESSED_OOPS, useCompressedOops);
builder.endObject(); builder.endObject();
return builder; return builder;
@@ -368,11 +375,7 @@ public class JvmInfo implements Streamable, ToXContent {
         mem.readFrom(in);
         gcCollectors = in.readStringArray();
         memoryPools = in.readStringArray();
-        if (in.readBoolean()) {
-            usingCompressedOops = in.readBoolean();
-        } else {
-            usingCompressedOops = null;
-        }
+        useCompressedOops = in.readString();
     }
@Override @Override
@ -397,12 +400,7 @@ public class JvmInfo implements Streamable, ToXContent {
mem.writeTo(out); mem.writeTo(out);
out.writeStringArray(gcCollectors); out.writeStringArray(gcCollectors);
out.writeStringArray(memoryPools); out.writeStringArray(memoryPools);
if (usingCompressedOops != null) { out.writeString(useCompressedOops);
out.writeBoolean(true);
out.writeBoolean(usingCompressedOops);
} else {
out.writeBoolean(false);
}
} }
public static class Mem implements Streamable { public static class Mem implements Streamable {

@@ -38,12 +38,11 @@ import org.elasticsearch.common.HasHeaders;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.lease.Releasables;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
-import org.elasticsearch.index.cache.query.QueryCache;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -74,6 +73,8 @@ import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.internal.ShardSearchRequest;
 import org.elasticsearch.search.lookup.LeafSearchLookup;
 import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.profile.Profiler;
+import org.elasticsearch.search.profile.Profilers;
 import org.elasticsearch.search.query.QuerySearchResult;
 import org.elasticsearch.search.rescore.RescoreSearchContext;
 import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -139,7 +140,7 @@ public class PercolateContext extends SearchContext {
         this.bigArrays = bigArrays.withCircuitBreaking();
         this.querySearchResult = new QuerySearchResult(0, searchShardTarget);
         this.engineSearcher = indexShard.acquireSearcher("percolate");
-        this.searcher = new ContextIndexSearcher(this, engineSearcher);
+        this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
         this.scriptService = scriptService;
         this.numberOfShards = request.getNumberOfShards();
         this.aliasFilter = aliasFilter;
@@ -164,7 +165,7 @@ public class PercolateContext extends SearchContext {
             fields.put(field.name(), new InternalSearchHitField(field.name(), Collections.emptyList()));
         }
         hitContext().reset(
-                new InternalSearchHit(0, "unknown", new StringText(parsedDocument.type()), fields),
+                new InternalSearchHit(0, "unknown", new Text(parsedDocument.type()), fields),
                 atomicReaderContext, 0, docSearcher.searcher()
         );
     }
@@ -748,5 +749,7 @@ public class PercolateContext extends SearchContext {
     }

     @Override
-    public QueryCache getQueryCache() { return indexService.cache().query();}
+    public Profilers getProfilers() {
+        throw new UnsupportedOperationException();
+    }
 }

@@ -52,8 +52,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.BytesText;
-import org.elasticsearch.common.text.StringText;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
@@ -533,10 +531,10 @@ public class PercolatorService extends AbstractComponent {
         List<PercolateResponse.Match> finalMatches = new ArrayList<>(requestedSize == 0 ? numMatches : requestedSize);
         outer:
         for (PercolateShardResponse response : shardResults) {
-            Text index = new StringText(response.getIndex());
+            Text index = new Text(response.getIndex());
             for (int i = 0; i < response.matches().length; i++) {
                 float score = response.scores().length == 0 ? NO_SCORE : response.scores()[i];
-                Text match = new BytesText(new BytesArray(response.matches()[i]));
+                Text match = new Text(new BytesArray(response.matches()[i]));
                 Map<String, HighlightField> hl = response.hls().isEmpty() ? null : response.hls().get(i);
                 finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
                 if (requestedSize != 0 && finalMatches.size() == requestedSize) {
@@ -686,10 +684,10 @@ public class PercolatorService extends AbstractComponent {
         List<PercolateResponse.Match> finalMatches = new ArrayList<>(requestedSize);
         if (nonEmptyResponses == 1) {
             PercolateShardResponse response = shardResults.get(firstNonEmptyIndex);
-            Text index = new StringText(response.getIndex());
+            Text index = new Text(response.getIndex());
             for (int i = 0; i < response.matches().length; i++) {
                 float score = response.scores().length == 0 ? Float.NaN : response.scores()[i];
-                Text match = new BytesText(new BytesArray(response.matches()[i]));
+                Text match = new Text(new BytesArray(response.matches()[i]));
                 if (!response.hls().isEmpty()) {
                     Map<String, HighlightField> hl = response.hls().get(i);
                     finalMatches.add(new PercolateResponse.Match(index, match, score, hl));
@@ -728,8 +726,8 @@ public class PercolatorService extends AbstractComponent {
                 slots[requestIndex]++;
                 PercolateShardResponse shardResponse = shardResults.get(requestIndex);
-                Text index = new StringText(shardResponse.getIndex());
-                Text match = new BytesText(new BytesArray(shardResponse.matches()[itemIndex]));
+                Text index = new Text(shardResponse.getIndex());
+                Text match = new Text(new BytesArray(shardResponse.matches()[itemIndex]));
                 float score = shardResponse.scores()[itemIndex];
                 if (!shardResponse.hls().isEmpty()) {
                     Map<String, HighlightField> hl = shardResponse.hls().get(itemIndex);

@@ -23,6 +23,7 @@ import com.carrotsearch.hppc.ObjectFloatHashMap;
 import com.carrotsearch.hppc.ObjectHashSet;
 import com.carrotsearch.hppc.ObjectSet;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
+
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NumericDocValues;
@@ -87,6 +88,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext.ScriptField;
 import org.elasticsearch.search.highlight.HighlightBuilder;
 import org.elasticsearch.search.internal.*;
 import org.elasticsearch.search.internal.SearchContext.Lifetime;
+import org.elasticsearch.search.profile.Profilers;
 import org.elasticsearch.search.query.*;
 import org.elasticsearch.search.warmer.IndexWarmersMetaData;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -547,7 +549,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
         Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;

-        SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout);
+        DefaultSearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout);
         SearchContext.setCurrent(context);
         try {
@@ -654,7 +656,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
         }
     }

-    private void parseSource(SearchContext context, SearchSourceBuilder source) throws SearchContextException {
+    private void parseSource(DefaultSearchContext context, SearchSourceBuilder source) throws SearchContextException {
         // nothing to parse...
         if (source == null) {
             return;
@@ -710,6 +712,9 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
         if (source.minScore() != null) {
             context.minimumScore(source.minScore());
         }
+        if (source.profile()) {
+            context.setProfilers(new Profilers(context.searcher()));
+        }
         context.timeoutInMillis(source.timeoutInMillis());
         context.terminateAfter(source.terminateAfter());
         if (source.aggregations() != null) {
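The new hook only fires when the request opted in. A sketch of a search source that does so (assumption: SearchSourceBuilder exposes a fluent profile(boolean) setter in this version, matching the source.profile() getter used above):

---------------------------------------------------------------------------
import org.elasticsearch.search.builder.SearchSourceBuilder;

// Sketch: a search source that opts into query profiling, which is what
// makes source.profile() return true in parseSource above.
class ProfiledSearchSketch {
    static SearchSourceBuilder profiledSource() {
        return new SearchSourceBuilder().profile(true).size(10);
    }
}
---------------------------------------------------------------------------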

@@ -23,7 +23,7 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.text.StringAndBytesText;
 import org.elasticsearch.common.text.Text;

 import java.io.IOException;
@@ -42,8 +42,8 @@ public class SearchShardTarget implements Streamable, Comparable<SearchShardTarg
     }

     public SearchShardTarget(String nodeId, String index, int shardId) {
-        this.nodeId = nodeId == null ? null : new StringAndBytesText(nodeId);
-        this.index = new StringAndBytesText(index);
+        this.nodeId = nodeId == null ? null : new Text(nodeId);
+        this.index = new Text(index);
         this.shardId = shardId;
     }
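Several hunks in this commit fold StringText, BytesText, and StringAndBytesText into the single Text class, which can be constructed from either a String or a BytesReference and converts lazily between the two. A usage sketch (assuming the consolidated 2.x org.elasticsearch.common.text.Text API seen in the hunks above):

---------------------------------------------------------------------------
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.text.Text;

// The consolidated Text type accepts either representation and materializes
// the other on demand, replacing the old three-class hierarchy.
class TextSketch {
    public static void main(String[] args) {
        Text fromString = new Text("my_index");
        Text fromBytes = new Text(new BytesArray(new byte[]{'m', 'y'}));
        System.out.println(fromString.string());
        System.out.println(fromBytes.string()); // converted from bytes lazily
    }
}
---------------------------------------------------------------------------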

@@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations;

 import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Query;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.lucene.search.Queries;
@@ -30,10 +31,13 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.profile.CollectorResult;
+import org.elasticsearch.search.profile.InternalProfileCollector;
 import org.elasticsearch.search.query.QueryPhaseExecutionException;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -81,8 +85,13 @@ public class AggregationPhase implements SearchPhase {
             }
             context.aggregations().aggregators(aggregators);
             if (!collectors.isEmpty()) {
-                final BucketCollector collector = BucketCollector.wrap(collectors);
-                collector.preCollection();
+                Collector collector = BucketCollector.wrap(collectors);
+                ((BucketCollector)collector).preCollection();
+                if (context.getProfilers() != null) {
+                    collector = new InternalProfileCollector(collector, CollectorResult.REASON_AGGREGATION,
+                            // TODO: report on child aggs as well
+                            Collections.emptyList());
+                }
                 context.queryCollectors().put(AggregationPhase.class, collector);
             }
         } catch (IOException e) {
@ -116,6 +125,7 @@ public class AggregationPhase implements SearchPhase {
BucketCollector globalsCollector = BucketCollector.wrap(globals); BucketCollector globalsCollector = BucketCollector.wrap(globals);
Query query = Queries.newMatchAllQuery(); Query query = Queries.newMatchAllQuery();
Query searchFilter = context.searchFilter(context.types()); Query searchFilter = context.searchFilter(context.types());
if (searchFilter != null) { if (searchFilter != null) {
BooleanQuery filtered = new BooleanQuery.Builder() BooleanQuery filtered = new BooleanQuery.Builder()
.add(query, Occur.MUST) .add(query, Occur.MUST)
@ -124,8 +134,20 @@ public class AggregationPhase implements SearchPhase {
query = filtered; query = filtered;
} }
try { try {
final Collector collector;
if (context.getProfilers() == null) {
collector = globalsCollector;
} else {
InternalProfileCollector profileCollector = new InternalProfileCollector(
globalsCollector, CollectorResult.REASON_AGGREGATION_GLOBAL,
// TODO: report on sub collectors
Collections.emptyList());
collector = profileCollector;
// start a new profile with this collector
context.getProfilers().addProfiler().setCollector(profileCollector);
}
globalsCollector.preCollection(); globalsCollector.preCollection();
context.searcher().search(query, globalsCollector); context.searcher().search(query, collector);
} catch (Exception e) { } catch (Exception e) {
throw new QueryPhaseExecutionException(context, "Failed to execute global aggregators", e); throw new QueryPhaseExecutionException(context, "Failed to execute global aggregators", e);
} finally { } finally {
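
The hunk above times aggregation collection by wrapping the BucketCollector in an InternalProfileCollector whenever profiling is enabled, so the aggregators run unchanged while timings are recorded around them. A minimal standalone sketch of that wrap-to-profile idea follows; SimpleCollector and TimingCollector are invented stand-ins, not the Lucene or Elasticsearch types.

    import java.util.ArrayList;
    import java.util.List;

    interface SimpleCollector {
        void collect(int doc);
    }

    // Decorator that forwards every call and accumulates the elapsed time.
    final class TimingCollector implements SimpleCollector {
        private final SimpleCollector delegate;
        private long elapsedNanos;

        TimingCollector(SimpleCollector delegate) {
            this.delegate = delegate;
        }

        @Override
        public void collect(int doc) {
            long start = System.nanoTime();
            try {
                delegate.collect(doc);                      // the wrapped collector does the real work
            } finally {
                elapsedNanos += System.nanoTime() - start;  // counted even if collect() throws
            }
        }

        long elapsedNanos() {
            return elapsedNanos;
        }
    }

    public class WrapDemo {
        public static void main(String[] args) {
            List<Integer> seen = new ArrayList<>();
            TimingCollector timed = new TimingCollector(seen::add);
            for (int doc = 0; doc < 1000; doc++) {
                timed.collect(doc);
            }
            System.out.println("collected " + seen.size() + " docs in " + timed.elapsedNanos() + "ns");
        }
    }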

@@ -25,6 +25,7 @@ import org.apache.lucene.search.Collector;

 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.stream.StreamSupport;

@@ -99,6 +100,11 @@ public abstract class BucketCollector implements Collector {
                 }
                 return false;
             }
+
+            @Override
+            public String toString() {
+                return Arrays.toString(collectors);
+            }
         };
     }
 }

@@ -23,7 +23,6 @@ import org.apache.lucene.util.PriorityQueue;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.rounding.Rounding;
-import org.elasticsearch.common.text.StringText;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.aggregations.AggregationExecutionException;
@@ -151,7 +150,7 @@ public class InternalHistogram<B extends InternalHistogram.Bucket> extends Inter
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         if (formatter != ValueFormatter.RAW) {
-            Text keyTxt = new StringText(formatter.format(key));
+            Text keyTxt = new Text(formatter.format(key));
             if (keyed) {
                 builder.startObject(keyTxt.string());
             } else {

@@ -22,6 +22,7 @@ package org.elasticsearch.search.builder;

 import com.carrotsearch.hppc.ObjectFloatHashMap;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.support.ToXContentToBytes;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
@@ -91,6 +92,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
     public static final ParseField RESCORE_FIELD = new ParseField("rescore");
     public static final ParseField STATS_FIELD = new ParseField("stats");
     public static final ParseField EXT_FIELD = new ParseField("ext");
+    public static final ParseField PROFILE_FIELD = new ParseField("profile");

     private static final SearchSourceBuilder PROTOTYPE = new SearchSourceBuilder();
@@ -158,6 +160,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ

     private BytesReference ext = null;

+    private boolean profile = false;
+
     /**
      * Constructs a new search source builder.
      */
@@ -475,6 +480,22 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
         return this;
     }

+    /**
+     * Should the query be profiled. Defaults to <tt>false</tt>
+     */
+    public SearchSourceBuilder profile(boolean profile) {
+        this.profile = profile;
+        return this;
+    }
+
+    /**
+     * Return whether to profile query execution.
+     */
+    public boolean profile() {
+        return profile;
+    }
+
     /**
      * Gets the bytes representing the rescore builders for this request.
      */
@@ -723,6 +744,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                     builder.fieldNames = fieldNames;
                 } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) {
                     builder.sort(parser.text());
+                } else if (context.parseFieldMatcher().match(currentFieldName, PROFILE_FIELD)) {
+                    builder.profile = parser.booleanValue();
                 } else {
                     throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].",
                             parser.getTokenLocation());
@@ -931,6 +954,10 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
             builder.field(EXPLAIN_FIELD.getPreferredName(), explain);
         }

+        if (profile) {
+            builder.field("profile", true);
+        }
+
         if (fetchSourceContext != null) {
             builder.field(_SOURCE_FIELD.getPreferredName(), fetchSourceContext);
         }
@@ -1212,6 +1239,11 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
             if (in.readBoolean()) {
                 builder.ext = in.readBytesReference();
             }
+            if (in.getVersion().onOrAfter(Version.V_2_2_0)) {
+                builder.profile = in.readBoolean();
+            } else {
+                builder.profile = false;
+            }
             return builder;
         }
@@ -1325,13 +1357,16 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
         if (hasExt) {
             out.writeBytesReference(ext);
         }
+        if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
+            out.writeBoolean(profile);
+        }
     }

     @Override
     public int hashCode() {
         return Objects.hash(aggregations, explain, fetchSourceContext, fieldDataFields, fieldNames, from,
                 highlightBuilder, indexBoost, innerHitsBuilder, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields,
-                size, sorts, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version);
+                size, sorts, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version, profile);
     }

     @Override
@@ -1364,6 +1399,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ
                 && Objects.equals(terminateAfter, other.terminateAfter)
                 && Objects.equals(timeoutInMillis, other.timeoutInMillis)
                 && Objects.equals(trackScores, other.trackScores)
-                && Objects.equals(version, other.version);
+                && Objects.equals(version, other.version)
+                && Objects.equals(profile, other.profile);
     }
 }
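
The readFrom/writeTo hunks above gate the new profile flag on Version.V_2_2_0 so that mixed-version clusters stay wire-compatible: an older node neither writes nor expects the extra byte. A self-contained sketch of the same version-gating pattern, using plain java.io streams and an illustrative integer version id in place of StreamInput/StreamOutput and Version:

    import java.io.*;

    public class VersionGateDemo {
        static final int V_2_2_0 = 2020099; // illustrative version id, not the real constant

        static void write(DataOutput out, int remoteVersion, boolean profile) throws IOException {
            if (remoteVersion >= V_2_2_0) { // older readers would not expect this byte
                out.writeBoolean(profile);
            }
        }

        static boolean read(DataInput in, int remoteVersion) throws IOException {
            if (remoteVersion >= V_2_2_0) {
                return in.readBoolean();
            }
            return false; // default when the sender was too old to know the field
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            write(new DataOutputStream(bytes), V_2_2_0, true);
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            System.out.println("profile = " + read(in, V_2_2_0));
        }
    }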

@@ -43,7 +43,6 @@ import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
 import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
 import org.elasticsearch.search.dfs.AggregatedDfs;
 import org.elasticsearch.search.dfs.DfsSearchResult;
@@ -52,9 +51,11 @@ import org.elasticsearch.search.fetch.FetchSearchResultProvider;
 import org.elasticsearch.search.internal.InternalSearchHit;
 import org.elasticsearch.search.internal.InternalSearchHits;
 import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.profile.InternalProfileShardResults;
 import org.elasticsearch.search.query.QuerySearchResult;
 import org.elasticsearch.search.query.QuerySearchResultProvider;
 import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.profile.ProfileShardResult;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -410,6 +411,17 @@ public class SearchPhaseController extends AbstractComponent {
             }
         }

+        // Collect profile results
+        InternalProfileShardResults shardResults = null;
+        if (!queryResults.isEmpty() && firstResult.profileResults() != null) {
+            Map<String, List<ProfileShardResult>> profileResults = new HashMap<>(queryResults.size());
+            for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults) {
+                String key = entry.value.queryResult().shardTarget().toString();
+                profileResults.put(key, entry.value.queryResult().profileResults());
+            }
+            shardResults = new InternalProfileShardResults(profileResults);
+        }
+
         if (aggregations != null) {
             List<SiblingPipelineAggregator> pipelineAggregators = firstResult.pipelineAggregators();
             if (pipelineAggregators != null) {
@@ -427,7 +439,7 @@ public class SearchPhaseController extends AbstractComponent {
         InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore);
-        return new InternalSearchResponse(searchHits, aggregations, suggest, timedOut, terminatedEarly);
+        return new InternalSearchResponse(searchHits, aggregations, suggest, shardResults, timedOut, terminatedEarly);
     }
 }
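
The merge step above keys each shard's profile list by shardTarget().toString() and folds them all into a single InternalProfileShardResults for the response. A toy model of that fold, with plain longs standing in for ProfileShardResult and hand-written keys standing in for shard targets:

    import java.util.*;

    public class ShardMergeDemo {
        public static void main(String[] args) {
            Map<String, List<Long>> profileResults = new HashMap<>();
            // one entry per shard, keyed the way shardTarget().toString() keys them above
            profileResults.put("[node_s0][test][0]", Arrays.asList(1_200_000L));
            profileResults.put("[node_s0][test][1]", Arrays.asList(850_000L, 40_000L));

            for (Map.Entry<String, List<Long>> entry : profileResults.entrySet()) {
                System.out.println(entry.getKey() + " -> " + entry.getValue().size() + " profiled search(es)");
            }
        }
    }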

@@ -31,7 +31,7 @@ import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.regex.Regex;
-import org.elasticsearch.common.text.StringAndBytesText;
+import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
@@ -198,7 +198,7 @@ public class FetchPhase implements SearchPhase {
         DocumentMapper documentMapper = context.mapperService().documentMapper(fieldsVisitor.uid().type());
         Text typeText;
         if (documentMapper == null) {
-            typeText = new StringAndBytesText(fieldsVisitor.uid().type());
+            typeText = new Text(fieldsVisitor.uid().type());
         } else {
             typeText = documentMapper.typeText();
         }

@@ -82,7 +82,7 @@ public final class CustomQueryScorer extends QueryScorer {
         } else if (query instanceof FiltersFunctionScoreQuery) {
             query = ((FiltersFunctionScoreQuery) query).getSubQuery();
             extract(query, query.getBoost(), terms);
-        } else {
+        } else if (terms.isEmpty()) {
             extractWeightedTerms(terms, query, query.getBoost());
         }
     }

@@ -33,7 +33,7 @@ import org.apache.lucene.search.vectorhighlight.SimpleFieldFragList;
 import org.apache.lucene.search.vectorhighlight.SimpleFragListBuilder;
 import org.apache.lucene.search.vectorhighlight.SingleFragListBuilder;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
 import org.elasticsearch.search.fetch.FetchSubPhase;
@@ -159,7 +159,7 @@ public class FastVectorHighlighter implements Highlighter {
             }

             if (fragments != null && fragments.length > 0) {
-                return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+                return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
             }

             int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize();
@@ -170,7 +170,7 @@ public class FastVectorHighlighter implements Highlighter {
                 fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(), mapper.fieldType().names().indexName(),
                         fieldFragList, 1, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder);
                 if (fragments != null && fragments.length > 0) {
-                    return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+                    return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
                 }
             }

@@ -22,7 +22,6 @@ package org.elasticsearch.search.highlight;

 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.text.StringText;
 import org.elasticsearch.common.text.Text;

 import java.io.IOException;
@@ -90,7 +89,7 @@ public class HighlightField implements Streamable {
         if (in.readBoolean()) {
             int size = in.readVInt();
             if (size == 0) {
-                fragments = StringText.EMPTY_ARRAY;
+                fragments = Text.EMPTY_ARRAY;
             } else {
                 fragments = new Text[size];
                 for (int i = 0; i < size; i++) {

@@ -33,9 +33,7 @@ import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
 import org.apache.lucene.search.highlight.TextFragment;
 import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.CollectionUtil;
-import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.common.text.StringText;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
@@ -158,7 +156,7 @@ public class PlainHighlighter implements Highlighter {
         }

         if (fragments.length > 0) {
-            return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+            return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
         }

         int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize();
@@ -172,7 +170,7 @@ public class PlainHighlighter implements Highlighter {
                 throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
             }
             if (end > 0) {
-                return new HighlightField(highlighterContext.fieldName, new Text[] { new StringText(fieldContents.substring(0, end)) });
+                return new HighlightField(highlighterContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) });
             }
         }
         return null;

@@ -28,7 +28,7 @@ import org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator;
 import org.apache.lucene.search.postingshighlight.Snippet;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.search.fetch.FetchPhaseExecutionException;
 import org.elasticsearch.search.fetch.FetchSubPhase;
@@ -122,7 +122,7 @@ public class PostingsHighlighter implements Highlighter {
         }

         if (fragments.length > 0) {
-            return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments));
+            return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments));
         }

         return null;

@@ -26,6 +26,9 @@ import org.apache.lucene.search.*;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.search.dfs.AggregatedDfs;
+import org.elasticsearch.search.profile.ProfileBreakdown;
+import org.elasticsearch.search.profile.ProfileWeight;
+import org.elasticsearch.search.profile.Profiler;

 import java.io.IOException;

@@ -43,26 +46,44 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {

     private final Engine.Searcher engineSearcher;

-    public ContextIndexSearcher(SearchContext searchContext, Engine.Searcher searcher) {
+    // TODO revisit moving the profiler to inheritance or wrapping model in the future
+    private Profiler profiler;
+
+    public ContextIndexSearcher(Engine.Searcher searcher,
+            QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) {
         super(searcher.reader());
         in = searcher.searcher();
         engineSearcher = searcher;
         setSimilarity(searcher.searcher().getSimilarity(true));
-        setQueryCache(searchContext.getQueryCache());
-        setQueryCachingPolicy(searchContext.indexShard().getQueryCachingPolicy());
+        setQueryCache(queryCache);
+        setQueryCachingPolicy(queryCachingPolicy);
     }

     @Override
     public void close() {
     }

+    public void setProfiler(Profiler profiler) {
+        this.profiler = profiler;
+    }
+
     public void setAggregatedDfs(AggregatedDfs aggregatedDfs) {
         this.aggregatedDfs = aggregatedDfs;
     }

     @Override
     public Query rewrite(Query original) throws IOException {
-        return in.rewrite(original);
+        if (profiler != null) {
+            profiler.startRewriteTime();
+        }
+
+        try {
+            return in.rewrite(original);
+        } finally {
+            if (profiler != null) {
+                profiler.stopAndAddRewriteTime();
+            }
+        }
     }

     @Override
@@ -72,8 +93,34 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
         if (aggregatedDfs != null && needsScores) {
             // if scores are needed and we have dfs data then use it
             return super.createNormalizedWeight(query, needsScores);
+        } else if (profiler != null) {
+            // we need to use the createWeight method to insert the wrappers
+            return super.createNormalizedWeight(query, needsScores);
+        } else {
+            return in.createNormalizedWeight(query, needsScores);
+        }
+    }
+
+    @Override
+    public Weight createWeight(Query query, boolean needsScores) throws IOException {
+        if (profiler != null) {
+            // createWeight() is called for each query in the tree, so we tell the queryProfiler
+            // each invocation so that it can build an internal representation of the query
+            // tree
+            ProfileBreakdown profile = profiler.getQueryBreakdown(query);
+            profile.startTime(ProfileBreakdown.TimingType.CREATE_WEIGHT);
+            final Weight weight;
+            try {
+                weight = super.createWeight(query, needsScores);
+            } finally {
+                profile.stopAndRecordTime();
+                profiler.pollLastQuery();
+            }
+            return new ProfileWeight(query, weight, profile);
+        } else {
+            // needs to be 'super', not 'in' in order to use aggregated DFS
+            return super.createWeight(query, needsScores);
         }
-        return in.createNormalizedWeight(query, needsScores);
     }

     @Override
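
Both rewrite() and createWeight() above follow the same discipline: start the timer, delegate, and stop the timer in a finally block so a throwing query can never leave the profiler running. A standalone sketch of that idiom; the field names echo the profiler's, but nothing here is the real Profiler API:

    public class TimedCallDemo {
        private long rewriteTime;     // accumulated nanos across calls
        private long rewriteScratch;  // start timestamp of the in-flight call

        String rewrite(String query) {
            rewriteScratch = System.nanoTime();
            try {
                return query.trim();  // stands in for in.rewrite(original)
            } finally {
                rewriteTime += System.nanoTime() - rewriteScratch;  // runs on success or failure
                rewriteScratch = 0;
            }
        }

        public static void main(String[] args) {
            TimedCallDemo demo = new TimedCallDemo();
            demo.rewrite("  field:value  ");
            System.out.println("rewrite took " + demo.rewriteTime + "ns");
        }
    }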

@@ -58,6 +58,8 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext;
 import org.elasticsearch.search.highlight.SearchContextHighlight;
 import org.elasticsearch.search.lookup.SearchLookup;
 import org.elasticsearch.search.query.QueryPhaseExecutionException;
+import org.elasticsearch.search.profile.Profiler;
+import org.elasticsearch.search.profile.Profilers;
 import org.elasticsearch.search.query.QuerySearchResult;
 import org.elasticsearch.search.rescore.RescoreSearchContext;
 import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -129,10 +131,10 @@ public class DefaultSearchContext extends SearchContext {
     private List<RescoreSearchContext> rescore;
     private SearchLookup searchLookup;
     private volatile long keepAlive;
-    private ScoreDoc lastEmittedDoc;
     private final long originNanoTime = System.nanoTime();
     private volatile long lastAccessTime = -1;
     private InnerHitsContext innerHitsContext;
+    private Profilers profilers;

     private final Map<String, FetchSubPhaseContext> subPhaseContexts = new HashMap<>();
     private final Map<Class<?>, Collector> queryCollectors = new HashMap<>();
@@ -158,7 +160,7 @@ public class DefaultSearchContext extends SearchContext {
         this.fetchResult = new FetchSearchResult(id, shardTarget);
         this.indexShard = indexShard;
         this.indexService = indexService;
-        this.searcher = new ContextIndexSearcher(this, engineSearcher);
+        this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
         this.timeEstimateCounter = timeEstimateCounter;
         this.timeoutInMillis = timeout.millis();
     }
@@ -724,5 +726,11 @@ public class DefaultSearchContext extends SearchContext {
     }

     @Override
-    public QueryCache getQueryCache() { return indexService.cache().query();}
+    public Profilers getProfilers() {
+        return profilers;
+    }
+
+    public void setProfilers(Profilers profilers) {
+        this.profilers = profilers;
+    }
 }

@@ -29,7 +29,6 @@ import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
-import org.elasticsearch.index.cache.query.QueryCache;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
@@ -49,6 +48,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
 import org.elasticsearch.search.fetch.source.FetchSourceContext;
 import org.elasticsearch.search.highlight.SearchContextHighlight;
 import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.profile.Profilers;
 import org.elasticsearch.search.query.QuerySearchResult;
 import org.elasticsearch.search.rescore.RescoreSearchContext;
 import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -517,8 +517,11 @@ public abstract class FilteredSearchContext extends SearchContext {
     }

     @Override
-    public Map<Class<?>, Collector> queryCollectors() { return in.queryCollectors();}
+    public Profilers getProfilers() {
+        return in.getProfilers();
+    }

     @Override
-    public QueryCache getQueryCache() { return in.getQueryCache();}
+    public Map<Class<?>, Collector> queryCollectors() { return in.queryCollectors();}
 }

@@ -30,7 +30,7 @@ import org.elasticsearch.common.compress.CompressorFactory;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
-import org.elasticsearch.common.text.StringAndBytesText;
+import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -104,14 +104,14 @@ public class InternalSearchHit implements SearchHit {

     public InternalSearchHit(int docId, String id, Text type, Map<String, SearchHitField> fields) {
         this.docId = docId;
-        this.id = new StringAndBytesText(id);
+        this.id = new Text(id);
         this.type = type;
         this.fields = fields;
     }

     public InternalSearchHit(int nestedTopDocId, String id, Text type, InternalNestedIdentity nestedIdentity, Map<String, SearchHitField> fields) {
         this.docId = nestedTopDocId;
-        this.id = new StringAndBytesText(id);
+        this.id = new Text(id);
         this.type = type;
         this.nestedIdentity = nestedIdentity;
         this.fields = fields;
@@ -339,7 +339,7 @@ public class InternalSearchHit implements SearchHit {
         if (sortValues != null) {
             for (int i = 0; i < sortValues.length; i++) {
                 if (sortValues[i] instanceof BytesRef) {
-                    sortValuesCopy[i] = new StringAndBytesText(new BytesArray((BytesRef) sortValues[i]));
+                    sortValuesCopy[i] = new Text(new BytesArray((BytesRef) sortValues[i]));
                 }
             }
         }
@@ -783,7 +783,7 @@ public class InternalSearchHit implements SearchHit {
         private InternalNestedIdentity child;

         public InternalNestedIdentity(String field, int offset, InternalNestedIdentity child) {
-            this.field = new StringAndBytesText(field);
+            this.field = new Text(field);
             this.offset = offset;
             this.child = child;
         }

@@ -28,9 +28,14 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.profile.InternalProfileShardResults;
+import org.elasticsearch.search.profile.ProfileShardResult;
 import org.elasticsearch.search.suggest.Suggest;

 import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;

 import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHits;

@@ -40,7 +45,7 @@ import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHit
 public class InternalSearchResponse implements Streamable, ToXContent {

     public static InternalSearchResponse empty() {
-        return new InternalSearchResponse(InternalSearchHits.empty(), null, null, false, null);
+        return new InternalSearchResponse(InternalSearchHits.empty(), null, null, null, false, null);
     }

     private InternalSearchHits hits;
@@ -49,6 +54,8 @@ public class InternalSearchResponse implements Streamable, ToXContent {

     private Suggest suggest;

+    private InternalProfileShardResults profileResults;
+
     private boolean timedOut;

     private Boolean terminatedEarly = null;
@@ -56,10 +63,12 @@ public class InternalSearchResponse implements Streamable, ToXContent {
     private InternalSearchResponse() {
     }

-    public InternalSearchResponse(InternalSearchHits hits, InternalAggregations aggregations, Suggest suggest, boolean timedOut, Boolean terminatedEarly) {
+    public InternalSearchResponse(InternalSearchHits hits, InternalAggregations aggregations, Suggest suggest,
+            InternalProfileShardResults profileResults, boolean timedOut, Boolean terminatedEarly) {
         this.hits = hits;
         this.aggregations = aggregations;
         this.suggest = suggest;
+        this.profileResults = profileResults;
         this.timedOut = timedOut;
         this.terminatedEarly = terminatedEarly;
     }
@@ -84,6 +93,19 @@ public class InternalSearchResponse implements Streamable, ToXContent {
         return suggest;
     }

+    /**
+     * Returns the profile results for this search response (including all shards).
+     * An empty map is returned if profiling was not enabled
+     *
+     * @return Profile results
+     */
+    public Map<String, List<ProfileShardResult>> profile() {
+        if (profileResults == null) {
+            return Collections.emptyMap();
+        }
+        return profileResults.getShardResults();
+    }
+
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         hits.toXContent(builder, params);
@@ -93,6 +115,9 @@ public class InternalSearchResponse implements Streamable, ToXContent {
         if (suggest != null) {
             suggest.toXContent(builder, params);
         }
+        if (profileResults != null) {
+            profileResults.toXContent(builder, params);
+        }
         return builder;
     }

@@ -114,6 +139,12 @@ public class InternalSearchResponse implements Streamable, ToXContent {
         timedOut = in.readBoolean();

         terminatedEarly = in.readOptionalBoolean();
+
+        if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) {
+            profileResults = new InternalProfileShardResults(in);
+        } else {
+            profileResults = null;
+        }
     }

     @Override
@@ -134,5 +165,14 @@ public class InternalSearchResponse implements Streamable, ToXContent {
         out.writeBoolean(timedOut);

         out.writeOptionalBoolean(terminatedEarly);
+
+        if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
+            if (profileResults == null) {
+                out.writeBoolean(false);
+            } else {
+                out.writeBoolean(true);
+                profileResults.writeTo(out);
+            }
+        }
     }
 }
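
writeTo() above frames the optional profile payload with a leading boolean, so the reader always knows whether a payload follows before attempting to deserialize one. The same framing in miniature, with java.io streams as stand-ins for the Elasticsearch stream types:

    import java.io.*;

    public class OptionalFieldDemo {
        static void writeOptional(DataOutputStream out, String value) throws IOException {
            if (value == null) {
                out.writeBoolean(false);  // nothing follows
            } else {
                out.writeBoolean(true);   // payload follows
                out.writeUTF(value);
            }
        }

        static String readOptional(DataInputStream in) throws IOException {
            return in.readBoolean() ? in.readUTF() : null;
        }

        public static void main(String[] args) throws IOException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            writeOptional(new DataOutputStream(bytes), "profile-data");
            DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            System.out.println(readOptional(in)); // prints profile-data
        }
    }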

@@ -35,7 +35,6 @@ import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.iterable.Iterables;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
-import org.elasticsearch.index.cache.query.QueryCache;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
@@ -56,6 +55,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext;
 import org.elasticsearch.search.fetch.source.FetchSourceContext;
 import org.elasticsearch.search.highlight.SearchContextHighlight;
 import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.profile.Profilers;
 import org.elasticsearch.search.query.QuerySearchResult;
 import org.elasticsearch.search.rescore.RescoreSearchContext;
 import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -303,6 +303,11 @@ public abstract class SearchContext extends DelegatingHasContextAndHeaders imple
     public abstract FetchSearchResult fetchResult();

+    /**
+     * Return a handle over the profilers for the current search request, or {@code null} if profiling is not enabled.
+     */
+    public abstract Profilers getProfilers();
+
     /**
      * Schedule the release of a resource. The time when {@link Releasable#close()} will be called on this object
      * is function of the provided {@link Lifetime}.
@@ -367,5 +372,4 @@ public abstract class SearchContext extends DelegatingHasContextAndHeaders imple
         CONTEXT
     }

-    public abstract QueryCache getQueryCache();
 }

@@ -71,6 +71,8 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S
     private Boolean requestCache;
     private long nowInMillis;

+    private boolean profile;
+
     ShardSearchLocalRequest() {
     }

@@ -165,6 +167,16 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S
         return scroll;
     }

+    @Override
+    public void setProfile(boolean profile) {
+        this.profile = profile;
+    }
+
+    @Override
+    public boolean isProfile() {
+        return profile;
+    }
+
     @SuppressWarnings("unchecked")
     protected void innerReadFrom(StreamInput in) throws IOException {
         index = in.readString();

@@ -59,6 +59,17 @@ public interface ShardSearchRequest extends HasContextAndHeaders {

     Scroll scroll();

+    /**
+     * Sets whether this shard search needs to be profiled.
+     * @param profile True if the shard should be profiled
+     */
+    void setProfile(boolean profile);
+
+    /**
+     * Returns true if this shard search is being profiled.
+     */
+    boolean isProfile();
+
     /**
      * Returns the cache key for this shard search request, based on its content
      */

@@ -150,4 +150,14 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
     public BytesReference cacheKey() throws IOException {
         return shardSearchLocalRequest.cacheKey();
     }
+
+    @Override
+    public void setProfile(boolean profile) {
+        shardSearchLocalRequest.setProfile(profile);
+    }
+
+    @Override
+    public boolean isProfile() {
+        return shardSearchLocalRequest.isProfile();
+    }
 }

@@ -0,0 +1,156 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
/**
* Public interface and serialization container for profiled timings of the
* Collectors used in the search. Child CollectorResults may be
* embedded inside a parent CollectorResult.
*/
public class CollectorResult implements ToXContent, Writeable {
public static final String REASON_SEARCH_COUNT = "search_count";
public static final String REASON_SEARCH_TOP_HITS = "search_top_hits";
public static final String REASON_SEARCH_TERMINATE_AFTER_COUNT = "search_terminate_after_count";
public static final String REASON_SEARCH_POST_FILTER = "search_post_filter";
public static final String REASON_SEARCH_MIN_SCORE = "search_min_score";
public static final String REASON_SEARCH_MULTI = "search_multi";
public static final String REASON_SEARCH_TIMEOUT = "search_timeout";
public static final String REASON_AGGREGATION = "aggregation";
public static final String REASON_AGGREGATION_GLOBAL = "aggregation_global";
private static final ParseField NAME = new ParseField("name");
private static final ParseField REASON = new ParseField("reason");
private static final ParseField TIME = new ParseField("time");
private static final ParseField CHILDREN = new ParseField("children");
/**
* A more friendly representation of the Collector's class name
*/
private final String collectorName;
/**
* A "hint" to help provide some context about this Collector
*/
private final String reason;
/**
* The total elapsed time for this Collector
*/
private final Long time;
/**
* A list of children collectors "embedded" inside this collector
*/
private List<CollectorResult> children;
public CollectorResult(String collectorName, String reason, Long time, List<CollectorResult> children) {
this.collectorName = collectorName;
this.reason = reason;
this.time = time;
this.children = children;
}
public CollectorResult(StreamInput in) throws IOException {
this.collectorName = in.readString();
this.reason = in.readString();
this.time = in.readLong();
int size = in.readVInt();
this.children = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
CollectorResult child = new CollectorResult(in);
this.children.add(child);
}
}
/**
* @return the profiled time for this collector (inclusive of children)
*/
public long getTime() {
return this.time;
}
/**
* @return a human readable "hint" about what this collector was used for
*/
public String getReason() {
return this.reason;
}
/**
* @return the lucene class name of the collector
*/
public String getName() {
return this.collectorName;
}
/**
* @return a list of children collectors
*/
public List<CollectorResult> getProfiledChildren() {
return children;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
builder = builder.startObject()
.field(NAME.getPreferredName(), toString())
.field(REASON.getPreferredName(), reason)
.field(TIME.getPreferredName(), String.format(Locale.US, "%.10gms", (double) (getTime() / 1000000.0)));
if (!children.isEmpty()) {
builder = builder.startArray(CHILDREN.getPreferredName());
for (CollectorResult child : children) {
builder = child.toXContent(builder, params);
}
builder = builder.endArray();
}
builder = builder.endObject();
return builder;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(collectorName);
out.writeString(reason);
out.writeLong(time);
out.writeVInt(children.size());
for (CollectorResult child : children) {
child.writeTo(out);
}
}
@Override
public Object readFrom(StreamInput in) throws IOException {
return new CollectorResult(in);
}
}
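
CollectorResult is a recursive container: a node's time is inclusive of its children, and toXContent() walks the children to emit a nested structure. A stripped-down analogue that prints such a tree; the node names and timings are illustrative, not taken from a real profile:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    class Node {
        final String name;
        final long timeNanos;      // inclusive of children, like CollectorResult
        final List<Node> children;

        Node(String name, long timeNanos, List<Node> children) {
            this.name = name;
            this.timeNanos = timeNanos;
            this.children = children;
        }

        void print(String indent) {
            System.out.println(indent + name + ": " + timeNanos + "ns");
            for (Node child : children) {
                child.print(indent + "  ");  // children render one level deeper
            }
        }
    }

    public class CollectorTreeDemo {
        public static void main(String[] args) {
            Node leaf = new Node("SimpleTopScoreDocCollector", 1200, Collections.emptyList());
            Node root = new Node("MultiCollector", 1500, Arrays.asList(leaf));
            root.print("");
        }
    }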

@@ -0,0 +1,135 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.LeafCollector;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* This class wraps a Lucene Collector and times the execution of:
* - setScorer()
* - collect()
* - doSetNextReader()
* - needsScores()
*
* InternalProfiler facilitates the linking of the Collector graph
*/
public class InternalProfileCollector implements Collector {
/**
* A more friendly representation of the Collector's class name
*/
private final String collectorName;
/**
* A "hint" to help provide some context about this Collector
*/
private final String reason;
/** The wrapped collector */
private final ProfileCollector collector;
/**
* A list of "embedded" children collectors
*/
private final List<InternalProfileCollector> children;
public InternalProfileCollector(Collector collector, String reason, List<InternalProfileCollector> children) {
this.collector = new ProfileCollector(collector);
this.reason = reason;
this.collectorName = deriveCollectorName(collector);
this.children = children;
}
/**
* @return the profiled time for this collector (inclusive of children)
*/
public long getTime() {
return collector.getTime();
}
/**
* @return a human readable "hint" about what this collector was used for
*/
public String getReason() {
return this.reason;
}
/**
* @return the lucene class name of the collector
*/
public String getName() {
return this.collectorName;
}
/**
* Creates a human-friendly representation of the Collector name.
*
* Bucket Collectors use the aggregation name in their toString() method,
* which makes the profiled output a bit nicer.
*
* @param c The Collector to derive a name from
* @return A (hopefully) prettier name
*/
private String deriveCollectorName(Collector c) {
String s = c.getClass().getSimpleName();
// MultiCollector, which wraps multiple BucketCollectors, is generated
// via an anonymous class, so this corrects the lack of a name by
// asking the enclosingClass
if (s.equals("")) {
s = c.getClass().getEnclosingClass().getSimpleName();
}
// Aggregation collector toString()'s include the user-defined agg name
if (reason.equals(CollectorResult.REASON_AGGREGATION) || reason.equals(CollectorResult.REASON_AGGREGATION_GLOBAL)) {
s += ": [" + c.toString() + "]";
}
return s;
}
@Override
public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
return collector.getLeafCollector(context);
}
@Override
public boolean needsScores() {
return collector.needsScores();
}
public CollectorResult getCollectorTree() {
return InternalProfileCollector.doGetCollectorTree(this);
}
private static CollectorResult doGetCollectorTree(InternalProfileCollector collector) {
List<CollectorResult> childResults = new ArrayList<>(collector.children.size());
for (InternalProfileCollector child : collector.children) {
CollectorResult result = doGetCollectorTree(child);
childResults.add(result);
}
return new CollectorResult(collector.getName(), collector.getReason(), collector.getTime(), childResults);
}
}
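
deriveCollectorName() above leans on a Java quirk: an anonymous class reports an empty getSimpleName(), so the enclosing class supplies a usable display name instead. A standalone demonstration of exactly that fallback:

    public class NameDemo {
        interface Collector {
            void collect();
        }

        public static void main(String[] args) {
            Collector anonymous = new Collector() {  // anonymous class (a lambda would behave differently)
                @Override
                public void collect() {
                }
            };
            String s = anonymous.getClass().getSimpleName();  // "" for anonymous classes
            if (s.isEmpty()) {
                s = anonymous.getClass().getEnclosingClass().getSimpleName();
            }
            System.out.println(s); // prints NameDemo
        }
    }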

@@ -0,0 +1,89 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.*;
import java.util.stream.Collectors;
/**
* A container class to hold all the profile results across all shards. Internally
* holds a map of shard ID -&gt; Profiled results
*/
public final class InternalProfileShardResults implements Writeable<InternalProfileShardResults>, ToXContent {
private Map<String, List<ProfileShardResult>> shardResults;
public InternalProfileShardResults(Map<String, List<ProfileShardResult>> shardResults) {
Map<String, List<ProfileShardResult>> transformed =
shardResults.entrySet()
.stream()
.collect(Collectors.toMap(
Map.Entry::getKey,
e -> Collections.unmodifiableList(e.getValue()))
);
this.shardResults = Collections.unmodifiableMap(transformed);
}
public InternalProfileShardResults(StreamInput in) throws IOException {
int size = in.readInt();
shardResults = new HashMap<>(size);
for (int i = 0; i < size; i++) {
String key = in.readString();
int shardResultsSize = in.readInt();
List<ProfileShardResult> shardResult = new ArrayList<>(shardResultsSize);
for (int j = 0; j < shardResultsSize; j++) {
ProfileShardResult result = new ProfileShardResult(in);
shardResult.add(result);
}
shardResults.put(key, shardResult);
}
}
public Map<String, List<ProfileShardResult>> getShardResults() {
return this.shardResults;
}
@Override
public InternalProfileShardResults readFrom(StreamInput in) throws IOException {
return new InternalProfileShardResults(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(shardResults.size());
for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) {
out.writeString(entry.getKey());
out.writeInt(entry.getValue().size());
for (ProfileShardResult result : entry.getValue()) {
result.writeTo(out);
}
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("profile").startArray("shards");
for (Map.Entry<String, List<ProfileShardResult>> entry : shardResults.entrySet()) {
builder.startObject().field("id",entry.getKey()).startArray("searches");
for (ProfileShardResult result : entry.getValue()) {
builder.startObject();
result.toXContent(builder, params);
builder.endObject();
}
builder.endArray().endObject();
}
builder.endArray().endObject();
return builder;
}
}
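
The constructor above freezes both the per-shard lists and the outer map, so profile results cannot be mutated once the container is built. The same defensive idiom in isolation, with strings in place of ProfileShardResult:

    import java.util.*;
    import java.util.stream.Collectors;

    public class UnmodifiableDemo {
        public static void main(String[] args) {
            Map<String, List<String>> raw = new HashMap<>();
            raw.put("shard-0", new ArrayList<>(Arrays.asList("query", "rewrite")));

            // Wrap each list, then wrap the map itself.
            Map<String, List<String>> frozen = Collections.unmodifiableMap(
                    raw.entrySet().stream().collect(Collectors.toMap(
                            Map.Entry::getKey,
                            e -> Collections.unmodifiableList(e.getValue()))));

            try {
                frozen.get("shard-0").add("oops");
            } catch (UnsupportedOperationException expected) {
                System.out.println("per-shard lists are read-only, as intended");
            }
        }
    }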

@@ -0,0 +1,235 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.apache.lucene.search.Query;
import java.util.*;
import java.util.concurrent.LinkedBlockingDeque;
/**
* This class tracks the dependency tree for queries (scoring and rewriting) and
* generates {@link ProfileBreakdown} for each node in the tree. It also finalizes the tree
* and returns a list of {@link ProfileResult} that can be serialized back to the client
*/
final class InternalProfileTree {
private ArrayList<ProfileBreakdown> timings;
/** Maps the Query to it's list of children. This is basically the dependency tree */
private ArrayList<ArrayList<Integer>> tree;
/** A list of the original queries, keyed by index position */
private ArrayList<Query> queries;
/** A list of top-level "roots". Each root can have its own tree of profiles */
private ArrayList<Integer> roots;
/** Rewrite time */
private long rewriteTime;
private long rewriteScratch;
/** A temporary stack used to record where we are in the dependency tree. Only used by scoring queries */
private Deque<Integer> stack;
private int currentToken = 0;
public InternalProfileTree() {
timings = new ArrayList<>(10);
stack = new LinkedBlockingDeque<>(10);
tree = new ArrayList<>(10);
queries = new ArrayList<>(10);
roots = new ArrayList<>(10);
}
/**
* Returns a {@link ProfileBreakdown} for a scoring query. Scoring queries (e.g. those
* that are past the rewrite phase and are now being wrapped by createWeight() ) follow
* a recursive progression. We can track the dependency tree by a simple stack
*
* The only hiccup is that the first scoring query will be identical to the last rewritten
* query, so we need to take special care to fix that
*
* @param query The scoring query we wish to profile
* @return A ProfileBreakdown for this query
*/
public ProfileBreakdown getQueryBreakdown(Query query) {
int token = currentToken;
boolean stackEmpty = stack.isEmpty();
// If the stack is empty, we are a new root query
if (stackEmpty) {
// We couldn't find a rewritten query to attach to, so just add it as a
// top-level root. This is just a precaution: it really shouldn't happen.
// We would only get here if a top-level query never rewrites for some reason.
roots.add(token);
// Increment the token since we are adding a new node, but notably, do not
// updateParent() because this was added as a root
currentToken += 1;
stack.add(token);
return addDependencyNode(query, token);
}
updateParent(token);
// Increment the token since we are adding a new node
currentToken += 1;
stack.add(token);
return addDependencyNode(query, token);
}
/**
* Begin timing a query for a specific Timing context
*/
public void startRewriteTime() {
assert rewriteScratch == 0;
rewriteScratch = System.nanoTime();
}
/**
* Halt the timing process and add the elapsed rewriting time.
* startRewriteTime() must be called for a particular context prior to calling
* stopAndAddRewriteTime(), otherwise the elapsed time will be negative and
* nonsensical
*
* @return The elapsed time
*/
public long stopAndAddRewriteTime() {
long time = Math.max(1, System.nanoTime() - rewriteScratch);
rewriteTime += time;
rewriteScratch = 0;
return time;
}
/**
* Helper method to add a new node to the dependency tree.
*
* Initializes a new list in the dependency tree, saves the query and
* generates a new {@link ProfileBreakdown} to track the timings
* of this query
*
* @param query The query to profile
* @param token The assigned token for this query
* @return A ProfileBreakdown to profile this query
*/
private ProfileBreakdown addDependencyNode(Query query, int token) {
// Add a new slot in the dependency tree
tree.add(new ArrayList<>(5));
// Save our query for lookup later
queries.add(query);
ProfileBreakdown queryTimings = new ProfileBreakdown();
timings.add(token, queryTimings);
return queryTimings;
}
/**
* Removes the last (e.g. most recent) value on the stack
*/
public void pollLast() {
stack.pollLast();
}
/**
* After the query has been run and profiled, we need to merge the flat timing map
* with the dependency graph to build a data structure that mirrors the original
* query tree
*
* @return a hierarchical representation of the profiled query tree
*/
public List<ProfileResult> getQueryTree() {
ArrayList<ProfileResult> results = new ArrayList<>(5);
for (Integer root : roots) {
results.add(doGetQueryTree(root));
}
return results;
}
/**
* Recursive helper to finalize a node in the dependency tree
* @param token The node we are currently finalizing
* @return A hierarchical representation of the tree inclusive of children at this level
*/
private ProfileResult doGetQueryTree(int token) {
Query query = queries.get(token);
ProfileBreakdown breakdown = timings.get(token);
Map<String, Long> timings = breakdown.toTimingMap();
List<Integer> children = tree.get(token);
List<ProfileResult> childrenProfileResults = Collections.emptyList();
if (children != null) {
childrenProfileResults = new ArrayList<>(children.size());
for (Integer child : children) {
ProfileResult childNode = doGetQueryTree(child);
childrenProfileResults.add(childNode);
}
}
// TODO this would be better done bottom-up instead of top-down to avoid
// calculating the same times over and over...but worth the effort?
long nodeTime = getNodeTime(timings, childrenProfileResults);
String queryDescription = query.getClass().getSimpleName();
String luceneName = query.toString();
return new ProfileResult(queryDescription, luceneName, timings, childrenProfileResults, nodeTime);
}
public long getRewriteTime() {
return rewriteTime;
}
/**
* Internal helper to add a child to the current parent node
*
* @param childToken The child to add to the current parent
*/
private void updateParent(int childToken) {
Integer parent = stack.peekLast();
ArrayList<Integer> parentNode = tree.get(parent);
parentNode.add(childToken);
tree.set(parent, parentNode);
}
/**
* Internal helper to calculate the time of a node, inclusive of children
*
* @param timings A map of breakdown timing for the node
* @param children All children profile results at this node
* @return The total time at this node, inclusive of children
*/
private static long getNodeTime(Map<String, Long> timings, List<ProfileResult> children) {
long nodeTime = 0;
for (long time : timings.values()) {
nodeTime += time;
}
// Then add up our children
for (ProfileResult child : children) {
nodeTime += getNodeTime(child.getTimeBreakdown(), child.getProfiledChildren());
}
return nodeTime;
}
}
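
The token/stack bookkeeping above is easier to see in isolation. Below is a minimal, self-contained sketch (illustrative only, not part of this commit) of the same idea: every new node receives an integer token, the stack records the current ancestry, and the tree maps each token to its children.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Illustrative sketch of InternalProfileTree's token/stack bookkeeping.
class DependencyTreeSketch {
    private final List<List<Integer>> tree = new ArrayList<>();
    private final List<Integer> roots = new ArrayList<>();
    private final Deque<Integer> stack = new ArrayDeque<>();
    private int currentToken = 0;

    /** Called when a query is wrapped for profiling; returns its token. */
    int enter() {
        int token = currentToken++;
        if (stack.isEmpty()) {
            roots.add(token);                      // no parent on the stack: a new root
        } else {
            tree.get(stack.peekLast()).add(token); // attach to the current parent
        }
        tree.add(new ArrayList<>());               // slot for this node's children
        stack.addLast(token);
        return token;
    }

    /** Called when the query's subtree has been fully wrapped. */
    void exit() {
        stack.pollLast();
    }
}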

@ -0,0 +1,113 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import java.util.Collections;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
/**
* A record of timings for the various operations that may happen during query execution.
* A node's time may be composed of several internal attributes (rewriting, weighting,
* scoring, etc.).
*/
public final class ProfileBreakdown {
/** Enumeration of all supported timing types. */
public enum TimingType {
CREATE_WEIGHT,
BUILD_SCORER,
NEXT_DOC,
ADVANCE,
MATCH,
SCORE;
@Override
public String toString() {
return name().toLowerCase(Locale.ROOT);
}
}
/**
* The accumulated timings for this query node
*/
private final long[] timings;
/** Scratch to store the current timing type. */
private TimingType currentTimingType;
/**
* The temporary scratch space for holding start-times
*/
private long scratch;
/** Sole constructor. */
public ProfileBreakdown() {
timings = new long[TimingType.values().length];
}
/**
* Begin timing a query for a specific Timing context
* @param timing The timing context being profiled
*/
public void startTime(TimingType timing) {
assert currentTimingType == null;
assert scratch == 0;
currentTimingType = timing;
scratch = System.nanoTime();
}
/**
* Halt the timing process and save the elapsed time.
* startTime() must be called for a particular context prior to calling
* stopAndRecordTime(), otherwise the elapsed time will be negative and
* nonsensical
*
* @return The elapsed time
*/
public long stopAndRecordTime() {
long time = Math.max(1, System.nanoTime() - scratch);
timings[currentTimingType.ordinal()] += time;
currentTimingType = null;
scratch = 0L;
return time;
}
/** Convert this record to a map from {@link TimingType} to times. */
public Map<String, Long> toTimingMap() {
Map<String, Long> map = new HashMap<>();
for (TimingType timingType : TimingType.values()) {
map.put(timingType.toString(), timings[timingType.ordinal()]);
}
return Collections.unmodifiableMap(map);
}
/**
* Add <code>other</code>'s timings into this breakdown
* @param other Another Breakdown to merge with this one
*/
public void merge(ProfileBreakdown other) {
assert(timings.length == other.timings.length);
for (int i = 0; i < timings.length; ++i) {
timings[i] += other.timings[i];
}
}
}
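
A typical timing cycle against this class looks like the following sketch (imports elided; this is exactly how the ProfileScorer further down uses it):

ProfileBreakdown breakdown = new ProfileBreakdown();
breakdown.startTime(ProfileBreakdown.TimingType.NEXT_DOC);
try {
    // ... advance the wrapped iterator ...
} finally {
    breakdown.stopAndRecordTime();
}
// e.g. {"next_doc" -> 1042, "score" -> 0, ...}
Map<String, Long> timings = breakdown.toTimingMap();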

@ -0,0 +1,94 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.FilterCollector;
import org.apache.lucene.search.FilterLeafCollector;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Scorer;
import java.io.IOException;
/** A collector that profiles how much time is spent calling it. */
final class ProfileCollector extends FilterCollector {
private long time;
/** Sole constructor. */
public ProfileCollector(Collector in) {
super(in);
}
/** Return the wrapped collector. */
public Collector getDelegate() {
return in;
}
@Override
public boolean needsScores() {
final long start = System.nanoTime();
try {
return super.needsScores();
} finally {
time += Math.max(1, System.nanoTime() - start);
}
}
@Override
public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
final long start = System.nanoTime();
final LeafCollector inLeafCollector;
try {
inLeafCollector = super.getLeafCollector(context);
} finally {
time += Math.max(1, System.nanoTime() - start);
}
return new FilterLeafCollector(inLeafCollector) {
@Override
public void collect(int doc) throws IOException {
final long start = System.nanoTime();
try {
super.collect(doc);
} finally {
time += Math.max(1, System.nanoTime() - start);
}
}
@Override
public void setScorer(Scorer scorer) throws IOException {
final long start = System.nanoTime();
try {
super.setScorer(scorer);
} finally {
time += Math.max(1, System.nanoTime() - start);
}
}
};
}
/** Return the total time spent on this collector. */
public long getTime() {
return time;
}
}
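
A usage sketch, assuming package-level access to ProfileCollector and a `searcher` and `query` already in scope: wrap any Lucene Collector, run the search, then read the accumulated time.

// Illustrative only: TotalHitCountCollector is a stock Lucene collector.
ProfileCollector profiled = new ProfileCollector(new TotalHitCountCollector());
searcher.search(query, profiled);
long nanosSpentCollecting = profiled.getTime();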

@ -0,0 +1,165 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
/**
* This class is the internal representation of a profiled Query, corresponding
* to a single node in the query tree. It is built after the query has finished executing
* and is merely a structured representation, rather than the entity that collects the timing
* profile (see Profiler and InternalProfileTree for that)
*
* Each ProfileResult has a List of ProfileResults, which will contain
* "children" queries if applicable
*/
final class ProfileResult implements Writeable<ProfileResult>, ToXContent {
private static final ParseField QUERY_TYPE = new ParseField("query_type");
private static final ParseField LUCENE_DESCRIPTION = new ParseField("lucene");
private static final ParseField NODE_TIME = new ParseField("time");
private static final ParseField CHILDREN = new ParseField("children");
private static final ParseField BREAKDOWN = new ParseField("breakdown");
private final String queryType;
private final String luceneDescription;
private final Map<String, Long> timings;
private final long nodeTime;
private final List<ProfileResult> children;
public ProfileResult(String queryType, String luceneDescription, Map<String, Long> timings, List<ProfileResult> children, long nodeTime) {
this.queryType = queryType;
this.luceneDescription = luceneDescription;
this.timings = timings;
this.children = children;
this.nodeTime = nodeTime;
}
public ProfileResult(StreamInput in) throws IOException{
this.queryType = in.readString();
this.luceneDescription = in.readString();
this.nodeTime = in.readLong();
int timingsSize = in.readVInt();
this.timings = new HashMap<>(timingsSize);
for (int i = 0; i < timingsSize; ++i) {
timings.put(in.readString(), in.readLong());
}
int size = in.readVInt();
this.children = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
children.add(new ProfileResult(in));
}
}
/**
* Retrieve the lucene description of this query (e.g. the "explain" text)
*/
public String getLuceneDescription() {
return luceneDescription;
}
/**
* Retrieve the name of the query (e.g. "TermQuery")
*/
public String getQueryName() {
return queryType;
}
/**
* Returns the timing breakdown for this particular query node
*/
public Map<String, Long> getTimeBreakdown() {
return Collections.unmodifiableMap(timings);
}
/**
* Returns the total time (inclusive of children) for this query node.
*
* @return elapsed time in nanoseconds
*/
public long getTime() {
return nodeTime;
}
/**
* Returns a list of all profiled children queries
*/
public List<ProfileResult> getProfiledChildren() {
return Collections.unmodifiableList(children);
}
@Override
public ProfileResult readFrom(StreamInput in) throws IOException {
return new ProfileResult(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(queryType);
out.writeString(luceneDescription);
out.writeLong(nodeTime); // not writeVLong because the value can be negative
out.writeVInt(timings.size());
for (Map.Entry<String, Long> entry : timings.entrySet()) {
out.writeString(entry.getKey());
out.writeLong(entry.getValue());
}
out.writeVInt(children.size());
for (ProfileResult child : children) {
child.writeTo(out);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder = builder.startObject()
.field(QUERY_TYPE.getPreferredName(), queryType)
.field(LUCENE_DESCRIPTION.getPreferredName(), luceneDescription)
.field(NODE_TIME.getPreferredName(), String.format(Locale.US, "%.10gms", (double)(getTime() / 1000000.0)))
.field(BREAKDOWN.getPreferredName(), timings);
if (!children.isEmpty()) {
builder = builder.startArray(CHILDREN.getPreferredName());
for (ProfileResult child : children) {
builder = child.toXContent(builder, params);
}
builder = builder.endArray();
}
builder = builder.endObject();
return builder;
}
}
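
Since getTime() is inclusive of children, a caller that wants a node's "self" time has to subtract the children back out. A small illustrative helper (not part of this commit):

// Illustrative: a node's self time is its inclusive time minus its children's.
static long selfTime(ProfileResult node) {
    long self = node.getTime();
    for (ProfileResult child : node.getProfiledChildren()) {
        self -= child.getTime();
    }
    return self;
}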

@ -0,0 +1,158 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;
import java.io.IOException;
import java.util.Collection;
/**
* {@link Scorer} wrapper that will compute how much time is spent on moving
* the iterator, confirming matches and computing scores.
*/
final class ProfileScorer extends Scorer {
private final Scorer scorer;
private ProfileWeight profileWeight;
private final ProfileBreakdown profile;
ProfileScorer(ProfileWeight w, Scorer scorer, ProfileBreakdown profile) throws IOException {
super(w);
this.scorer = scorer;
this.profileWeight = w;
this.profile = profile;
}
@Override
public int docID() {
return scorer.docID();
}
@Override
public int advance(int target) throws IOException {
profile.startTime(ProfileBreakdown.TimingType.ADVANCE);
try {
return scorer.advance(target);
} finally {
profile.stopAndRecordTime();
}
}
@Override
public int nextDoc() throws IOException {
profile.startTime(ProfileBreakdown.TimingType.NEXT_DOC);
try {
return scorer.nextDoc();
} finally {
profile.stopAndRecordTime();
}
}
@Override
public float score() throws IOException {
profile.startTime(ProfileBreakdown.TimingType.SCORE);
try {
return scorer.score();
} finally {
profile.stopAndRecordTime();
}
}
@Override
public int freq() throws IOException {
return scorer.freq();
}
@Override
public long cost() {
return scorer.cost();
}
@Override
public Weight getWeight() {
return profileWeight;
}
@Override
public Collection<ChildScorer> getChildren() {
return scorer.getChildren();
}
@Override
public TwoPhaseIterator asTwoPhaseIterator() {
final TwoPhaseIterator in = scorer.asTwoPhaseIterator();
if (in == null) {
return null;
}
final DocIdSetIterator inApproximation = in.approximation();
final DocIdSetIterator approximation = new DocIdSetIterator() {
@Override
public int advance(int target) throws IOException {
profile.startTime(ProfileBreakdown.TimingType.ADVANCE);
try {
return inApproximation.advance(target);
} finally {
profile.stopAndRecordTime();
}
}
@Override
public int nextDoc() throws IOException {
profile.startTime(ProfileBreakdown.TimingType.NEXT_DOC);
try {
return inApproximation.nextDoc();
} finally {
profile.stopAndRecordTime();
}
}
@Override
public int docID() {
return inApproximation.docID();
}
@Override
public long cost() {
return inApproximation.cost();
}
};
return new TwoPhaseIterator(approximation) {
@Override
public boolean matches() throws IOException {
profile.startTime(ProfileBreakdown.TimingType.MATCH);
try {
return in.matches();
} finally {
profile.stopAndRecordTime();
}
}
@Override
public float matchCost() {
return in.matchCost();
}
};
}
}

@ -0,0 +1,103 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* A container class to hold the profile results for a single shard in the request.
* Contains a list of query profiles, a collector tree and a total rewrite tree.
*/
public final class ProfileShardResult implements Writeable<ProfileShardResult>, ToXContent {
private final List<ProfileResult> profileResults;
private final CollectorResult profileCollector;
private final long rewriteTime;
public ProfileShardResult(List<ProfileResult> profileResults, long rewriteTime,
CollectorResult profileCollector) {
assert(profileCollector != null);
this.profileResults = profileResults;
this.profileCollector = profileCollector;
this.rewriteTime = rewriteTime;
}
public ProfileShardResult(StreamInput in) throws IOException {
int profileSize = in.readVInt();
profileResults = new ArrayList<>(profileSize);
for (int j = 0; j < profileSize; j++) {
profileResults.add(new ProfileResult(in));
}
profileCollector = new CollectorResult(in);
rewriteTime = in.readLong();
}
public List<ProfileResult> getQueryResults() {
return Collections.unmodifiableList(profileResults);
}
public long getRewriteTime() {
return rewriteTime;
}
public CollectorResult getCollectorResult() {
return profileCollector;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startArray("query");
for (ProfileResult p : profileResults) {
p.toXContent(builder, params);
}
builder.endArray();
builder.field("rewrite_time", rewriteTime);
builder.startArray("collector");
profileCollector.toXContent(builder, params);
builder.endArray();
return builder;
}
@Override
public ProfileShardResult readFrom(StreamInput in) throws IOException {
return new ProfileShardResult(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(profileResults.size());
for (ProfileResult p : profileResults) {
p.writeTo(out);
}
profileCollector.writeTo(out);
out.writeLong(rewriteTime);
}
}

@ -0,0 +1,97 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BulkScorer;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import java.io.IOException;
import java.util.Set;
/**
* Weight wrapper that will compute how much time it takes to build the
* {@link Scorer} and then return a {@link Scorer} that is wrapped in
* order to compute timings as well.
*/
public final class ProfileWeight extends Weight {
private final Weight subQueryWeight;
private final ProfileBreakdown profile;
public ProfileWeight(Query query, Weight subQueryWeight, ProfileBreakdown profile) throws IOException {
super(query);
this.subQueryWeight = subQueryWeight;
this.profile = profile;
}
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
profile.startTime(ProfileBreakdown.TimingType.BUILD_SCORER);
final Scorer subQueryScorer;
try {
subQueryScorer = subQueryWeight.scorer(context);
} finally {
profile.stopAndRecordTime();
}
if (subQueryScorer == null) {
return null;
}
return new ProfileScorer(this, subQueryScorer, profile);
}
@Override
public BulkScorer bulkScorer(LeafReaderContext context) throws IOException {
// We use the default bulk scorer instead of the specialized one. The reason
// is that Lucene's BulkScorers do everything at once: finding matches,
// scoring them and calling the collector, so they make it impossible to
// see where time is spent, which is the purpose of query profiling.
// The default bulk scorer will pull a scorer and iterate over matches,
// this might be a significantly different execution path for some queries
// like disjunctions, but in general this is what is done anyway
return super.bulkScorer(context);
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
return subQueryWeight.explain(context, doc);
}
@Override
public float getValueForNormalization() throws IOException {
return subQueryWeight.getValueForNormalization();
}
@Override
public void normalize(float norm, float topLevelBoost) {
subQueryWeight.normalize(norm, topLevelBoost);
}
@Override
public void extractTerms(Set<Term> set) {
subQueryWeight.extractTerms(set);
}
}
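
The wrapping itself happens in ContextIndexSearcher (not shown in this hunk). A hypothetical sketch of that wiring, using only names introduced in this commit:

// Hypothetical wiring: wrap a freshly created Weight for profiling.
Weight profile(Query query, Weight delegate, Profiler profiler) throws IOException {
    ProfileBreakdown breakdown = profiler.getQueryBreakdown(query); // registers the node in the tree
    return new ProfileWeight(query, delegate, breakdown);
}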

@ -0,0 +1,130 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.apache.lucene.search.Query;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
/**
* This class acts as a thread-local storage for profiling a query. It also
* builds a representation of the query tree which is constructed
* "online" as the weights are wrapped by ContextIndexSearcher. This allows us
* to know the relationship between nodes in the tree without explicitly
* walking the tree or pre-wrapping everything
*
* A Profiler is associated with each Search, not with each Search-Request. E.g. a
* request may execute two searches (query + global agg). A Profiler just
* represents one of those
*/
public final class Profiler {
private final InternalProfileTree queryTree = new InternalProfileTree();
/**
* The root Collector used in the search
*/
private InternalProfileCollector collector;
public Profiler() {}
/** Set the collector that is associated with this profiler. */
public void setCollector(InternalProfileCollector collector) {
if (this.collector != null) {
throw new IllegalStateException("The collector can only be set once.");
}
this.collector = Objects.requireNonNull(collector);
}
/**
* Get the {@link ProfileBreakdown} for the given query, potentially creating it if it did not exist.
* This should only be used for queries that will be undergoing scoring. Do not use it to profile the
* rewriting phase
*/
public ProfileBreakdown getQueryBreakdown(Query query) {
return queryTree.getQueryBreakdown(query);
}
/**
* Begin timing the rewrite phase of a request. All rewrites are accumulated together into a
* single metric
*/
public void startRewriteTime() {
queryTree.startRewriteTime();
}
/**
* Stop recording the current rewrite and add its time to the total tally, returning the
* cumulative time so far.
*
* @return cumulative rewrite time
*/
public long stopAndAddRewriteTime() {
return queryTree.stopAndAddRewriteTime();
}
/**
* Removes the last (i.e. most recent) query on the stack. This should only be called for scoring
* queries, not rewritten queries
*/
public void pollLastQuery() {
queryTree.pollLast();
}
/**
* @return a hierarchical representation of the profiled query tree
*/
public List<ProfileResult> getQueryTree() {
return queryTree.getQueryTree();
}
/**
* @return total time taken to rewrite all queries in this profile
*/
public long getRewriteTime() {
return queryTree.getRewriteTime();
}
/**
* Return the current root Collector for this search
*/
public CollectorResult getCollector() {
return collector.getCollectorTree();
}
/**
* Helper method to convert Profiler into InternalProfileShardResults, which can be
* serialized to other nodes, emitted as JSON, etc.
*
* @param profilers A list of Profilers to convert into InternalProfileShardResults
* @return A list of corresponding InternalProfileShardResults
*/
public static List<ProfileShardResult> buildShardResults(List<Profiler> profilers) {
List<ProfileShardResult> results = new ArrayList<>(profilers.size());
for (Profiler profiler : profilers) {
ProfileShardResult result = new ProfileShardResult(
profiler.getQueryTree(), profiler.getRewriteTime(), profiler.getCollector());
results.add(result);
}
return results;
}
}
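
Putting the pieces together, one search's profiling lifecycle looks roughly like this sketch (`rootCollector` stands in for the InternalProfileCollector built by the query phase below; imports elided):

Profiler profiler = new Profiler();
profiler.startRewriteTime();
// ... query.rewrite(reader) ...
profiler.stopAndAddRewriteTime();
ProfileBreakdown breakdown = profiler.getQueryBreakdown(query); // during createWeight()
profiler.setCollector(rootCollector);
// ... searcher.search(query, rootCollector) ...
List<ProfileShardResult> results = Profiler.buildShardResults(Collections.singletonList(profiler));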

@ -0,0 +1,59 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/** Wrapper around several {@link Profiler}s that makes management easier. */
public final class Profilers {
private final ContextIndexSearcher searcher;
private final List<Profiler> profilers;
/** Sole constructor. This {@link Profilers} instance will initially wrap one {@link Profiler}. */
public Profilers(ContextIndexSearcher searcher) {
this.searcher = searcher;
this.profilers = new ArrayList<>();
addProfiler();
}
/** Switch to a new profiler. */
public Profiler addProfiler() {
Profiler profiler = new Profiler();
searcher.setProfiler(profiler);
profilers.add(profiler);
return profiler;
}
/** Get the current profiler. */
public Profiler getCurrent() {
return profilers.get(profilers.size() - 1);
}
/** Return the list of all created {@link Profiler}s so far. */
public List<Profiler> getProfilers() {
return Collections.unmodifiableList(profilers);
}
}
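
The addProfiler() hook exists because a single request can run more than one search. An illustrative sketch:

// Illustrative: a follow-up search (e.g. a global aggregation) gets a fresh Profiler.
Profilers profilers = new Profilers(searcher);
Profiler queryProfiler = profilers.getCurrent(); // profiles the main query phase
Profiler aggProfiler = profilers.addProfiler();  // becomes current for the second search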

@ -52,13 +52,16 @@ import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.aggregations.AggregationPhase;
 import org.elasticsearch.search.internal.ScrollContext;
 import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.profile.*;
 import org.elasticsearch.search.rescore.RescorePhase;
 import org.elasticsearch.search.rescore.RescoreSearchContext;
 import org.elasticsearch.search.sort.SortParseElement;
 import org.elasticsearch.search.sort.TrackScoresParseElement;
 import org.elasticsearch.search.suggest.SuggestPhase;

+import java.util.AbstractList;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@ -124,6 +127,11 @@ public class QueryPhase implements SearchPhase {
         }
         suggestPhase.execute(searchContext);
         aggregationPhase.execute(searchContext);
+        if (searchContext.getProfilers() != null) {
+            List<ProfileShardResult> shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers());
+            searchContext.queryResult().profileResults(shardResults);
+        }
     }

     private static boolean returnsDocsInOrder(Query query, Sort sort) {
@ -147,6 +155,7 @@ public class QueryPhase implements SearchPhase {
         QuerySearchResult queryResult = searchContext.queryResult();
         queryResult.searchTimedOut(false);
+        final boolean doProfile = searchContext.getProfilers() != null;
         final SearchType searchType = searchContext.searchType();
         boolean rescore = false;
         try {
@ -162,9 +171,13 @@ public class QueryPhase implements SearchPhase {
             Callable<TopDocs> topDocsCallable;

             assert query == searcher.rewrite(query); // already rewritten
             if (searchContext.size() == 0) { // no matter what the value of from is
                 final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector();
                 collector = totalHitCountCollector;
+                if (searchContext.getProfilers() != null) {
+                    collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_COUNT, Collections.emptyList());
+                }
                 topDocsCallable = new Callable<TopDocs>() {
                     @Override
                     public TopDocs call() throws Exception {
@ -219,6 +232,9 @@ public class QueryPhase implements SearchPhase {
                     topDocsCollector = TopScoreDocCollector.create(numDocs, lastEmittedDoc);
                 }
                 collector = topDocsCollector;
+                if (doProfile) {
+                    collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TOP_HITS, Collections.emptyList());
+                }
                 topDocsCallable = new Callable<TopDocs>() {
                     @Override
                     public TopDocs call() throws Exception {
@ -254,27 +270,57 @@ public class QueryPhase implements SearchPhase {
             final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER;
             if (terminateAfterSet) {
+                final Collector child = collector;
                 // throws Lucene.EarlyTerminationException when given count is reached
                 collector = Lucene.wrapCountBasedEarlyTerminatingCollector(collector, searchContext.terminateAfter());
+                if (doProfile) {
+                    collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TERMINATE_AFTER_COUNT,
+                            Collections.singletonList((InternalProfileCollector) child));
+                }
             }

             if (searchContext.parsedPostFilter() != null) {
+                final Collector child = collector;
                 // this will only get applied to the actual search collector and not
                 // to any scoped collectors, also, it will only be applied to the main collector
                 // since that is where the filter should only work
                 final Weight filterWeight = searcher.createNormalizedWeight(searchContext.parsedPostFilter().query(), false);
                 collector = new FilteredCollector(collector, filterWeight);
+                if (doProfile) {
+                    collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_POST_FILTER,
+                            Collections.singletonList((InternalProfileCollector) child));
+                }
             }

             // plug in additional collectors, like aggregations
-            List<Collector> allCollectors = new ArrayList<>();
-            allCollectors.add(collector);
-            allCollectors.addAll(searchContext.queryCollectors().values());
-            collector = MultiCollector.wrap(allCollectors);
+            final List<Collector> subCollectors = new ArrayList<>();
+            subCollectors.add(collector);
+            subCollectors.addAll(searchContext.queryCollectors().values());
+            collector = MultiCollector.wrap(subCollectors);
+            if (doProfile && collector instanceof InternalProfileCollector == false) {
+                // When there is a single collector to wrap, MultiCollector returns it
+                // directly, so only wrap in the case that there are several sub collectors
+                final List<InternalProfileCollector> children = new AbstractList<InternalProfileCollector>() {
+                    @Override
+                    public InternalProfileCollector get(int index) {
+                        return (InternalProfileCollector) subCollectors.get(index);
+                    }
+                    @Override
+                    public int size() {
+                        return subCollectors.size();
+                    }
+                };
+                collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MULTI, children);
+            }

             // apply the minimum score after multi collector so we filter aggs as well
             if (searchContext.minimumScore() != null) {
+                final Collector child = collector;
                 collector = new MinimumScoreCollector(collector, searchContext.minimumScore());
+                if (doProfile) {
+                    collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MIN_SCORE,
+                            Collections.singletonList((InternalProfileCollector) child));
+                }
             }

             if (collector.getClass() == TotalHitCountCollector.class) {
@ -319,13 +365,21 @@ public class QueryPhase implements SearchPhase {
             final boolean timeoutSet = searchContext.timeoutInMillis() != SearchService.NO_TIMEOUT.millis();
             if (timeoutSet && collector != null) { // collector might be null if no collection is actually needed
+                final Collector child = collector;
                 // TODO: change to use our own counter that uses the scheduler in ThreadPool
                 // throws TimeLimitingCollector.TimeExceededException when timeout has reached
                 collector = Lucene.wrapTimeLimitingCollector(collector, searchContext.timeEstimateCounter(), searchContext.timeoutInMillis());
+                if (doProfile) {
+                    collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TIMEOUT,
+                            Collections.singletonList((InternalProfileCollector) child));
+                }
             }

             try {
                 if (collector != null) {
+                    if (doProfile) {
+                        searchContext.getProfilers().getCurrent().setCollector((InternalProfileCollector) collector);
+                    }
                     searcher.search(query, collector);
                 }
             } catch (TimeLimitingCollector.TimeExceededException e) {
@ -343,7 +397,13 @@ public class QueryPhase implements SearchPhase {
             queryResult.topDocs(topDocsCallable.call());

+            if (searchContext.getProfilers() != null) {
+                List<ProfileShardResult> shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers());
+                searchContext.queryResult().profileResults(shardResults);
+            }

             return rescore;
         } catch (Throwable e) {
             throw new QueryPhaseExecutionException(searchContext, "Failed to execute main query", e);
         }
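
Every wrapping site in the hunks above repeats the same three steps: remember the unwrapped collector as `child`, wrap it, and (when profiling) wrap once more in an InternalProfileCollector that records a reason string plus the profiled child. A hypothetical helper condensing that pattern, using only names from this commit:

// Hypothetical: the wrap-and-profile step factored out.
private static Collector profileWrap(Collector wrapped, Collector child, boolean doProfile, String reason) {
    if (doProfile == false) {
        return wrapped;
    }
    return new InternalProfileCollector(wrapped, reason,
            Collections.singletonList((InternalProfileCollector) child));
}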

@ -20,6 +20,8 @@
 package org.elasticsearch.search.query;

 import org.apache.lucene.search.TopDocs;
+import org.elasticsearch.Version;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@ -29,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams;
 import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
+import org.elasticsearch.search.profile.ProfileShardResult;
 import org.elasticsearch.search.suggest.Suggest;

 import java.io.IOException;
@ -53,6 +56,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
     private Suggest suggest;
     private boolean searchTimedOut;
     private Boolean terminatedEarly = null;
+    private List<ProfileShardResult> profileShardResults;

     public QuerySearchResult() {
@ -120,6 +124,22 @@ public class QuerySearchResult extends QuerySearchResultProvider {
         this.aggregations = aggregations;
     }

+    /**
+     * Returns the profiled results for this search, or potentially null if the result was empty
+     * @return The profiled results, or null
+     */
+    public @Nullable List<ProfileShardResult> profileResults() {
+        return profileShardResults;
+    }
+
+    /**
+     * Sets the finalized profiling results for this query
+     * @param shardResults The finalized profile
+     */
+    public void profileResults(List<ProfileShardResult> shardResults) {
+        this.profileShardResults = shardResults;
+    }
+
     public List<SiblingPipelineAggregator> pipelineAggregators() {
         return pipelineAggregators;
     }
@ -191,6 +211,15 @@ public class QuerySearchResult extends QuerySearchResultProvider {
         }
         searchTimedOut = in.readBoolean();
         terminatedEarly = in.readOptionalBoolean();
+
+        if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) {
+            int profileSize = in.readVInt();
+            profileShardResults = new ArrayList<>(profileSize);
+            for (int i = 0; i < profileSize; i++) {
+                ProfileShardResult result = new ProfileShardResult(in);
+                profileShardResults.add(result);
+            }
+        }
     }

     @Override
@ -229,5 +258,17 @@ public class QuerySearchResult extends QuerySearchResultProvider {
         }
         out.writeBoolean(searchTimedOut);
         out.writeOptionalBoolean(terminatedEarly);
+
+        if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
+            if (profileShardResults == null) {
+                out.writeBoolean(false);
+            } else {
+                out.writeBoolean(true);
+                out.writeVInt(profileShardResults.size());
+                for (ProfileShardResult shardResult : profileShardResults) {
+                    shardResult.writeTo(out);
+                }
+            }
+        }
     }
 }

@ -30,7 +30,7 @@ import org.apache.lucene.search.suggest.document.TopSuggestDocs;
 import org.apache.lucene.search.suggest.document.TopSuggestDocsCollector;
 import org.apache.lucene.util.*;
 import org.apache.lucene.util.PriorityQueue;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.fielddata.AtomicFieldData;
 import org.elasticsearch.index.fielddata.ScriptDocValues;
 import org.elasticsearch.index.mapper.MappedFieldType;
@ -57,7 +57,7 @@ public class CompletionSuggester extends Suggester<CompletionSuggestionContext>
         }
         CompletionSuggestion completionSuggestion = new CompletionSuggestion(name, suggestionContext.getSize());
         spare.copyUTF8Bytes(suggestionContext.getText());
-        CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new StringText(spare.toString()), 0, spare.length());
+        CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new Text(spare.toString()), 0, spare.length());
         completionSuggestion.addTerm(completionSuggestEntry);
         TopSuggestDocsCollector collector = new TopDocumentsCollector(suggestionContext.getSize());
         suggest(searcher, suggestionContext.toQuery(), collector);
@ -91,7 +91,7 @@ public class CompletionSuggester extends Suggester<CompletionSuggestionContext>
             }
             if (numResult++ < suggestionContext.getSize()) {
                 CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(
-                        new StringText(suggestDoc.key.toString()), suggestDoc.score, contexts, payload);
+                        new Text(suggestDoc.key.toString()), suggestDoc.score, contexts, payload);
                 completionSuggestEntry.addOption(option);
             } else {
                 break;

@ -30,7 +30,6 @@ import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.CharsRefBuilder;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.common.text.StringText;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.query.ParsedQuery;
@ -127,11 +126,11 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> {
                 if (!collateMatch && !collatePrune) {
                     continue;
                 }
-                Text phrase = new StringText(spare.toString());
+                Text phrase = new Text(spare.toString());
                 Text highlighted = null;
                 if (suggestion.getPreTag() != null) {
                     spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, suggestion.getPreTag(), suggestion.getPostTag()));
-                    highlighted = new StringText(spare.toString());
+                    highlighted = new Text(spare.toString());
                 }
                 if (collatePrune) {
                     resultEntry.addOption(new Suggestion.Entry.Option(phrase, highlighted, (float) (correction.score), collateMatch));
@ -147,7 +146,7 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> {
     private PhraseSuggestion.Entry buildResultEntry(PhraseSuggestionContext suggestion, CharsRefBuilder spare, double cutoffScore) {
         spare.copyUTF8Bytes(suggestion.getText());
-        return new PhraseSuggestion.Entry(new StringText(spare.toString()), 0, spare.length(), cutoffScore);
+        return new PhraseSuggestion.Entry(new Text(spare.toString()), 0, spare.length(), cutoffScore);
     }

     ScriptService scriptService() {

@ -27,8 +27,6 @@ import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.CharsRefBuilder;
 import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.text.BytesText;
-import org.elasticsearch.common.text.StringText;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.search.suggest.SuggestContextParser;
 import org.elasticsearch.search.suggest.SuggestUtils;
@ -54,10 +52,10 @@ public final class TermSuggester extends Suggester<TermSuggestionContext> {
             SuggestWord[] suggestedWords = directSpellChecker.suggestSimilar(
                     token.term, suggestion.getShardSize(), indexReader, suggestion.getDirectSpellCheckerSettings().suggestMode()
             );
-            Text key = new BytesText(new BytesArray(token.term.bytes()));
+            Text key = new Text(new BytesArray(token.term.bytes()));
             TermSuggestion.Entry resultEntry = new TermSuggestion.Entry(key, token.startOffset, token.endOffset - token.startOffset);
             for (SuggestWord suggestWord : suggestedWords) {
-                Text word = new StringText(suggestWord.string);
+                Text word = new Text(suggestWord.string);
                 resultEntry.addOption(new TermSuggestion.Entry.Option(word, suggestWord.freq, suggestWord.score));
             }
             response.addTerm(resultEntry);

@ -285,4 +285,11 @@ public class CreateIndexIT extends ESIntegTestCase {
             assertThat(messages.toString(), containsString("mapper [text] is used by multiple types"));
         }
     }
+
+    public void testRestartIndexCreationAfterFullClusterRestart() throws Exception {
+        client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none")).get();
+        client().admin().indices().prepareCreate("test").setSettings(indexSettings()).get();
+        internalCluster().fullRestart();
+        ensureGreen("test");
+    }
 }

@ -87,6 +87,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
         for (ObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : shardStores.values()) {
             for (IndicesShardStoresResponse.StoreStatus storeStatus : shardStoreStatuses.value) {
                 assertThat(storeStatus.getVersion(), greaterThan(-1l));
+                assertThat(storeStatus.getAllocationId(), notNullValue());
                 assertThat(storeStatus.getNode(), notNullValue());
                 assertThat(storeStatus.getStoreException(), nullValue());
             }
@ -108,7 +109,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
         assertThat(shardStoresStatuses.size(), equalTo(unassignedShards.size()));
         for (IntObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> storesStatus : shardStoresStatuses) {
             assertThat("must report for one store", storesStatus.value.size(), equalTo(1));
-            assertThat("reported store should be primary", storesStatus.value.get(0).getAllocation(), equalTo(IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY));
+            assertThat("reported store should be primary", storesStatus.value.get(0).getAllocationStatus(), equalTo(IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY));
         }
         logger.info("--> enable allocation");
         enableAllocation(index);

@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.indices.shards;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.ImmutableOpenIntMap;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
@ -44,9 +45,9 @@ public class IndicesShardStoreResponseTests extends ESTestCase {
         DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT);
         DiscoveryNode node2 = new DiscoveryNode("node2", DummyTransportAddress.INSTANCE, Version.CURRENT);
         List<IndicesShardStoresResponse.StoreStatus> storeStatusList = new ArrayList<>();
-        storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null));
-        storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, 2, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, null));
-        storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED, new IOException("corrupted")));
+        storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
+        storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, 2, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null));
+        storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, new IOException("corrupted")));
         storeStatuses.put(0, storeStatusList);
         storeStatuses.put(1, storeStatusList);
         ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>> storesMap = storeStatuses.build();
@ -89,8 +90,10 @@ public class IndicesShardStoreResponseTests extends ESTestCase {
                 IndicesShardStoresResponse.StoreStatus storeStatus = storeStatusList.get(i);
                 assertThat(storeInfo.containsKey("version"), equalTo(true));
                 assertThat(((int) storeInfo.get("version")), equalTo(((int) storeStatus.getVersion())));
+                assertThat(storeInfo.containsKey("allocation_id"), equalTo(true));
+                assertThat(((String) storeInfo.get("allocation_id")), equalTo((storeStatus.getAllocationId())));
                 assertThat(storeInfo.containsKey("allocation"), equalTo(true));
-                assertThat(((String) storeInfo.get("allocation")), equalTo(storeStatus.getAllocation().value()));
+                assertThat(((String) storeInfo.get("allocation")), equalTo(storeStatus.getAllocationStatus().value()));
                 assertThat(storeInfo.containsKey(storeStatus.getNode().id()), equalTo(true));
                 if (storeStatus.getStoreException() != null) {
                     assertThat(storeInfo.containsKey("store_exception"), equalTo(true));
@ -104,11 +107,11 @@ public class IndicesShardStoreResponseTests extends ESTestCase {
     public void testStoreStatusOrdering() throws Exception {
         DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT);
         List<IndicesShardStoresResponse.StoreStatus> orderedStoreStatuses = new ArrayList<>();
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 2, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null));
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null));
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, null));
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED, null));
-        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, new IOException("corrupted")));
+        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 2, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
+        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null));
+        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null));
+        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null));
+        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted")));
         List<IndicesShardStoresResponse.StoreStatus> storeStatuses = new ArrayList<>(orderedStoreStatuses);
         Collections.shuffle(storeStatuses, random());

@ -31,6 +31,7 @@ import org.junit.After;
import org.junit.Before; import org.junit.Before;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import static org.hamcrest.Matchers.*; import static org.hamcrest.Matchers.*;
@ -126,9 +127,9 @@ public class RetryTests extends ESTestCase {
private static class AssertingListener implements ActionListener<BulkResponse> { private static class AssertingListener implements ActionListener<BulkResponse> {
private final CountDownLatch latch; private final CountDownLatch latch;
private int countOnResponseCalled = 0; private final AtomicInteger countOnResponseCalled = new AtomicInteger();
private Throwable lastFailure; private volatile Throwable lastFailure;
private BulkResponse response; private volatile BulkResponse response;
private AssertingListener() { private AssertingListener() {
latch = new CountDownLatch(1); latch = new CountDownLatch(1);
@ -140,19 +141,19 @@ public class RetryTests extends ESTestCase {
@Override @Override
public void onResponse(BulkResponse bulkItemResponses) { public void onResponse(BulkResponse bulkItemResponses) {
latch.countDown();
this.response = bulkItemResponses; this.response = bulkItemResponses;
countOnResponseCalled++; countOnResponseCalled.incrementAndGet();
latch.countDown();
} }
@Override @Override
public void onFailure(Throwable e) { public void onFailure(Throwable e) {
latch.countDown();
this.lastFailure = e; this.lastFailure = e;
latch.countDown();
} }
public void assertOnResponseCalled() { public void assertOnResponseCalled() {
assertThat(countOnResponseCalled, equalTo(1)); assertThat(countOnResponseCalled.get(), equalTo(1));
} }
public void assertResponseWithNumberOfItems(int numItems) { public void assertResponseWithNumberOfItems(int numItems) {
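A note on the listener rewrite above: the response and failure fields become visible to the asserting thread only because they are written before latch.countDown(). A minimal standalone sketch of the same pattern, with illustrative names that are not part of the Elasticsearch codebase:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

// Sketch: a listener whose state is written by one thread and asserted by
// another. The latch must be counted down *after* the fields are written, so
// a thread returning from await() observes them (happens-before via the latch).
class RecordingListener<T> {
    private final CountDownLatch latch = new CountDownLatch(1);
    private final AtomicInteger onResponseCalls = new AtomicInteger(); // atomic, in case of repeated callbacks
    private volatile T response;          // volatile: read from the waiting thread
    private volatile Throwable failure;

    public void onResponse(T value) {
        this.response = value;            // write state first...
        onResponseCalls.incrementAndGet();
        latch.countDown();                // ...then release the waiting thread
    }

    public void onFailure(Throwable t) {
        this.failure = t;
        latch.countDown();
    }

    T awaitResponse() throws InterruptedException {
        latch.await();                    // after this, response/failure are visible
        return response;
    }
}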

@ -54,6 +54,7 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch; import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
@ -762,6 +763,11 @@ public class ClusterServiceIT extends ESIntegTestCase {
} }
} }
int numberOfThreads = randomIntBetween(2, 8);
int tasksSubmittedPerThread = randomIntBetween(1, 1024);
int numberOfExecutors = Math.max(1, numberOfThreads / 4);
final Semaphore semaphore = new Semaphore(numberOfExecutors);
class TaskExecutor implements ClusterStateTaskExecutor<Task> { class TaskExecutor implements ClusterStateTaskExecutor<Task> {
private AtomicInteger counter = new AtomicInteger(); private AtomicInteger counter = new AtomicInteger();
private AtomicInteger batches = new AtomicInteger(); private AtomicInteger batches = new AtomicInteger();
@ -775,6 +781,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
if (randomBoolean()) { if (randomBoolean()) {
maybeUpdatedClusterState = ClusterState.builder(currentState).build(); maybeUpdatedClusterState = ClusterState.builder(currentState).build();
batches.incrementAndGet(); batches.incrementAndGet();
semaphore.acquire();
} }
return BatchResult.<Task>builder().successes(tasks).build(maybeUpdatedClusterState); return BatchResult.<Task>builder().successes(tasks).build(maybeUpdatedClusterState);
} }
@ -787,10 +794,9 @@ public class ClusterServiceIT extends ESIntegTestCase {
@Override @Override
public void clusterStatePublished(ClusterState newClusterState) { public void clusterStatePublished(ClusterState newClusterState) {
published.incrementAndGet(); published.incrementAndGet();
semaphore.release();
} }
} }
int numberOfThreads = randomIntBetween(2, 8);
int tasksSubmittedPerThread = randomIntBetween(1, 1024);
ConcurrentMap<String, AtomicInteger> counters = new ConcurrentHashMap<>(); ConcurrentMap<String, AtomicInteger> counters = new ConcurrentHashMap<>();
CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread);
@ -807,7 +813,6 @@ public class ClusterServiceIT extends ESIntegTestCase {
} }
}; };
int numberOfExecutors = Math.max(1, numberOfThreads / 4);
List<TaskExecutor> executors = new ArrayList<>(); List<TaskExecutor> executors = new ArrayList<>();
for (int i = 0; i < numberOfExecutors; i++) { for (int i = 0; i < numberOfExecutors; i++) {
executors.add(new TaskExecutor()); executors.add(new TaskExecutor());
@ -853,6 +858,8 @@ public class ClusterServiceIT extends ESIntegTestCase {
// wait until all the cluster state updates have been processed // wait until all the cluster state updates have been processed
updateLatch.await(); updateLatch.await();
// and until all of the publication callbacks have completed
semaphore.acquire(numberOfExecutors);
// assert the number of executed tasks is correct // assert the number of executed tasks is correct
assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get()); assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get());
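The Semaphore added above acts as a quiescence barrier: every state-changing batch holds one permit from execution until its clusterStatePublished callback fires, so acquiring all permits at the end blocks until every callback has completed. A self-contained sketch of that pattern (hypothetical names, not the test's API):

import java.util.concurrent.Semaphore;

// Sketch: each in-flight publication "borrows" a permit between execution and
// the publication callback; waiting for quiescence means re-acquiring all permits.
class PublicationBarrier {
    private final int permits;
    private final Semaphore semaphore;

    PublicationBarrier(int permits) {
        this.permits = permits;
        this.semaphore = new Semaphore(permits);
    }

    void onStateChangeExecuted() throws InterruptedException {
        semaphore.acquire();              // hold a permit while publication is in flight
    }

    void onStatePublished() {
        semaphore.release();              // publication callback completed
    }

    void awaitAllPublished() throws InterruptedException {
        semaphore.acquire(permits);       // succeeds only once no permit is held
        semaphore.release(permits);       // restore the permits for reuse
    }
}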

@ -0,0 +1,103 @@
package org.elasticsearch.cluster.routing;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.disruption.NetworkDisconnectPartition;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.equalTo;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
@ESIntegTestCase.SuppressLocalMode
public class PrimaryAllocationIT extends ESIntegTestCase {
public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception {
logger.info("--> starting 3 nodes, 1 master, 2 data");
String master = internalCluster().startMasterOnlyNode(Settings.EMPTY);
internalCluster().startDataOnlyNodesAsync(2).get();
assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
.put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get());
ensureGreen();
logger.info("--> indexing...");
client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
refresh();
ClusterState state = client().admin().cluster().prepareState().all().get().getState();
List<ShardRouting> shards = state.routingTable().allShards("test");
assertThat(shards.size(), equalTo(2));
final String primaryNode;
final String replicaNode;
if (shards.get(0).primary()) {
primaryNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().name();
replicaNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().name();
} else {
primaryNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().name();
replicaNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().name();
}
NetworkDisconnectPartition partition = new NetworkDisconnectPartition(
new HashSet<>(Arrays.asList(master, replicaNode)), Collections.singleton(primaryNode), random());
internalCluster().setDisruptionScheme(partition);
logger.info("--> partitioning node with primary shard from rest of cluster");
partition.startDisrupting();
ensureStableCluster(2, master);
logger.info("--> index a document into previous replica shard (that is now primary)");
client(replicaNode).prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
logger.info("--> shut down node that has new acknowledged document");
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode));
ensureStableCluster(1, master);
partition.stopDisrupting();
logger.info("--> waiting for node with old primary shard to rejoin the cluster");
ensureStableCluster(2, master);
logger.info("--> check that old primary shard does not get promoted to primary again");
// kick reroute and wait for all shard states to be fetched
client(master).admin().cluster().prepareReroute().get();
assertBusy(() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), equalTo(0)));
// kick reroute a second time and check that all shards are unassigned
assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), equalTo(2));
logger.info("--> starting node that reuses data folder with the up-to-date primary shard");
internalCluster().startDataOnlyNode(Settings.EMPTY);
logger.info("--> check that the up-to-date primary shard gets promoted and that documents are available");
ensureYellow("test");
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2l);
}
public void testNotWaitForQuorumCopies() throws Exception {
logger.info("--> starting 3 nodes");
internalCluster().startNodesAsync(3).get();
logger.info("--> creating index with 1 primary and 2 replicas");
assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
.put("index.number_of_shards", randomIntBetween(1, 3)).put("index.number_of_replicas", 2)).get());
ensureGreen("test");
client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
logger.info("--> removing 2 nodes from cluster");
internalCluster().stopRandomDataNode();
internalCluster().stopRandomDataNode();
internalCluster().fullRestart();
logger.info("--> checking that index still gets allocated with only 1 shard copy being available");
ensureYellow("test");
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 1l);
}
}
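The behaviour this test exercises rests on the allocation-id gate introduced by this change: a found shard copy may become primary only if its allocation id is among the index's active allocation ids, while legacy copies without ids fall back to shard-version comparison. A simplified sketch of that rule, using hypothetical names rather than the PrimaryShardAllocator internals:

import java.util.Set;

// Sketch: whether a shard copy found on a node is a candidate for primary
// allocation. An empty active-id set means a legacy (pre-allocation-id) index,
// where only the on-disk shard version is available. Names are illustrative.
final class PrimaryCandidateCheck {
    static boolean isCandidate(String nodeAllocationId, long nodeVersion,
                               Set<String> activeAllocationIds) {
        if (activeAllocationIds.isEmpty()) {
            // legacy index: no ids recorded, rely on the shard version (-1 = no data)
            return nodeVersion != -1;
        }
        return nodeAllocationId != null && activeAllocationIds.contains(nodeAllocationId);
    }
}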

@ -220,6 +220,41 @@ public class SimpleAllTests extends ESTestCase {
indexWriter.close(); indexWriter.close();
} }
public void testTermMissingFromOneSegment() throws Exception {
Directory dir = new RAMDirectory();
IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
Document doc = new Document();
doc.add(new Field("_id", "1", StoredField.TYPE));
AllEntries allEntries = new AllEntries();
allEntries.addText("field", "something", 2.0f);
allEntries.reset();
doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
indexWriter.addDocument(doc);
indexWriter.commit();
doc = new Document();
doc.add(new Field("_id", "2", StoredField.TYPE));
allEntries = new AllEntries();
allEntries.addText("field", "else", 1.0f);
allEntries.reset();
doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
indexWriter.addDocument(doc);
IndexReader reader = DirectoryReader.open(indexWriter, true);
assertEquals(2, reader.leaves().size());
IndexSearcher searcher = new IndexSearcher(reader);
// "something" only appears in the first segment:
Query query = new AllTermQuery(new Term("_all", "something"));
TopDocs docs = searcher.search(query, 10);
assertEquals(1, docs.totalHits);
indexWriter.close();
}
public void testMultipleTokensAllNoBoost() throws Exception { public void testMultipleTokensAllNoBoost() throws Exception {
Directory dir = new RAMDirectory(); Directory dir = new RAMDirectory();
IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));

@ -36,7 +36,7 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESAllocationTestCase; import org.elasticsearch.test.ESAllocationTestCase;
import org.junit.Before; import org.junit.Before;
import java.io.IOException; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
@ -59,25 +59,29 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
this.testAllocator = new TestAllocator(); this.testAllocator = new TestAllocator();
} }
/** public void testNoProcessPrimaryNotAllocatedBefore() {
* Verifies that the canProcess method of primary allocation behaves correctly final RoutingAllocation allocation;
* and processes only the applicable shard. if (randomBoolean()) {
*/ allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), randomBoolean(), Version.CURRENT);
public void testNoProcessReplica() { } else {
ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, false, ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), true, Version.V_2_1_0);
assertThat(testAllocator.needToFindPrimaryCopy(shard), equalTo(false)); }
} boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
public void testNoProcessPrimayNotAllcoatedBefore() { assertThat(allocation.routingNodes().unassigned().size(), equalTo(1));
ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, true, ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); assertThat(allocation.routingNodes().unassigned().iterator().next().shardId(), equalTo(shardId));
assertThat(testAllocator.needToFindPrimaryCopy(shard), equalTo(false));
} }
/** /**
* Tests that when async fetch returns that there is no data, the shard will not be allocated. * Tests that when async fetch returns that there is no data, the shard will not be allocated.
*/ */
public void testNoAsyncFetchData() { public void testNoAsyncFetchData() {
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); final RoutingAllocation allocation;
if (randomBoolean()) {
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "allocId");
} else {
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_0);
}
boolean changed = testAllocator.allocateUnassigned(allocation); boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false)); assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
@ -85,11 +89,17 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
} }
/** /**
* Tests when the node returns that no data was found for it (-1), it will be moved to ignore unassigned. * Tests when the node returns that no data was found for it (-1 for version and null for allocation id),
* it will be moved to ignore unassigned.
*/ */
public void testNoAllocationFound() { public void testNoAllocationFound() {
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); final RoutingAllocation allocation;
testAllocator.addData(node1, -1); if (randomBoolean()) {
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "allocId");
} else {
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_0);
}
testAllocator.addData(node1, -1, null);
boolean changed = testAllocator.allocateUnassigned(allocation); boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false)); assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
@ -97,11 +107,43 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
} }
/** /**
* Tests when the node returns that no data was found for it (-1), it will be moved to ignore unassigned. * Tests when the node returns data with a shard allocation id that does not match active allocation ids, it will be moved to ignore unassigned.
*/
public void testNoMatchingAllocationIdFound() {
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "id2");
testAllocator.addData(node1, 1, "id1");
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
}
/**
* Tests that when there is a node to allocate the shard to, and there are no active allocation ids, it will be allocated to it.
* This is the case when we have old shards from pre-3.0 days.
*/
public void testNoActiveAllocationIds() {
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_1);
testAllocator.addData(node1, 1, null);
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.id()));
}
/**
* Tests when the node returns that no data was found for it, it will be moved to ignore unassigned.
*/ */
public void testStoreException() { public void testStoreException() {
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); final RoutingAllocation allocation;
testAllocator.addData(node1, 3, new CorruptIndexException("test", "test")); if (randomBoolean()) {
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
testAllocator.addData(node1, 1, "allocId1", new CorruptIndexException("test", "test"));
} else {
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_1);
testAllocator.addData(node1, 3, null, new CorruptIndexException("test", "test"));
}
boolean changed = testAllocator.allocateUnassigned(allocation); boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false)); assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
@ -112,8 +154,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
* Tests that when there is a node to allocate the shard to, it will be allocated to it. * Tests that when there is a node to allocate the shard to, it will be allocated to it.
*/ */
public void testFoundAllocationAndAllocating() { public void testFoundAllocationAndAllocating() {
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); final RoutingAllocation allocation;
testAllocator.addData(node1, 10); if (randomBoolean()) {
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
testAllocator.addData(node1, 1, "allocId1");
} else {
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_2_0);
testAllocator.addData(node1, 3, null);
}
boolean changed = testAllocator.allocateUnassigned(allocation); boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true)); assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
@ -126,8 +174,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
* it will be moved to ignore unassigned until it can be allocated to. * it will be moved to ignore unassigned until it can be allocated to.
*/ */
public void testFoundAllocationButThrottlingDecider() { public void testFoundAllocationButThrottlingDecider() {
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders()); final RoutingAllocation allocation;
testAllocator.addData(node1, 10); if (randomBoolean()) {
allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
testAllocator.addData(node1, 1, "allocId1");
} else {
allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), false, Version.V_2_2_0);
testAllocator.addData(node1, 3, null);
}
boolean changed = testAllocator.allocateUnassigned(allocation); boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false)); assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
@ -139,8 +193,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
* force the allocation to it. * force the allocation to it.
*/ */
public void testFoundAllocationButNoDecider() { public void testFoundAllocationButNoDecider() {
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders()); final RoutingAllocation allocation;
testAllocator.addData(node1, 10); if (randomBoolean()) {
allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
testAllocator.addData(node1, 1, "allocId1");
} else {
allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), false, Version.V_2_0_0);
testAllocator.addData(node1, 3, null);
}
boolean changed = testAllocator.allocateUnassigned(allocation); boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true)); assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
@ -149,11 +209,11 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
} }
/** /**
* Tests that the highest version node is chosed for allocation. * Tests that the highest version node is chosen for allocation.
*/ */
public void testAllocateToTheHighestVersion() { public void testAllocateToTheHighestVersionOnLegacyIndex() {
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_0_0);
testAllocator.addData(node1, 10).addData(node2, 12); testAllocator.addData(node1, 10, null).addData(node2, 12, null);
boolean changed = testAllocator.allocateUnassigned(allocation); boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true)); assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
@ -162,35 +222,150 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
} }
/** /**
* Tests that when restoring from snapshot, even if we didn't find any node to allocate on, the shard * Tests that when restoring from a snapshot and we find a node with a shard copy and allocation
* will remain in the unassigned list to be allocated later. * deciders say yes, we allocate to that node.
*/ */
public void testRestoreIgnoresNoNodesToAllocate() { public void testRestore() {
MetaData metaData = MetaData.builder() RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders());
.put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) testAllocator.addData(node1, 1, randomFrom(null, "allocId"));
.build(); boolean changed = testAllocator.allocateUnassigned(allocation);
RoutingTable routingTable = RoutingTable.builder() assertThat(changed, equalTo(true));
.addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), Version.CURRENT, shardId.getIndex())) assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
.build(); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) }
.metaData(metaData)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), state.getRoutingNodes(), state.nodes(), null, System.nanoTime());
testAllocator.addData(node1, -1).addData(node2, -1); /**
* Tests that when restoring from a snapshot and we find a node with a shard copy and allocation
* deciders say throttle, we add it to ignored shards.
*/
public void testRestoreThrottle() {
RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders());
testAllocator.addData(node1, 1, randomFrom(null, "allocId"));
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false));
}
/**
* Tests that when restoring from a snapshot and we find a node with a shard copy but allocation
* deciders say no, we still allocate to that node.
*/
public void testRestoreForcesAllocateIfShardAvailable() {
RoutingAllocation allocation = getRestoreRoutingAllocation(noAllocationDeciders());
testAllocator.addData(node1, 1, randomFrom(null, "some allocId"));
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
}
/**
* Tests that when restoring from a snapshot and we don't find a node with a shard copy, the shard will remain in
* the unassigned list to be allocated later.
*/
public void testRestoreDoesNotAssignIfNoShardAvailable() {
RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders());
testAllocator.addData(node1, -1, null);
boolean changed = testAllocator.allocateUnassigned(allocation); boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false)); assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().size(), equalTo(1));
}
private RoutingAllocation getRestoreRoutingAllocation(AllocationDeciders allocationDeciders) {
Version version = randomFrom(Version.CURRENT, Version.V_2_0_0);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndex()).settings(settings(version)).numberOfShards(1).numberOfReplicas(0)
.putActiveAllocationIds(0, version == Version.CURRENT ? new HashSet<>(Arrays.asList("allocId")) : Collections.emptySet()))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), version, shardId.getIndex()))
.build();
ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
.metaData(metaData)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
}
/**
* Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation
* deciders say yes, we allocate to that node.
*/
public void testRecoverOnAnyNode() {
RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders());
testAllocator.addData(node1, 1, randomFrom(null, "allocId"));
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
}
/**
* Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation
* deciders say throttle, we add it to ignored shards.
*/
public void testRecoverOnAnyNodeThrottle() {
RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders());
testAllocator.addData(node1, 1, randomFrom(null, "allocId"));
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false));
}
/**
* Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy but allocation
* deciders say no, we still allocate to that node.
*/
public void testRecoverOnAnyNodeForcesAllocateIfShardAvailable() {
RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(noAllocationDeciders());
testAllocator.addData(node1, 1, randomFrom(null, "allocId"));
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
}
/**
* Tests that when recovering using "recover_on_any_node" and we don't find a node with a shard copy we let
* BalancedShardAllocator assign the shard
*/
public void testRecoverOnAnyNodeDoesNotAssignIfNoShardAvailable() {
RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders());
testAllocator.addData(node1, -1, null);
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().size(), equalTo(1));
}
private RoutingAllocation getRecoverOnAnyNodeRoutingAllocation(AllocationDeciders allocationDeciders) {
Version version = randomFrom(Version.CURRENT, Version.V_2_0_0);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndex()).settings(settings(version)
.put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
.put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true))
.numberOfShards(1).numberOfReplicas(0).putActiveAllocationIds(0, version == Version.CURRENT ? new HashSet<>(Arrays.asList("allocId")) : Collections.emptySet()))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), Version.CURRENT, shardId.getIndex()))
.build();
ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
.metaData(metaData)
.routingTable(routingTable)
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
} }
/** /**
* Tests that the shard is allocated only when enough copies of it exist. This test * Tests that the shard is allocated only when enough copies of it exist. This test
* verifies this with the same version (1) and quorum allocation. * verifies this with the same version (1) and quorum allocation.
*/ */
public void testEnoughCopiesFoundForAllocation() { public void testEnoughCopiesFoundForAllocationOnLegacyIndex() {
MetaData metaData = MetaData.builder() MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.V_2_0_0)).numberOfShards(1).numberOfReplicas(2))
.build(); .build();
RoutingTable routingTable = RoutingTable.builder() RoutingTable routingTable = RoutingTable.builder()
.addAsRecovery(metaData.index(shardId.getIndex())) .addAsRecovery(metaData.index(shardId.getIndex()))
@ -207,7 +382,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
testAllocator.addData(node1, 1); testAllocator.addData(node1, 1, null);
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
changed = testAllocator.allocateUnassigned(allocation); changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false)); assertThat(changed, equalTo(false));
@ -215,7 +390,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
testAllocator.addData(node2, 1); testAllocator.addData(node2, 1, null);
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
changed = testAllocator.allocateUnassigned(allocation); changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true)); assertThat(changed, equalTo(true));
@ -229,9 +404,9 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
* Tests that the shard is allocated only when enough copies of it exist. This test * Tests that the shard is allocated only when enough copies of it exist. This test
* verifies that even with different versions, each version is treated as a copy and counted. * verifies that even with different versions, each version is treated as a copy and counted.
*/ */
public void testEnoughCopiesFoundForAllocationWithDifferentVersion() { public void testEnoughCopiesFoundForAllocationOnLegacyIndexWithDifferentVersion() {
MetaData metaData = MetaData.builder() MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.V_2_0_0)).numberOfShards(1).numberOfReplicas(2))
.build(); .build();
RoutingTable routingTable = RoutingTable.builder() RoutingTable routingTable = RoutingTable.builder()
.addAsRecovery(metaData.index(shardId.getIndex())) .addAsRecovery(metaData.index(shardId.getIndex()))
@ -248,7 +423,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
testAllocator.addData(node1, 1); testAllocator.addData(node1, 1, null);
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
changed = testAllocator.allocateUnassigned(allocation); changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(false)); assertThat(changed, equalTo(false));
@ -256,7 +431,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas
testAllocator.addData(node2, 2); testAllocator.addData(node2, 2, null);
allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
changed = testAllocator.allocateUnassigned(allocation); changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true)); assertThat(changed, equalTo(true));
@ -266,67 +441,20 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id())); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id()));
} }
public void testAllocationOnAnyNodeWithSharedFs() { private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders, boolean asNew, Version version, String... activeAllocationIds) {
ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, false,
ShardRoutingState.UNASSIGNED, 0,
new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null));
Map<DiscoveryNode, TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> data = new HashMap<>();
data.put(node1, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node1, 1));
data.put(node2, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node2, 5));
data.put(node3, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node3, -1));
AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetches =
new AsyncShardFetch.FetchResult(shardId, data, new HashSet<>(), new HashSet<>());
PrimaryShardAllocator.NodesAndVersions nAndV = testAllocator.buildNodesAndVersions(shard, false, new HashSet<String>(), fetches);
assertThat(nAndV.allocationsFound, equalTo(2));
assertThat(nAndV.highestVersion, equalTo(5L));
assertThat(nAndV.nodes, contains(node2));
nAndV = testAllocator.buildNodesAndVersions(shard, true, new HashSet<String>(), fetches);
assertThat(nAndV.allocationsFound, equalTo(3));
assertThat(nAndV.highestVersion, equalTo(5L));
// All three nodes are potential candidates because shards can be recovered on any node
assertThat(nAndV.nodes, contains(node2, node1, node3));
}
public void testAllocationOnAnyNodeShouldPutNodesWithExceptionsLast() {
ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, false,
ShardRoutingState.UNASSIGNED, 0,
new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null));
Map<DiscoveryNode, TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> data = new HashMap<>();
data.put(node1, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node1, 1));
data.put(node2, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node2, 1));
data.put(node3, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node3, 1, new IOException("I failed to open")));
HashSet<String> ignoredNodes = new HashSet<>();
ignoredNodes.add(node2.id());
AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetches =
new AsyncShardFetch.FetchResult(shardId, data, new HashSet<>(), ignoredNodes);
PrimaryShardAllocator.NodesAndVersions nAndV = testAllocator.buildNodesAndVersions(shard, false, ignoredNodes, fetches);
assertThat(nAndV.allocationsFound, equalTo(1));
assertThat(nAndV.highestVersion, equalTo(1L));
assertThat(nAndV.nodes, contains(node1));
nAndV = testAllocator.buildNodesAndVersions(shard, true, ignoredNodes, fetches);
assertThat(nAndV.allocationsFound, equalTo(2));
assertThat(nAndV.highestVersion, equalTo(1L));
// node3 should be last here
assertThat(nAndV.nodes.size(), equalTo(2));
assertThat(nAndV.nodes, contains(node1, node3));
}
private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders) {
MetaData metaData = MetaData.builder() MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(version))
.build(); .numberOfShards(1).numberOfReplicas(0).putActiveAllocationIds(0, new HashSet<>(Arrays.asList(activeAllocationIds))))
RoutingTable routingTable = RoutingTable.builder() .build();
.addAsRecovery(metaData.index(shardId.getIndex())) RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
.build(); if (asNew) {
routingTableBuilder.addAsNew(metaData.index(shardId.getIndex()));
} else {
routingTableBuilder.addAsRecovery(metaData.index(shardId.getIndex()));
}
ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
.metaData(metaData) .metaData(metaData)
.routingTable(routingTable) .routingTable(routingTableBuilder.build())
.nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build();
return new RoutingAllocation(deciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); return new RoutingAllocation(deciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime());
} }
@ -344,15 +472,15 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
return this; return this;
} }
public TestAllocator addData(DiscoveryNode node, long version) { public TestAllocator addData(DiscoveryNode node, long version, String allocationId) {
return addData(node, version, null); return addData(node, version, allocationId, null);
} }
public TestAllocator addData(DiscoveryNode node, long version, @Nullable Throwable storeException) { public TestAllocator addData(DiscoveryNode node, long version, String allocationId, @Nullable Throwable storeException) {
if (data == null) { if (data == null) {
data = new HashMap<>(); data = new HashMap<>();
} }
data.put(node, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node, version, storeException)); data.put(node, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node, version, allocationId, storeException));
return this; return this;
} }

@ -20,10 +20,10 @@
package org.elasticsearch.gateway; package org.elasticsearch.gateway;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.ESIntegTestCase.Scope;
@ -32,14 +32,10 @@ import org.elasticsearch.test.InternalTestCluster.RestartCallback;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import static org.elasticsearch.client.Requests.clusterHealthRequest; import static org.elasticsearch.client.Requests.clusterHealthRequest;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
/** /**
* *
@ -51,72 +47,12 @@ public class QuorumGatewayIT extends ESIntegTestCase {
return 2; return 2;
} }
public void testChangeInitialShardsRecovery() throws Exception {
logger.info("--> starting 3 nodes");
final String[] nodes = internalCluster().startNodesAsync(3).get().toArray(new String[0]);
createIndex("test");
ensureGreen();
NumShards test = getNumShards("test");
logger.info("--> indexing...");
client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
//We don't check for failures in the flush response: if we do we might get the following:
// FlushNotAllowedEngineException[[test][1] recovery is in progress, flush [COMMIT_TRANSLOG] is not allowed]
flush();
client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get();
refresh();
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2l);
}
final String nodeToRemove = nodes[between(0,2)];
logger.info("--> restarting 1 nodes -- kill 2");
internalCluster().fullRestart(new RestartCallback() {
@Override
public Settings onNodeStopped(String nodeName) throws Exception {
return Settings.EMPTY;
}
@Override
public boolean doRestart(String nodeName) {
return nodeToRemove.equals(nodeName);
}
});
if (randomBoolean()) {
Thread.sleep(between(1, 400)); // wait a bit and give is a chance to try to allocate
}
ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForNodes("1")).actionGet();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.RED)); // nothing allocated yet
assertTrue(awaitBusy(() -> {
ClusterStateResponse clusterStateResponse = internalCluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get();
return clusterStateResponse.getState() != null && clusterStateResponse.getState().routingTable().index("test") != null;
})); // wait until we get a cluster state - could be null if we quick enough.
final ClusterStateResponse clusterStateResponse = internalCluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get();
assertThat(clusterStateResponse.getState(), notNullValue());
assertThat(clusterStateResponse.getState().routingTable().index("test"), notNullValue());
assertThat(clusterStateResponse.getState().routingTable().index("test").allPrimaryShardsActive(), is(false));
logger.info("--> change the recovery.initial_shards setting, and make sure its recovered");
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("recovery.initial_shards", 1)).get();
logger.info("--> running cluster_health (wait for the shards to startup), primaries only since we only have 1 node");
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(test.numPrimaries)).actionGet();
logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2l);
}
}
public void testQuorumRecovery() throws Exception { public void testQuorumRecovery() throws Exception {
logger.info("--> starting 3 nodes"); logger.info("--> starting 3 nodes");
internalCluster().startNodesAsync(3).get();
// we are shutting down nodes - make sure we don't have 2 clusters if we test network // we are shutting down nodes - make sure we don't have 2 clusters if we test network
setMinimumMasterNodes(2); internalCluster().startNodesAsync(3,
Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2).build()).get();
createIndex("test"); createIndex("test");
ensureGreen(); ensureGreen();
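The replacement above passes the quorum at node start instead of setting it afterwards, closing the window in which restarting nodes could elect a second master. A small runnable sketch of the quorum arithmetic being baked in (illustrative only):

// Sketch: computing the value passed as discovery.zen.minimum_master_nodes
// when starting a fixed-size test cluster. With 3 master-eligible nodes the
// quorum is 3/2 + 1 = 2, so a full restart cannot produce two independent masters.
public class QuorumSetting {
    public static void main(String[] args) {
        int masterEligibleNodes = 3;
        int minimumMasterNodes = masterEligibleNodes / 2 + 1; // = 2
        System.out.println("discovery.zen.minimum_master_nodes=" + minimumMasterNodes);
    }
}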

@ -43,9 +43,11 @@ import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
import org.elasticsearch.test.ESAllocationTestCase; import org.elasticsearch.test.ESAllocationTestCase;
import org.junit.Before; import org.junit.Before;
import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet;
import java.util.Map; import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
@ -275,13 +277,16 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
} }
private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders, Settings settings, UnassignedInfo.Reason reason) { private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders, Settings settings, UnassignedInfo.Reason reason) {
ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10);
MetaData metaData = MetaData.builder() MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT).put(settings)).numberOfShards(1).numberOfReplicas(0)) .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT).put(settings))
.build(); .numberOfShards(1).numberOfReplicas(1)
.putActiveAllocationIds(0, new HashSet<>(Arrays.asList(primaryShard.allocationId().getId()))))
.build();
RoutingTable routingTable = RoutingTable.builder() RoutingTable routingTable = RoutingTable.builder()
.add(IndexRoutingTable.builder(shardId.getIndex()) .add(IndexRoutingTable.builder(shardId.getIndex())
.addIndexShard(new IndexShardRoutingTable.Builder(shardId) .addIndexShard(new IndexShardRoutingTable.Builder(shardId)
.addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10)) .addShard(primaryShard)
.addShard(ShardRouting.newUnassigned(shardId.getIndex(), shardId.getId(), null, false, new UnassignedInfo(reason, null))) .addShard(ShardRouting.newUnassigned(shardId.getIndex(), shardId.getId(), null, false, new UnassignedInfo(reason, null)))
.build()) .build())
) )
@ -294,13 +299,16 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
} }
private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) { private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) {
ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10);
MetaData metaData = MetaData.builder() MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT))
.numberOfShards(1).numberOfReplicas(1)
.putActiveAllocationIds(0, new HashSet<>(Arrays.asList(primaryShard.allocationId().getId()))))
.build(); .build();
RoutingTable routingTable = RoutingTable.builder() RoutingTable routingTable = RoutingTable.builder()
.add(IndexRoutingTable.builder(shardId.getIndex()) .add(IndexRoutingTable.builder(shardId.getIndex())
.addIndexShard(new IndexShardRoutingTable.Builder(shardId) .addIndexShard(new IndexShardRoutingTable.Builder(shardId)
.addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10)) .addShard(primaryShard)
.addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node2.id(), null, null, false, ShardRoutingState.INITIALIZING, 10, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null))) .addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node2.id(), null, null, false, ShardRoutingState.INITIALIZING, 10, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)))
.build()) .build())
) )

@@ -133,7 +133,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
        ShardId id = new ShardId("foo", 1);
        long version = between(1, Integer.MAX_VALUE / 2);
        boolean primary = randomBoolean();
-        AllocationId allocationId = randomAllocationId();
+        AllocationId allocationId = randomBoolean() ? null : randomAllocationId();
        ShardStateMetaData state1 = new ShardStateMetaData(version, primary, "foo", allocationId);
        write(state1, env.availableShardPaths(id));
        ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(id));
@@ -288,7 +288,8 @@ public class IndexShardTests extends ESSingleNodeTestCase {
    }

    public void testShardStateMetaHashCodeEquals() {
-        ShardStateMetaData meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), randomAllocationId());
+        AllocationId allocationId = randomBoolean() ? null : randomAllocationId();
+        ShardStateMetaData meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId);
        assertEquals(meta, new ShardStateMetaData(meta.version, meta.primary, meta.indexUUID, meta.allocationId));
        assertEquals(meta.hashCode(), new ShardStateMetaData(meta.version, meta.primary, meta.indexUUID, meta.allocationId).hashCode());
@@ -299,7 +300,8 @@ public class IndexShardTests extends ESSingleNodeTestCase {
        assertFalse(meta.equals(new ShardStateMetaData(meta.version, !meta.primary, meta.indexUUID + "foo", randomAllocationId())));
        Set<Integer> hashCodes = new HashSet<>();
        for (int i = 0; i < 30; i++) { // just a sanity check that we impl hashcode
-            meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), randomAllocationId());
+            allocationId = randomBoolean() ? null : randomAllocationId();
+            meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId);
            hashCodes.add(meta.hashCode());
        }
        assertTrue("more than one unique hashcode expected but got: " + hashCodes.size(), hashCodes.size() > 1);

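The randomized null allocation id above only works if ShardStateMetaData compares and hashes that field null-safely. A hedged sketch of the pattern the test relies on, using java.util.Objects; the actual implementation may differ:

    // Sketch only: null-tolerant equality over the four persisted fields.
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ShardStateMetaData that = (ShardStateMetaData) o;
        return version == that.version
                && primary == that.primary
                && Objects.equals(indexUUID, that.indexUUID)
                && Objects.equals(allocationId, that.allocationId); // allocationId may be null
    }

    @Override
    public int hashCode() {
        return Objects.hash(version, primary, indexUUID, allocationId);
    }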
@@ -97,7 +97,7 @@ public class SimpleIndexStateIT extends ESIntegTestCase {
        client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
    }

-    public void testFastCloseAfterCreateDoesNotClose() {
+    public void testFastCloseAfterCreateContinuesCreateAfterOpen() {
        logger.info("--> creating test index that cannot be allocated");
        client().admin().indices().prepareCreate("test").setSettings(Settings.settingsBuilder()
                .put("index.routing.allocation.include.tag", "no_such_node").build()).get();
@@ -106,17 +106,14 @@ public class SimpleIndexStateIT extends ESIntegTestCase {
        assertThat(health.isTimedOut(), equalTo(false));
        assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED));

-        try {
-            client().admin().indices().prepareClose("test").get();
-            fail("Exception should have been thrown");
-        } catch(IndexPrimaryShardNotAllocatedException e) {
-            // expected
-        }
+        client().admin().indices().prepareClose("test").get();

        logger.info("--> updating test index settings to allow allocation");
        client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.settingsBuilder()
                .put("index.routing.allocation.include.tag", "").build()).get();
+        client().admin().indices().prepareOpen("test").get();

        logger.info("--> waiting for green status");
        ensureGreen();

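The rewritten test encodes the new close semantics: closing an index whose primary was never allocated now succeeds instead of throwing IndexPrimaryShardNotAllocatedException, and the index must be reopened explicitly once allocation is possible. Condensed from the test above (index name illustrative):

    client().admin().indices().prepareClose("test").get();          // succeeds even while the index is RED
    client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.settingsBuilder()
                    .put("index.routing.allocation.include.tag", "").build()).get();
    client().admin().indices().prepareOpen("test").get();           // creation/recovery continues after open
    ensureGreen();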
@@ -57,6 +57,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.min;
import static org.elasticsearch.search.aggregations.AggregationBuilders.range;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative;
import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.movingAvg;
+import static org.elasticsearch.test.hamcrest.DoubleMatcher.nearlyEqual;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;

@@ -18,7 +18,6 @@
 */
package org.elasticsearch.search.highlight;

-import org.elasticsearch.common.text.StringText;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.mapper.FieldMapper;
@@ -52,12 +51,12 @@ public class CustomHighlighter implements Highlighter {
        }

        List<Text> responses = new ArrayList<>();
-        responses.add(new StringText(String.format(Locale.ENGLISH, "standard response for %s at position %s", field.field(),
+        responses.add(new Text(String.format(Locale.ENGLISH, "standard response for %s at position %s", field.field(),
                cacheEntry.position)));

        if (field.fieldOptions().options() != null) {
            for (Map.Entry<String, Object> entry : field.fieldOptions().options().entrySet()) {
-                responses.add(new StringText("field:" + entry.getKey() + ":" + entry.getValue()));
+                responses.add(new Text("field:" + entry.getKey() + ":" + entry.getValue()));
            }
        }

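The StringText removal recurs in several files below; the migration is mechanical because the consolidated Text class accepts a String directly. A one-line sketch:

    // before: responses.add(new StringText("field:" + entry.getKey() + ":" + entry.getValue()));
    responses.add(new Text("field:" + entry.getKey() + ":" + entry.getValue()));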
@@ -1557,7 +1557,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
                        .fragmenter("simple"))).get();

        assertHighlight(response, 0, "tags", 0, equalTo("this is a really <em>long</em> <em>tag</em> i would like to highlight"));
-        assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the <em>tag</em> token near the end"));
+        assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the tag token near the end"));

        response = client().prepareSearch("test")
                .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQuery.Type.PHRASE))
@@ -1566,7 +1566,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
                        .fragmenter("span"))).get();

        assertHighlight(response, 0, "tags", 0, equalTo("this is a really <em>long</em> <em>tag</em> i would like to highlight"));
-        assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the <em>tag</em> token near the end"));
+        assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very <em>long</em> <em>tag</em> and has the tag token near the end"));

        assertFailures(client().prepareSearch("test")
                .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQuery.Type.PHRASE))
@@ -2062,7 +2062,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
        searchResponse = client().search(searchRequest("test").source(source)).actionGet();
-        assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> <xxx>brown</xxx> fox jumps over the lazy <xxx>quick</xxx> dog"));
+        assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The <xxx>quick</xxx> <xxx>brown</xxx> fox jumps over the lazy quick dog"));
    }

    public void testPostingsHighlighterMultipleFields() throws Exception {

@@ -0,0 +1,42 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.highlight;
import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.util.LuceneTestCase;
public class PlainHighlighterTests extends LuceneTestCase {
public void testHighlightPhrase() throws Exception {
Query query = new PhraseQuery.Builder()
.add(new Term("field", "foo"))
.add(new Term("field", "bar"))
.build();
QueryScorer queryScorer = new CustomQueryScorer(query);
org.apache.lucene.search.highlight.Highlighter highlighter = new org.apache.lucene.search.highlight.Highlighter(queryScorer);
String[] frags = highlighter.getBestFragments(new MockAnalyzer(random()), "field", "bar foo bar foo", 10);
assertArrayEquals(new String[] {"bar <B>foo</B> <B>bar</B> foo"}, frags);
}
}

@@ -21,7 +21,7 @@ package org.elasticsearch.search.internal;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.InputStreamStreamInput;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.test.ESTestCase;
@@ -39,25 +39,25 @@ public class InternalSearchHitTests extends ESTestCase {
        SearchShardTarget target = new SearchShardTarget("_node_id", "_index", 0);

        Map<String, InternalSearchHits> innerHits = new HashMap<>();
-        InternalSearchHit innerHit1 = new InternalSearchHit(0, "_id", new StringText("_type"), null);
+        InternalSearchHit innerHit1 = new InternalSearchHit(0, "_id", new Text("_type"), null);
        innerHit1.shardTarget(target);
-        InternalSearchHit innerInnerHit2 = new InternalSearchHit(0, "_id", new StringText("_type"), null);
+        InternalSearchHit innerInnerHit2 = new InternalSearchHit(0, "_id", new Text("_type"), null);
        innerInnerHit2.shardTarget(target);
        innerHits.put("1", new InternalSearchHits(new InternalSearchHit[]{innerInnerHit2}, 1, 1f));
        innerHit1.setInnerHits(innerHits);
-        InternalSearchHit innerHit2 = new InternalSearchHit(0, "_id", new StringText("_type"), null);
+        InternalSearchHit innerHit2 = new InternalSearchHit(0, "_id", new Text("_type"), null);
        innerHit2.shardTarget(target);
-        InternalSearchHit innerHit3 = new InternalSearchHit(0, "_id", new StringText("_type"), null);
+        InternalSearchHit innerHit3 = new InternalSearchHit(0, "_id", new Text("_type"), null);
        innerHit3.shardTarget(target);

        innerHits = new HashMap<>();
-        InternalSearchHit hit1 = new InternalSearchHit(0, "_id", new StringText("_type"), null);
+        InternalSearchHit hit1 = new InternalSearchHit(0, "_id", new Text("_type"), null);
        innerHits.put("1", new InternalSearchHits(new InternalSearchHit[]{innerHit1, innerHit2}, 1, 1f));
        innerHits.put("2", new InternalSearchHits(new InternalSearchHit[]{innerHit3}, 1, 1f));
        hit1.shardTarget(target);
        hit1.setInnerHits(innerHits);
-        InternalSearchHit hit2 = new InternalSearchHit(0, "_id", new StringText("_type"), null);
+        InternalSearchHit hit2 = new InternalSearchHit(0, "_id", new Text("_type"), null);
        hit2.shardTarget(target);

        InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[]{hit1, hit2}, 2, 1f);

@@ -0,0 +1,173 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.RandomApproximationQuery;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TotalHitCountCollector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
public class ProfileTests extends ESTestCase {
static Directory dir;
static IndexReader reader;
static ContextIndexSearcher searcher;
@BeforeClass
public static void before() throws IOException {
dir = newDirectory();
RandomIndexWriter w = new RandomIndexWriter(random(), dir);
final int numDocs = TestUtil.nextInt(random(), 1, 20);
for (int i = 0; i < numDocs; ++i) {
final int numHoles = random().nextInt(5);
for (int j = 0; j < numHoles; ++j) {
w.addDocument(new Document());
}
Document doc = new Document();
doc.add(new StringField("foo", "bar", Store.NO));
w.addDocument(doc);
}
reader = w.getReader();
w.close();
Engine.Searcher engineSearcher = new Engine.Searcher("test", new IndexSearcher(reader));
searcher = new ContextIndexSearcher(engineSearcher, IndexSearcher.getDefaultQueryCache(), MAYBE_CACHE_POLICY);
}
@AfterClass
public static void after() throws IOException {
IOUtils.close(reader, dir);
dir = null;
reader = null;
searcher = null;
}
public void testBasic() throws IOException {
Profiler profiler = new Profiler();
searcher.setProfiler(profiler);
Query query = new TermQuery(new Term("foo", "bar"));
searcher.search(query, 1);
List<ProfileResult> results = profiler.getQueryTree();
assertEquals(1, results.size());
Map<String, Long> breakdown = results.get(0).getTimeBreakdown();
assertThat(breakdown.get(ProfileBreakdown.TimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.ADVANCE.toString()).longValue(), equalTo(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()).longValue(), greaterThan(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.MATCH.toString()).longValue(), equalTo(0L));
long rewriteTime = profiler.getRewriteTime();
assertThat(rewriteTime, greaterThan(0L));
}
public void testNoScoring() throws IOException {
Profiler profiler = new Profiler();
searcher.setProfiler(profiler);
Query query = new TermQuery(new Term("foo", "bar"));
searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed
List<ProfileResult> results = profiler.getQueryTree();
assertEquals(1, results.size());
Map<String, Long> breakdown = results.get(0).getTimeBreakdown();
assertThat(breakdown.get(ProfileBreakdown.TimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.ADVANCE.toString()).longValue(), equalTo(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()).longValue(), equalTo(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.MATCH.toString()).longValue(), equalTo(0L));
long rewriteTime = profiler.getRewriteTime();
assertThat(rewriteTime, greaterThan(0L));
}
public void testUseIndexStats() throws IOException {
Profiler profiler = new Profiler();
searcher.setProfiler(profiler);
Query query = new TermQuery(new Term("foo", "bar"));
searcher.count(query); // will use index stats
List<ProfileResult> results = profiler.getQueryTree();
assertEquals(0, results.size());
long rewriteTime = profiler.getRewriteTime();
assertThat(rewriteTime, greaterThan(0L));
}
public void testApproximations() throws IOException {
Profiler profiler = new Profiler();
Engine.Searcher engineSearcher = new Engine.Searcher("test", new IndexSearcher(reader));
// disable query caching since we want to test approximations, which won't
// be exposed on a cached entry
ContextIndexSearcher searcher = new ContextIndexSearcher(engineSearcher, null, MAYBE_CACHE_POLICY);
searcher.setProfiler(profiler);
Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random());
searcher.count(query);
List<ProfileResult> results = profiler.getQueryTree();
assertEquals(1, results.size());
Map<String, Long> breakdown = results.get(0).getTimeBreakdown();
assertThat(breakdown.get(ProfileBreakdown.TimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.ADVANCE.toString()).longValue(), equalTo(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()).longValue(), equalTo(0L));
assertThat(breakdown.get(ProfileBreakdown.TimingType.MATCH.toString()).longValue(), greaterThan(0L));
long rewriteTime = profiler.getRewriteTime();
assertThat(rewriteTime, greaterThan(0L));
}
public void testCollector() throws IOException {
TotalHitCountCollector collector = new TotalHitCountCollector();
ProfileCollector profileCollector = new ProfileCollector(collector);
assertEquals(0, profileCollector.getTime());
final LeafCollector leafCollector = profileCollector.getLeafCollector(reader.leaves().get(0));
assertThat(profileCollector.getTime(), greaterThan(0L));
long time = profileCollector.getTime();
leafCollector.setScorer(Lucene.illegalScorer("dummy scorer"));
assertThat(profileCollector.getTime(), greaterThan(time));
time = profileCollector.getTime();
leafCollector.collect(0);
assertThat(profileCollector.getTime(), greaterThan(time));
}
}

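ProfileTests reads timings from a per-query breakdown map keyed by ProfileBreakdown.TimingType. Pulling a single timing out of a profiled search reduces to the following sketch, assuming a Profiler is attached to the searcher as in the test above:

    Profiler profiler = new Profiler();
    searcher.setProfiler(profiler);
    searcher.search(new TermQuery(new Term("foo", "bar")), 1);
    Map<String, Long> breakdown = profiler.getQueryTree().get(0).getTimeBreakdown();
    long scoreNanos = breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()); // per-phase time
    long rewriteNanos = profiler.getRewriteTime(); // rewrite is tracked separately from the tree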
@@ -0,0 +1,596 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.elasticsearch.action.search.*;
import org.elasticsearch.search.SearchHit;
import org.apache.lucene.util.English;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.index.query.*;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.search.profile.RandomQueryGenerator.randomQueryBuilder;
import static org.elasticsearch.test.hamcrest.DoubleMatcher.nearlyEqual;
import static org.hamcrest.Matchers.*;
public class QueryProfilerIT extends ESIntegTestCase {
/**
* This test simply checks to make sure nothing crashes. Test indexes 100-150 documents,
* constructs 20-100 random queries and tries to profile them
*/
public void testProfileQuery() throws Exception {
createIndex("test");
ensureGreen();
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
"field1", English.intToEnglish(i),
"field2", i
);
}
List<String> stringFields = Arrays.asList("field1");
List<String> numericFields = Arrays.asList("field2");
indexRandom(true, docs);
refresh();
int iters = between(20, 100);
for (int i = 0; i < iters; i++) {
QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3);
logger.info(q.toString());
SearchResponse resp = client().prepareSearch()
.setQuery(q)
.setProfile(true)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.execute().actionGet();
assertNotNull("Profile response element should not be null", resp.getProfileResults());
for (Map.Entry<String, List<ProfileShardResult>> shard : resp.getProfileResults().entrySet()) {
for (ProfileShardResult searchProfiles : shard.getValue()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
assertThat(result.getTime(), greaterThan(0L));
}
CollectorResult result = searchProfiles.getCollectorResult();
assertThat(result.getName(), not(isEmptyOrNullString()));
assertThat(result.getTime(), greaterThan(0L));
}
}
}
}
/**
* This test generates 1-10 random queries and executes a profiled and non-profiled
* search for each query. It then does some basic sanity checking of score and hits
* to make sure the profiling doesn't interfere with the hits being returned
*/
public void testProfileMatchesRegular() throws Exception {
createIndex("test");
ensureGreen();
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
"field1", English.intToEnglish(i),
"field2", i
);
}
List<String> stringFields = Arrays.asList("field1");
List<String> numericFields = Arrays.asList("field2");
indexRandom(true, docs);
refresh();
int iters = between(1, 10);
for (int i = 0; i < iters; i++) {
QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3);
logger.info(q.toString());
SearchRequestBuilder vanilla = client().prepareSearch("test")
.setQuery(q)
.setProfile(false)
.addSort("_score", SortOrder.DESC)
.addSort("_uid", SortOrder.ASC)
.setPreference("_primary")
.setSearchType(SearchType.QUERY_THEN_FETCH);
SearchRequestBuilder profile = client().prepareSearch("test")
.setQuery(q)
.setProfile(true)
.addSort("_score", SortOrder.DESC)
.addSort("_uid", SortOrder.ASC)
.setPreference("_primary")
.setSearchType(SearchType.QUERY_THEN_FETCH);
MultiSearchResponse.Item[] responses = client().prepareMultiSearch()
.add(vanilla)
.add(profile)
.execute().actionGet().getResponses();
SearchResponse vanillaResponse = responses[0].getResponse();
SearchResponse profileResponse = responses[1].getResponse();
float vanillaMaxScore = vanillaResponse.getHits().getMaxScore();
float profileMaxScore = profileResponse.getHits().getMaxScore();
if (Float.isNaN(vanillaMaxScore)) {
assertTrue("Vanilla maxScore is NaN but Profile is not [" + profileMaxScore + "]",
Float.isNaN(profileMaxScore));
} else {
assertTrue("Profile maxScore of [" + profileMaxScore + "] is not close to Vanilla maxScore [" + vanillaMaxScore + "]",
nearlyEqual(vanillaMaxScore, profileMaxScore, 0.001));
}
assertThat("Profile totalHits of [" + profileResponse.getHits().totalHits() + "] is not close to Vanilla totalHits [" + vanillaResponse.getHits().totalHits() + "]",
vanillaResponse.getHits().getTotalHits(), equalTo(profileResponse.getHits().getTotalHits()));
SearchHit[] vanillaHits = vanillaResponse.getHits().getHits();
SearchHit[] profileHits = profileResponse.getHits().getHits();
for (int j = 0; j < vanillaHits.length; j++) {
assertThat("Profile hit #" + j + " has a different ID from Vanilla",
vanillaHits[j].getId(), equalTo(profileHits[j].getId()));
}
}
}
/**
* This test verifies that the output is reasonable for a simple, non-nested query
*/
public void testSimpleMatch() throws Exception {
createIndex("test");
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
"field1", English.intToEnglish(i),
"field2", i
);
}
indexRandom(true, docs);
ensureGreen();
QueryBuilder q = QueryBuilders.matchQuery("field1", "one");
SearchResponse resp = client().prepareSearch()
.setQuery(q)
.setProfile(true)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.execute().actionGet();
Map<String, List<ProfileShardResult>> p = resp.getProfileResults();
assertNotNull(p);
for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
for (ProfileShardResult searchProfiles : shardResult.getValue()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertEquals(result.getQueryName(), "TermQuery");
assertEquals(result.getLuceneDescription(), "field1:one");
assertThat(result.getTime(), greaterThan(0L));
assertNotNull(result.getTimeBreakdown());
}
CollectorResult result = searchProfiles.getCollectorResult();
assertThat(result.getName(), not(isEmptyOrNullString()));
assertThat(result.getTime(), greaterThan(0L));
}
}
}
/**
* This test verifies that the output is reasonable for a nested query
*/
public void testBool() throws Exception {
createIndex("test");
ensureGreen();
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
"field1", English.intToEnglish(i),
"field2", i
);
}
indexRandom(true, docs);
QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one")).must(QueryBuilders.matchQuery("field1", "two"));
SearchResponse resp = client().prepareSearch()
.setQuery(q)
.setProfile(true)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.execute().actionGet();
Map<String, List<ProfileShardResult>> p = resp.getProfileResults();
assertNotNull(p);
for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
for (ProfileShardResult searchProfiles : shardResult.getValue()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertEquals(result.getQueryName(), "BooleanQuery");
assertEquals(result.getLuceneDescription(), "+field1:one +field1:two");
assertThat(result.getTime(), greaterThan(0L));
assertNotNull(result.getTimeBreakdown());
assertEquals(result.getProfiledChildren().size(), 2);
// Check the children
List<ProfileResult> children = result.getProfiledChildren();
assertEquals(children.size(), 2);
ProfileResult childProfile = children.get(0);
assertEquals(childProfile.getQueryName(), "TermQuery");
assertEquals(childProfile.getLuceneDescription(), "field1:one");
assertThat(childProfile.getTime(), greaterThan(0L));
assertNotNull(childProfile.getTimeBreakdown());
assertEquals(childProfile.getProfiledChildren().size(), 0);
childProfile = children.get(1);
assertEquals(childProfile.getQueryName(), "TermQuery");
assertEquals(childProfile.getLuceneDescription(), "field1:two");
assertThat(childProfile.getTime(), greaterThan(0L));
assertNotNull(childProfile.getTimeBreakdown());
}
CollectorResult result = searchProfiles.getCollectorResult();
assertThat(result.getName(), not(isEmptyOrNullString()));
assertThat(result.getTime(), greaterThan(0L));
}
}
}
/**
* Tests a boolean query with no children clauses
*/
public void testEmptyBool() throws Exception {
createIndex("test");
ensureGreen();
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
"field1", English.intToEnglish(i),
"field2", i
);
}
indexRandom(true, docs);
refresh();
QueryBuilder q = QueryBuilders.boolQuery();
logger.info(q.toString());
SearchResponse resp = client().prepareSearch()
.setQuery(q)
.setProfile(true)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.execute().actionGet();
assertNotNull("Profile response element should not be null", resp.getProfileResults());
for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
for (ProfileShardResult searchProfiles : shardResult.getValue()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
assertThat(result.getTime(), greaterThan(0L));
assertNotNull(result.getTimeBreakdown());
}
CollectorResult result = searchProfiles.getCollectorResult();
assertThat(result.getName(), not(isEmptyOrNullString()));
assertThat(result.getTime(), greaterThan(0L));
}
}
}
/**
* Tests a series of three nested boolean queries with a single "leaf" match query.
* The rewrite process will "collapse" this down to a single bool, so this tests to make sure
* nothing catastrophic happens during that fairly substantial rewrite
*/
public void testCollapsingBool() throws Exception {
createIndex("test");
ensureGreen();
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
"field1", English.intToEnglish(i),
"field2", i
);
}
indexRandom(true, docs);
refresh();
QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one"))));
logger.info(q.toString());
SearchResponse resp = client().prepareSearch()
.setQuery(q)
.setProfile(true)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.execute().actionGet();
assertNotNull("Profile response element should not be null", resp.getProfileResults());
for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
for (ProfileShardResult searchProfiles : shardResult.getValue()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
assertThat(result.getTime(), greaterThan(0L));
assertNotNull(result.getTimeBreakdown());
}
CollectorResult result = searchProfiles.getCollectorResult();
assertThat(result.getName(), not(isEmptyOrNullString()));
assertThat(result.getTime(), greaterThan(0L));
}
}
}
public void testBoosting() throws Exception {
createIndex("test");
ensureGreen();
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
"field1", English.intToEnglish(i),
"field2", i
);
}
indexRandom(true, docs);
refresh();
QueryBuilder q = QueryBuilders.boostingQuery(QueryBuilders.matchQuery("field1", "one"), QueryBuilders.matchQuery("field1", "two"))
.boost(randomFloat())
.negativeBoost(randomFloat());
logger.info(q.toString());
SearchResponse resp = client().prepareSearch()
.setQuery(q)
.setProfile(true)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.execute().actionGet();
assertNotNull("Profile response element should not be null", resp.getProfileResults());
for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
for (ProfileShardResult searchProfiles : shardResult.getValue()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
assertThat(result.getTime(), greaterThan(0L));
assertNotNull(result.getTimeBreakdown());
}
CollectorResult result = searchProfiles.getCollectorResult();
assertThat(result.getName(), not(isEmptyOrNullString()));
assertThat(result.getTime(), greaterThan(0L));
}
}
}
public void testDisMaxRange() throws Exception {
createIndex("test");
ensureGreen();
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
"field1", English.intToEnglish(i),
"field2", i
);
}
indexRandom(true, docs);
refresh();
QueryBuilder q = QueryBuilders.disMaxQuery()
.boost(0.33703882f)
.add(QueryBuilders.rangeQuery("field2").from(null).to(73).includeLower(true).includeUpper(true));
logger.info(q.toString());
SearchResponse resp = client().prepareSearch()
.setQuery(q)
.setProfile(true)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.execute().actionGet();
assertNotNull("Profile response element should not be null", resp.getProfileResults());
for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
for (ProfileShardResult searchProfiles : shardResult.getValue()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
assertThat(result.getTime(), greaterThan(0L));
assertNotNull(result.getTimeBreakdown());
}
CollectorResult result = searchProfiles.getCollectorResult();
assertThat(result.getName(), not(isEmptyOrNullString()));
assertThat(result.getTime(), greaterThan(0L));
}
}
}
public void testRange() throws Exception {
createIndex("test");
ensureGreen();
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
"field1", English.intToEnglish(i),
"field2", i
);
}
indexRandom(true, docs);
refresh();
QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5);
logger.info(q.toString());
SearchResponse resp = client().prepareSearch()
.setQuery(q)
.setProfile(true)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.execute().actionGet();
assertNotNull("Profile response element should not be null", resp.getProfileResults());
for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
for (ProfileShardResult searchProfiles : shardResult.getValue()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
assertThat(result.getTime(), greaterThan(0L));
assertNotNull(result.getTimeBreakdown());
}
CollectorResult result = searchProfiles.getCollectorResult();
assertThat(result.getName(), not(isEmptyOrNullString()));
assertThat(result.getTime(), greaterThan(0L));
}
}
}
public void testPhrase() throws Exception {
createIndex("test");
ensureGreen();
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
"field1", English.intToEnglish(i) + " " + English.intToEnglish(i+1),
"field2", i
);
}
indexRandom(true, docs);
refresh();
QueryBuilder q = QueryBuilders.matchPhraseQuery("field1", "one two");
logger.info(q.toString());
SearchResponse resp = client().prepareSearch()
.setQuery(q)
.setIndices("test")
.setTypes("type1")
.setProfile(true)
.setSearchType(SearchType.QUERY_THEN_FETCH)
.execute().actionGet();
if (resp.getShardFailures().length > 0) {
for (ShardSearchFailure f : resp.getShardFailures()) {
logger.error(f.toString());
}
fail();
}
assertNotNull("Profile response element should not be null", resp.getProfileResults());
for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
for (ProfileShardResult searchProfiles : shardResult.getValue()) {
for (ProfileResult result : searchProfiles.getQueryResults()) {
assertNotNull(result.getQueryName());
assertNotNull(result.getLuceneDescription());
assertThat(result.getTime(), greaterThan(0L));
assertNotNull(result.getTimeBreakdown());
}
CollectorResult result = searchProfiles.getCollectorResult();
assertThat(result.getName(), not(isEmptyOrNullString()));
assertThat(result.getTime(), greaterThan(0L));
}
}
}
/**
* This test makes sure no profile results are returned when profiling is disabled
*/
public void testNoProfile() throws Exception {
createIndex("test");
ensureGreen();
int numDocs = randomIntBetween(100, 150);
IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
for (int i = 0; i < numDocs; i++) {
docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
"field1", English.intToEnglish(i),
"field2", i
);
}
indexRandom(true, docs);
refresh();
QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5);
logger.info(q.toString());
SearchResponse resp = client().prepareSearch().setQuery(q).setProfile(false).execute().actionGet();
assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0));
}
}

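From the client side, the whole feature hangs off setProfile(true); a minimal sketch of issuing a profiled search and walking the per-shard results, mirroring the assertions above (index and field names illustrative):

    SearchResponse resp = client().prepareSearch("test")
            .setQuery(QueryBuilders.matchQuery("field1", "one"))
            .setProfile(true)   // off by default; getProfileResults() is empty when false
            .execute().actionGet();
    for (Map.Entry<String, List<ProfileShardResult>> shard : resp.getProfileResults().entrySet()) {
        for (ProfileShardResult shardResult : shard.getValue()) {
            for (ProfileResult qr : shardResult.getQueryResults()) {
                logger.info("{} [{}] took {}ns", qr.getQueryName(), qr.getLuceneDescription(), qr.getTime());
            }
        }
    }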
@@ -0,0 +1,266 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.profile;
import org.apache.lucene.util.English;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.query.*;
import java.util.ArrayList;
import java.util.List;
import static com.carrotsearch.randomizedtesting.RandomizedTest.*;
import static org.junit.Assert.assertTrue;
public class RandomQueryGenerator {
public static QueryBuilder randomQueryBuilder(List<String> stringFields, List<String> numericFields, int numDocs, int depth) {
assertTrue("Must supply at least one string field", stringFields.size() > 0);
assertTrue("Must supply at least one numeric field", numericFields.size() > 0);
// If depth is exhausted, or 50% of the time return a terminal
// Helps limit ridiculously large compound queries
if (depth == 0 || randomBoolean()) {
return randomTerminalQuery(stringFields, numericFields, numDocs);
}
switch (randomIntBetween(0,5)) {
case 0:
return randomTerminalQuery(stringFields, numericFields, numDocs);
case 1:
return QueryBuilders.boolQuery().must(randomQueryBuilder(stringFields, numericFields, numDocs, depth -1))
.filter(randomQueryBuilder(stringFields, numericFields, numDocs, depth -1));
case 2:
return randomBoolQuery(stringFields, numericFields, numDocs, depth);
case 3:
// disabled for now because of https://issues.apache.org/jira/browse/LUCENE-6781
//return randomBoostingQuery(stringFields, numericFields, numDocs, depth);
case 4:
return randomConstantScoreQuery(stringFields, numericFields, numDocs, depth);
case 5:
return randomDisMaxQuery(stringFields, numericFields, numDocs, depth);
default:
return randomTerminalQuery(stringFields, numericFields, numDocs);
}
}
private static QueryBuilder randomTerminalQuery(List<String> stringFields, List<String> numericFields, int numDocs) {
switch (randomIntBetween(0,6)) {
case 0:
return randomTermQuery(stringFields, numDocs);
case 1:
return randomTermsQuery(stringFields, numDocs);
case 2:
return randomRangeQuery(numericFields, numDocs);
case 3:
return QueryBuilders.matchAllQuery();
case 4:
return randomCommonTermsQuery(stringFields, numDocs);
case 5:
return randomFuzzyQuery(stringFields);
case 6:
return randomIDsQuery();
default:
return randomTermQuery(stringFields, numDocs);
}
}
private static String randomQueryString(int max) {
StringBuilder qsBuilder = new StringBuilder();
for (int i = 0; i < max; i++) {
qsBuilder.append(English.intToEnglish(randomInt(max)));
qsBuilder.append(" ");
}
return qsBuilder.toString().trim();
}
private static String randomField(List<String> fields) {
return fields.get(randomInt(fields.size() - 1));
}
private static QueryBuilder randomTermQuery(List<String> fields, int numDocs) {
return QueryBuilders.termQuery(randomField(fields), randomQueryString(1));
}
private static QueryBuilder randomTermsQuery(List<String> fields, int numDocs) {
int numTerms = randomInt(numDocs);
ArrayList<String> terms = new ArrayList<>(numTerms);
for (int i = 0; i < numTerms; i++) {
terms.add(randomQueryString(1));
}
return QueryBuilders.termsQuery(randomField(fields), terms);
}
private static QueryBuilder randomRangeQuery(List<String> fields, int numDocs) {
QueryBuilder q = QueryBuilders.rangeQuery(randomField(fields));
if (randomBoolean()) {
((RangeQueryBuilder)q).from(randomIntBetween(0, numDocs / 2 - 1));
}
if (randomBoolean()) {
((RangeQueryBuilder)q).to(randomIntBetween(numDocs / 2, numDocs));
}
return q;
}
private static QueryBuilder randomBoolQuery(List<String> stringFields, List<String> numericFields, int numDocs, int depth) {
QueryBuilder q = QueryBuilders.boolQuery();
int numClause = randomIntBetween(0,5);
for (int i = 0; i < numClause; i++) {
((BoolQueryBuilder)q).must(randomQueryBuilder(stringFields, numericFields,numDocs, depth -1));
}
numClause = randomIntBetween(0,5);
for (int i = 0; i < numClause; i++) {
((BoolQueryBuilder)q).should(randomQueryBuilder(stringFields, numericFields,numDocs, depth -1));
}
numClause = randomIntBetween(0,5);
for (int i = 0; i < numClause; i++) {
((BoolQueryBuilder)q).mustNot(randomQueryBuilder(stringFields, numericFields, numDocs, depth -1));
}
return q;
}
private static QueryBuilder randomBoostingQuery(List<String> stringFields, List<String> numericFields, int numDocs, int depth) {
return QueryBuilders.boostingQuery(
randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1),
randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1))
.boost(randomFloat())
.negativeBoost(randomFloat());
}
private static QueryBuilder randomConstantScoreQuery(List<String> stringFields, List<String> numericFields, int numDocs, int depth) {
return QueryBuilders.constantScoreQuery(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1));
}
private static QueryBuilder randomCommonTermsQuery(List<String> fields, int numDocs) {
int numTerms = randomInt(numDocs);
QueryBuilder q = QueryBuilders.commonTermsQuery(randomField(fields), randomQueryString(numTerms));
if (randomBoolean()) {
((CommonTermsQueryBuilder)q).boost(randomFloat());
}
if (randomBoolean()) {
((CommonTermsQueryBuilder)q).cutoffFrequency(randomFloat());
}
if (randomBoolean()) {
((CommonTermsQueryBuilder)q).highFreqMinimumShouldMatch(Integer.toString(randomInt(numTerms)))
.highFreqOperator(randomBoolean() ? Operator.AND : Operator.OR);
}
if (randomBoolean()) {
((CommonTermsQueryBuilder)q).lowFreqMinimumShouldMatch(Integer.toString(randomInt(numTerms)))
.lowFreqOperator(randomBoolean() ? Operator.AND : Operator.OR);
}
return q;
}
private static QueryBuilder randomFuzzyQuery(List<String> fields) {
QueryBuilder q = QueryBuilders.fuzzyQuery(randomField(fields), randomQueryString(1));
if (randomBoolean()) {
((FuzzyQueryBuilder)q).boost(randomFloat());
}
if (randomBoolean()) {
switch (randomIntBetween(0, 4)) {
case 0:
((FuzzyQueryBuilder)q).fuzziness(Fuzziness.AUTO);
break;
case 1:
((FuzzyQueryBuilder)q).fuzziness(Fuzziness.ONE);
break;
case 2:
((FuzzyQueryBuilder)q).fuzziness(Fuzziness.TWO);
break;
case 3:
((FuzzyQueryBuilder)q).fuzziness(Fuzziness.ZERO);
break;
case 4:
((FuzzyQueryBuilder)q).fuzziness(Fuzziness.fromEdits(randomIntBetween(0,2)));
break;
default:
((FuzzyQueryBuilder)q).fuzziness(Fuzziness.AUTO);
break;
}
}
if (randomBoolean()) {
((FuzzyQueryBuilder)q).maxExpansions(Math.abs(randomInt()));
}
if (randomBoolean()) {
((FuzzyQueryBuilder)q).prefixLength(Math.abs(randomInt()));
}
if (randomBoolean()) {
((FuzzyQueryBuilder)q).transpositions(randomBoolean());
}
return q;
}
private static QueryBuilder randomDisMaxQuery(List<String> stringFields, List<String> numericFields, int numDocs, int depth) {
QueryBuilder q = QueryBuilders.disMaxQuery();
int numClauses = randomIntBetween(1, 10);
for (int i = 0; i < numClauses; i++) {
((DisMaxQueryBuilder)q).add(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1));
}
if (randomBoolean()) {
((DisMaxQueryBuilder)q).boost(randomFloat());
}
if (randomBoolean()) {
((DisMaxQueryBuilder)q).tieBreaker(randomFloat());
}
return q;
}
private static QueryBuilder randomIDsQuery() {
QueryBuilder q = QueryBuilders.idsQuery();
int numIDs = randomInt(100);
for (int i = 0; i < numIDs; i++) {
((IdsQueryBuilder)q).addIds(String.valueOf(randomInt()));
}
if (randomBoolean()) {
((IdsQueryBuilder)q).boost(randomFloat());
}
return q;
}
}

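RandomQueryGenerator is consumed through its single static entry point; QueryProfilerIT above calls it like this (field names and bounds illustrative):

    List<String> stringFields = Arrays.asList("field1");
    List<String> numericFields = Arrays.asList("field2");
    // up to 3 levels of compound-query nesting over ~100 indexed docs
    QueryBuilder q = RandomQueryGenerator.randomQueryBuilder(stringFields, numericFields, 100, 3);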
@@ -20,7 +20,7 @@ package org.elasticsearch.search.suggest;

import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.util.CharsRefBuilder;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;

import java.io.IOException;
import java.util.Locale;
@@ -42,11 +42,11 @@ public class CustomSuggester extends Suggester<CustomSuggester.CustomSuggestions
        Suggest.Suggestion<Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option>> response = new Suggest.Suggestion<>(name, suggestion.getSize());

        String firstSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "12");
-        Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry12 = new Suggest.Suggestion.Entry<>(new StringText(firstSuggestion), 0, text.length() + 2);
+        Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry12 = new Suggest.Suggestion.Entry<>(new Text(firstSuggestion), 0, text.length() + 2);
        response.addTerm(resultEntry12);

        String secondSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "123");
-        Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry123 = new Suggest.Suggestion.Entry<>(new StringText(secondSuggestion), 0, text.length() + 3);
+        Suggest.Suggestion.Entry<Suggest.Suggestion.Entry.Option> resultEntry123 = new Suggest.Suggestion.Entry<>(new Text(secondSuggestion), 0, text.length() + 3);
        response.addTerm(resultEntry123);

        return response;

@@ -0,0 +1,45 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test.hamcrest;
public class DoubleMatcher {
/**
* Better floating point comparisons courtesy of https://github.com/brazzy/floating-point-gui.de
*
* Snippet adapted to use doubles instead of floats
*/
public static boolean nearlyEqual(double a, double b, double epsilon) {
final double absA = Math.abs(a);
final double absB = Math.abs(b);
final double diff = Math.abs(a - b);
if (a == b) { // shortcut, handles infinities
return true;
} else if (a == 0 || b == 0 || diff < Double.MIN_NORMAL) {
// a or b is zero or both are extremely close to it
// relative error is less meaningful here
return diff < (epsilon * Double.MIN_NORMAL);
} else { // use relative error
return diff / Math.min((absA + absB), Double.MAX_VALUE) < epsilon;
}
}
}

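A usage sketch for nearlyEqual, as QueryProfilerIT above applies it to max scores (values and epsilon illustrative):

    double vanillaMaxScore = 1.2345;
    double profileMaxScore = 1.2346;
    // 0.001 = 0.1% relative tolerance; exact equality is unreliable for recomputed scores
    assertTrue(DoubleMatcher.nearlyEqual(vanillaMaxScore, profileMaxScore, 0.001));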
Some files were not shown because too many files have changed in this diff.