Merge branch 'master' into feature/seq_no

Change IndexShard counters for the new simplified ReplicationAction
Boaz Leskes 2015-12-10 17:46:52 +01:00
commit 68f1a87c48
247 changed files with 24674 additions and 1826 deletions

View File

@@ -184,6 +184,12 @@ tasks.idea.doLast {
if (System.getProperty('idea.active') != null && ideaMarker.exists() == false) {
  throw new GradleException('You must run gradle idea from the root of elasticsearch before importing into IntelliJ')
}
// add buildSrc itself as a groovy project
task buildSrcIdea(type: GradleBuild) {
  buildFile = 'buildSrc/build.gradle'
  tasks = ['cleanIdea', 'ideaModule']
}
tasks.idea.dependsOn(buildSrcIdea)

// eclipse configuration

View File

@@ -15,8 +15,15 @@ import org.gradle.api.logging.LogLevel
import org.gradle.api.logging.Logger
import org.junit.runner.Description

import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicInteger
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Clip;
import javax.sound.sampled.Line;
import javax.sound.sampled.LineEvent;
import javax.sound.sampled.LineListener;

import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.*
import static com.carrotsearch.gradle.junit4.TestLoggingConfiguration.OutputMode
@@ -102,9 +109,36 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
        formatTime(e.getCurrentTime()) + ", stalled for " +
        formatDurationInSeconds(e.getNoEventDuration()) + " at: " +
        (e.getDescription() == null ? "<unknown>" : formatDescription(e.getDescription())))
    try {
      playBeat();
    } catch (Exception nosound) { /* handling exceptions with style */ }
    slowTestsFound = true
  }

  void playBeat() throws Exception {
    Clip clip = (Clip)AudioSystem.getLine(new Line.Info(Clip.class));
    final AtomicBoolean stop = new AtomicBoolean();
    clip.addLineListener(new LineListener() {
      @Override
      public void update(LineEvent event) {
        if (event.getType() == LineEvent.Type.STOP) {
          stop.set(true);
        }
      }
    });
    InputStream stream = getClass().getResourceAsStream("/beat.wav");
    try {
      clip.open(AudioSystem.getAudioInputStream(stream));
      clip.start();
      while (!stop.get()) {
        Thread.sleep(20);
      }
      clip.close();
    } finally {
      stream.close();
    }
  }

  @Subscribe
  void onQuit(AggregatedQuitEvent e) throws IOException {
    if (config.showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) {

View File

@@ -65,7 +65,6 @@ public class PluginBuildPlugin extends BuildPlugin {
      // with a full elasticsearch server that includes optional deps
      provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"
      provided "com.vividsolutions:jts:${project.versions.jts}"
-     provided "com.github.spullara.mustache.java:compiler:${project.versions.mustache}"
      provided "log4j:log4j:${project.versions.log4j}"
      provided "log4j:apache-log4j-extras:${project.versions.log4j}"
      provided "org.slf4j:slf4j-api:${project.versions.slf4j}"

View File

@@ -525,7 +525,7 @@ class ClusterFormationTasks {
    }
  }

- static String pluginTaskName(String action, String name, String suffix) {
  public static String pluginTaskName(String action, String name, String suffix) {
    // replace every dash followed by a character with just the uppercase character
    String camelName = name.replaceAll(/-(\w)/) { _, c -> c.toUpperCase(Locale.ROOT) }
    return action + camelName[0].toUpperCase(Locale.ROOT) + camelName.substring(1) + suffix
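Note: pluginTaskName is made public here so the new MessyTestPlugin (added later in this commit) can derive task names from plugin project names. A minimal Java sketch of the same dash-to-camelCase rule, assuming the Groovy closure acts as a per-match uppercase replacement:

import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class PluginTaskNameSketch {
    static String pluginTaskName(String action, String name, String suffix) {
        // replace every dash followed by a character with just the uppercase character
        Matcher m = Pattern.compile("-(\\w)").matcher(name);
        StringBuffer camel = new StringBuffer();
        while (m.find()) {
            m.appendReplacement(camel, m.group(1).toUpperCase(Locale.ROOT));
        }
        m.appendTail(camel);
        return action + Character.toUpperCase(camel.charAt(0)) + camel.substring(1) + suffix;
    }

    public static void main(String[] args) {
        // prints "copyAnalysisIcuMetadata"
        System.out.println(pluginTaskName("copy", "analysis-icu", "Metadata"));
    }
}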

View File

@@ -0,0 +1,63 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.test
import org.elasticsearch.gradle.plugin.PluginBuildPlugin
import org.gradle.api.Project
import org.gradle.api.artifacts.Dependency
import org.gradle.api.artifacts.ProjectDependency
import org.gradle.api.tasks.Copy
/**
* A plugin to run messy tests, which are generally tests that depend on plugins.
*
* This plugin will add the same test configuration as standalone tests, except
* also add the plugin-metadata and properties files for each plugin project
* dependency.
*/
class MessyTestPlugin extends StandaloneTestPlugin {
@Override
public void apply(Project project) {
super.apply(project)
project.configurations.testCompile.dependencies.all { Dependency dep ->
// this closure is run every time a compile dependency is added
if (dep instanceof ProjectDependency && dep.dependencyProject.plugins.hasPlugin(PluginBuildPlugin)) {
project.gradle.projectsEvaluated {
addPluginResources(project, dep.dependencyProject)
}
}
}
}
private static addPluginResources(Project project, Project pluginProject) {
String outputDir = "generated-resources/${pluginProject.name}"
String taskName = ClusterFormationTasks.pluginTaskName("copy", pluginProject.name, "Metadata")
Copy copyPluginMetadata = project.tasks.create(taskName, Copy.class)
copyPluginMetadata.into(outputDir)
copyPluginMetadata.from(pluginProject.tasks.pluginProperties)
copyPluginMetadata.from(pluginProject.file('src/main/plugin-metadata'))
project.sourceSets.test.output.dir(outputDir, builtBy: taskName)
// add each generated dir to the test classpath in IDEs
//project.eclipse.classpath.sourceSets = [project.sourceSets.test]
project.idea.module.singleEntryLibraries= ['TEST': [project.file(outputDir)]]
}
}

View File

@@ -46,6 +46,8 @@ public class StandaloneTestBasePlugin implements Plugin<Project> {
    project.eclipse.classpath.sourceSets = [project.sourceSets.test]
    project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime]
    project.idea.module.testSourceDirs += project.sourceSets.test.java.srcDirs
    project.idea.module.scopes['TEST'] = [plus: [project.configurations.testRuntime]]

    PrecommitTasks.create(project, false)
    project.check.dependsOn(project.precommit)

View File

@@ -0,0 +1,20 @@
#
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
implementation-class=org.elasticsearch.gradle.test.MessyTestPlugin

Binary file not shown.

View File

@@ -4,7 +4,6 @@ lucene = 5.4.0-snapshot-1715952
# optional dependencies
spatial4j = 0.5
jts = 1.13
-mustache = 0.9.1
jackson = 2.6.2
log4j = 1.2.17
slf4j = 1.6.2

View File

@@ -74,9 +74,6 @@ dependencies {
  compile "com.spatial4j:spatial4j:${versions.spatial4j}", optional
  compile "com.vividsolutions:jts:${versions.jts}", optional

- // templating
- compile "com.github.spullara.mustache.java:compiler:${versions.mustache}", optional

  // logging
  compile "log4j:log4j:${versions.log4j}", optional
  compile "log4j:apache-log4j-extras:${versions.log4j}", optional

View File

@@ -20,18 +20,19 @@ package org.elasticsearch.action;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.seqno.SequenceNumbersService;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;

/**
 * A base class for the response of a write operation that involves a single doc
 */
-public abstract class DocWriteResponse extends ReplicationResponse implements ToXContent {
public abstract class DocWriteResponse extends ReplicationResponse implements StatusToXContent {

    private ShardId shardId;
    private String id;

@@ -95,6 +96,10 @@ public abstract class DocWriteResponse extends ReplicationResponse implements To
        return seqNo;
    }

    /** returns the rest status for this response (based on {@link ShardInfo#status()}) */
    public RestStatus status() {
        return getShardInfo().status();
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {

@@ -128,16 +133,16 @@ public abstract class DocWriteResponse extends ReplicationResponse implements To
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        ReplicationResponse.ShardInfo shardInfo = getShardInfo();
-       builder.field(Fields._INDEX, getIndex())
-           .field(Fields._TYPE, getType())
-           .field(Fields._ID, getId())
-           .field(Fields._VERSION, getVersion());
-       shardInfo.toXContent(builder, params);
        builder.field(Fields._INDEX, shardId.getIndex())
            .field(Fields._TYPE, type)
            .field(Fields._ID, id)
            .field(Fields._VERSION, version);
        //nocommit: i'm not sure we want to expose it in the api but it will be handy for debugging while we work...
        builder.field(Fields._SHARD_ID, shardId.id());
        if (getSeqNo() >= 0) {
            builder.field(Fields._SEQ_NO, getSeqNo());
        }
        shardInfo.toXContent(builder, params);
        return builder;
    }
}
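Implementing StatusToXContent means a REST handler can take both the response body and the HTTP code from the same object. A minimal sketch of such a consumer, assuming nothing beyond the accessors shown above (the class and method names here are illustrative, not part of this commit):

import java.io.IOException;

import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;

class DocWriteResponseRestSketch {
    // Illustrative only: render the body, then reuse the ShardInfo-derived HTTP status.
    static void send(RestChannel channel, DocWriteResponse response) throws IOException {
        XContentBuilder builder = channel.newBuilder();
        builder.startObject();
        response.toXContent(builder, ToXContent.EMPTY_PARAMS);
        builder.endObject();
        channel.sendResponse(new BytesRestResponse(response.status(), builder));
    }
}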

View File

@@ -78,19 +78,19 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
            indices(indices);
            aliases(aliases);
        }

        public AliasActions(AliasAction.Type type, String index, String alias) {
            aliasAction = new AliasAction(type);
            indices(index);
            aliases(alias);
        }

        AliasActions(AliasAction.Type type, String[] index, String alias) {
            aliasAction = new AliasAction(type);
            indices(index);
            aliases(alias);
        }

        public AliasActions(AliasAction action) {
            this.aliasAction = action;
            indices(action.index());

@@ -110,7 +110,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
            aliasAction.filter(filter);
            return this;
        }

        public AliasActions filter(QueryBuilder filter) {
            aliasAction.filter(filter);
            return this;

@@ -197,7 +197,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
            aliasAction = readAliasAction(in);
            return this;
        }

        public void writeTo(StreamOutput out) throws IOException {
            out.writeStringArray(indices);
            out.writeStringArray(aliases);

@@ -225,7 +225,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
        addAliasAction(new AliasActions(action));
        return this;
    }

    /**
     * Adds an alias to the index.
     * @param alias The alias

@@ -247,8 +247,8 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
        addAliasAction(new AliasActions(AliasAction.Type.ADD, indices, alias).filter(filterBuilder));
        return this;
    }

    /**
     * Removes an alias to the index.
     *

@@ -259,7 +259,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
        addAliasAction(new AliasActions(AliasAction.Type.REMOVE, indices, aliases));
        return this;
    }

    /**
     * Removes an alias to the index.
     *

@@ -286,25 +286,14 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
            return addValidationError("Must specify at least one alias action", validationException);
        }
        for (AliasActions aliasAction : allAliasActions) {
-           if (aliasAction.actionType() == AliasAction.Type.ADD) {
-               if (aliasAction.aliases.length != 1) {
-                   validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
-                       + "] requires exactly one [alias] to be set", validationException);
-               }
-               if (!Strings.hasText(aliasAction.aliases[0])) {
-                   validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
-                       + "] requires an [alias] to be set", validationException);
-               }
-           } else {
            if (aliasAction.aliases.length == 0) {
                validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
                    + "]: aliases may not be empty", validationException);
            }
            for (String alias : aliasAction.aliases) {
                if (!Strings.hasText(alias)) {
                    validationException = addValidationError("Alias action [" + aliasAction.actionType().name().toLowerCase(Locale.ENGLISH)
                        + "]: [alias] may not be empty string", validationException);
                }
            }
-           }
            if (CollectionUtils.isEmpty(aliasAction.indices)) {

@@ -345,7 +334,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesReq
    public IndicesOptions indicesOptions() {
        return INDICES_OPTIONS;
    }

    private static AliasActions readAliasActions(StreamInput in) throws IOException {
        AliasActions actions = new AliasActions();
        return actions.readFrom(in);

View File

@@ -18,6 +18,7 @@
 */
package org.elasticsearch.action.admin.indices.analyze;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.single.shard.SingleShardRequest;
import org.elasticsearch.common.Strings;

@@ -46,6 +47,10 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
    private String field;

    private boolean explain = false;

    private String[] attributes = Strings.EMPTY_ARRAY;

    public AnalyzeRequest() {
    }

@@ -86,6 +91,9 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
    }

    public AnalyzeRequest tokenFilters(String... tokenFilters) {
        if (tokenFilters == null) {
            throw new IllegalArgumentException("token filters must not be null");
        }
        this.tokenFilters = tokenFilters;
        return this;
    }

@@ -95,6 +103,9 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
    }

    public AnalyzeRequest charFilters(String... charFilters) {
        if (charFilters == null) {
            throw new IllegalArgumentException("char filters must not be null");
        }
        this.charFilters = charFilters;
        return this;
    }

@@ -112,18 +123,33 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
        return this.field;
    }

    public AnalyzeRequest explain(boolean explain) {
        this.explain = explain;
        return this;
    }

    public boolean explain() {
        return this.explain;
    }

    public AnalyzeRequest attributes(String... attributes) {
        if (attributes == null) {
            throw new IllegalArgumentException("attributes must not be null");
        }
        this.attributes = attributes;
        return this;
    }

    public String[] attributes() {
        return this.attributes;
    }

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException validationException = null;
        if (text == null || text.length == 0) {
            validationException = addValidationError("text is missing", validationException);
        }
-       if (tokenFilters == null) {
-           validationException = addValidationError("token filters must not be null", validationException);
-       }
-       if (charFilters == null) {
-           validationException = addValidationError("char filters must not be null", validationException);
-       }
        return validationException;
    }

@@ -136,6 +162,10 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
        tokenFilters = in.readStringArray();
        charFilters = in.readStringArray();
        field = in.readOptionalString();
        if (in.getVersion().onOrAfter(Version.V_2_2_0)) {
            explain = in.readBoolean();
            attributes = in.readStringArray();
        }
    }

    @Override
@@ -147,5 +177,9 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
        out.writeStringArray(tokenFilters);
        out.writeStringArray(charFilters);
        out.writeOptionalString(field);
        if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
            out.writeBoolean(explain);
            out.writeStringArray(attributes);
        }
    }
}

View File

@@ -78,6 +78,22 @@ public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder<An
        return this;
    }

    /**
     * Sets whether a detailed, per-stage explanation of the analysis should be returned
     */
    public AnalyzeRequestBuilder setExplain(boolean explain) {
        request.explain(explain);
        return this;
    }

    /**
     * Sets the token attributes to include in the detailed response
     */
    public AnalyzeRequestBuilder setAttributes(String attributes) {
        request.attributes(attributes);
        return this;
    }

    /**
     * Sets texts to analyze
     */
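A hedged usage sketch for the two new builder options; the client setup, analyzer name and input text are illustrative (given a `Client client`):

// Ask for a per-stage breakdown of the analysis instead of the flat token list,
// restricting the extra token attributes to "keyword".
AnalyzeResponse response = client.admin().indices()
        .prepareAnalyze("quick brown fox")
        .setAnalyzer("standard")
        .setExplain(true)
        .setAttributes("keyword")
        .get();
DetailAnalyzeResponse detail = response.detail(); // the flat tokens list is null when explain is set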

View File

@@ -18,6 +18,7 @@
 */
package org.elasticsearch.action.admin.indices.analyze;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@@ -30,28 +31,32 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

/**
 *
 */
public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeResponse.AnalyzeToken>, ToXContent {

-   public static class AnalyzeToken implements Streamable {
    public static class AnalyzeToken implements Streamable, ToXContent {
        private String term;
        private int startOffset;
        private int endOffset;
        private int position;
        private Map<String, Object> attributes;
        private String type;

        AnalyzeToken() {
        }

-       public AnalyzeToken(String term, int position, int startOffset, int endOffset, String type) {
        public AnalyzeToken(String term, int position, int startOffset, int endOffset, String type,
                            Map<String, Object> attributes) {
            this.term = term;
            this.position = position;
            this.startOffset = startOffset;
            this.endOffset = endOffset;
            this.type = type;
            this.attributes = attributes;
        }

        public String getTerm() {

@@ -74,6 +79,27 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
            return this.type;
        }

        public Map<String, Object> getAttributes() {
            return this.attributes;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(Fields.TOKEN, term);
            builder.field(Fields.START_OFFSET, startOffset);
            builder.field(Fields.END_OFFSET, endOffset);
            builder.field(Fields.TYPE, type);
            builder.field(Fields.POSITION, position);
            if (attributes != null && !attributes.isEmpty()) {
                for (Map.Entry<String, Object> entity : attributes.entrySet()) {
                    builder.field(entity.getKey(), entity.getValue());
                }
            }
            builder.endObject();
            return builder;
        }

        public static AnalyzeToken readAnalyzeToken(StreamInput in) throws IOException {
            AnalyzeToken analyzeToken = new AnalyzeToken();
            analyzeToken.readFrom(in);

@@ -87,6 +113,9 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
            endOffset = in.readInt();
            position = in.readVInt();
            type = in.readOptionalString();
            if (in.getVersion().onOrAfter(Version.V_2_2_0)) {
                attributes = (Map<String, Object>) in.readGenericValue();
            }
        }

        @Override
@@ -96,22 +125,32 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
            out.writeInt(endOffset);
            out.writeVInt(position);
            out.writeOptionalString(type);
            if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
                out.writeGenericValue(attributes);
            }
        }
    }

    private DetailAnalyzeResponse detail;

    private List<AnalyzeToken> tokens;

    AnalyzeResponse() {
    }

-   public AnalyzeResponse(List<AnalyzeToken> tokens) {
    public AnalyzeResponse(List<AnalyzeToken> tokens, DetailAnalyzeResponse detail) {
        this.tokens = tokens;
        this.detail = detail;
    }

    public List<AnalyzeToken> getTokens() {
        return this.tokens;
    }

    public DetailAnalyzeResponse detail() {
        return this.detail;
    }

    @Override
    public Iterator<AnalyzeToken> iterator() {
        return tokens.iterator();

@@ -119,17 +158,19 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-       builder.startArray(Fields.TOKENS);
-       for (AnalyzeToken token : tokens) {
-           builder.startObject();
-           builder.field(Fields.TOKEN, token.getTerm());
-           builder.field(Fields.START_OFFSET, token.getStartOffset());
-           builder.field(Fields.END_OFFSET, token.getEndOffset());
-           builder.field(Fields.TYPE, token.getType());
-           builder.field(Fields.POSITION, token.getPosition());
-           builder.endObject();
-       }
-       builder.endArray();
        if (tokens != null) {
            builder.startArray(Fields.TOKENS);
            for (AnalyzeToken token : tokens) {
                token.toXContent(builder, params);
            }
            builder.endArray();
        }

        if (detail != null) {
            builder.startObject(Fields.DETAIL);
            detail.toXContent(builder, params);
            builder.endObject();
        }
        return builder;
    }

@@ -141,14 +182,24 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
        for (int i = 0; i < size; i++) {
            tokens.add(AnalyzeToken.readAnalyzeToken(in));
        }
        if (in.getVersion().onOrAfter(Version.V_2_2_0)) {
            detail = in.readOptionalStreamable(DetailAnalyzeResponse::new);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
-       out.writeVInt(tokens.size());
-       for (AnalyzeToken token : tokens) {
-           token.writeTo(out);
-       }
        if (tokens != null) {
            out.writeVInt(tokens.size());
            for (AnalyzeToken token : tokens) {
                token.writeTo(out);
            }
        } else {
            out.writeVInt(0);
        }
        if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
            out.writeOptionalStreamable(detail);
        }
    }

@@ -159,5 +210,6 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
        static final XContentBuilderString END_OFFSET = new XContentBuilderString("end_offset");
        static final XContentBuilderString TYPE = new XContentBuilderString("type");
        static final XContentBuilderString POSITION = new XContentBuilderString("position");
        static final XContentBuilderString DETAIL = new XContentBuilderString("detail");
    }
}
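For orientation, the XContent these methods emit has roughly the following shape when explain is enabled; without explain, only the flat "tokens" array appears. Field names come from the code above; all values are illustrative:

{
  "detail": {
    "custom_analyzer": true,
    "charfilters": [
      { "name": "html_strip", "filtered_text": ["quick brown fox"] }
    ],
    "tokenizer": {
      "name": "standard",
      "tokens": [
        { "token": "quick", "start_offset": 0, "end_offset": 5, "type": "<ALPHANUM>", "position": 0, "keyword": false }
      ]
    },
    "tokenfilters": [
      {
        "name": "lowercase",
        "tokens": [
          { "token": "quick", "start_offset": 0, "end_offset": 5, "type": "<ALPHANUM>", "position": 0, "keyword": false }
        ]
      }
    ]
  }
}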

View File

@@ -0,0 +1,319 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.analyze;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import java.io.IOException;
public class DetailAnalyzeResponse implements Streamable, ToXContent {
DetailAnalyzeResponse() {
}
private boolean customAnalyzer = false;
private AnalyzeTokenList analyzer;
private CharFilteredText[] charfilters;
private AnalyzeTokenList tokenizer;
private AnalyzeTokenList[] tokenfilters;
public DetailAnalyzeResponse(AnalyzeTokenList analyzer) {
this(false, analyzer, null, null, null);
}
public DetailAnalyzeResponse(CharFilteredText[] charfilters, AnalyzeTokenList tokenizer, AnalyzeTokenList[] tokenfilters) {
this(true, null, charfilters, tokenizer, tokenfilters);
}
public DetailAnalyzeResponse(boolean customAnalyzer,
AnalyzeTokenList analyzer,
CharFilteredText[] charfilters,
AnalyzeTokenList tokenizer,
AnalyzeTokenList[] tokenfilters) {
this.customAnalyzer = customAnalyzer;
this.analyzer = analyzer;
this.charfilters = charfilters;
this.tokenizer = tokenizer;
this.tokenfilters = tokenfilters;
}
public AnalyzeTokenList analyzer() {
return this.analyzer;
}
public DetailAnalyzeResponse analyzer(AnalyzeTokenList analyzer) {
this.analyzer = analyzer;
return this;
}
public CharFilteredText[] charfilters() {
return this.charfilters;
}
public DetailAnalyzeResponse charfilters(CharFilteredText[] charfilters) {
this.charfilters = charfilters;
return this;
}
public AnalyzeTokenList tokenizer() {
return tokenizer;
}
public DetailAnalyzeResponse tokenizer(AnalyzeTokenList tokenizer) {
this.tokenizer = tokenizer;
return this;
}
public AnalyzeTokenList[] tokenfilters() {
return tokenfilters;
}
public DetailAnalyzeResponse tokenfilters(AnalyzeTokenList[] tokenfilters) {
this.tokenfilters = tokenfilters;
return this;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.CUSTOM_ANALYZER, customAnalyzer);
if (analyzer != null) {
builder.startObject(Fields.ANALYZER);
analyzer.toXContentWithoutObject(builder, params);
builder.endObject();
}
if (charfilters != null) {
builder.startArray(Fields.CHARFILTERS);
for (CharFilteredText charfilter : charfilters) {
charfilter.toXContent(builder, params);
}
builder.endArray();
}
if (tokenizer != null) {
builder.startObject(Fields.TOKENIZER);
tokenizer.toXContentWithoutObject(builder, params);
builder.endObject();
}
if (tokenfilters != null) {
builder.startArray(Fields.TOKENFILTERS);
for (AnalyzeTokenList tokenfilter : tokenfilters) {
tokenfilter.toXContent(builder, params);
}
builder.endArray();
}
return builder;
}
static final class Fields {
static final XContentBuilderString NAME = new XContentBuilderString("name");
static final XContentBuilderString FILTERED_TEXT = new XContentBuilderString("filtered_text");
static final XContentBuilderString CUSTOM_ANALYZER = new XContentBuilderString("custom_analyzer");
static final XContentBuilderString ANALYZER = new XContentBuilderString("analyzer");
static final XContentBuilderString CHARFILTERS = new XContentBuilderString("charfilters");
static final XContentBuilderString TOKENIZER = new XContentBuilderString("tokenizer");
static final XContentBuilderString TOKENFILTERS = new XContentBuilderString("tokenfilters");
}
@Override
public void readFrom(StreamInput in) throws IOException {
this.customAnalyzer = in.readBoolean();
if (customAnalyzer) {
tokenizer = AnalyzeTokenList.readAnalyzeTokenList(in);
int size = in.readVInt();
if (size > 0) {
charfilters = new CharFilteredText[size];
for (int i = 0; i < size; i++) {
charfilters[i] = CharFilteredText.readCharFilteredText(in);
}
}
size = in.readVInt();
if (size > 0) {
tokenfilters = new AnalyzeTokenList[size];
for (int i = 0; i < size; i++) {
tokenfilters[i] = AnalyzeTokenList.readAnalyzeTokenList(in);
}
}
} else {
analyzer = AnalyzeTokenList.readAnalyzeTokenList(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(customAnalyzer);
if (customAnalyzer) {
tokenizer.writeTo(out);
if (charfilters != null) {
out.writeVInt(charfilters.length);
for (CharFilteredText charfilter : charfilters) {
charfilter.writeTo(out);
}
} else {
out.writeVInt(0);
}
if (tokenfilters != null) {
out.writeVInt(tokenfilters.length);
for (AnalyzeTokenList tokenfilter : tokenfilters) {
tokenfilter.writeTo(out);
}
} else {
out.writeVInt(0);
}
} else {
analyzer.writeTo(out);
}
}
public static class AnalyzeTokenList implements Streamable, ToXContent {
private String name;
private AnalyzeResponse.AnalyzeToken[] tokens;
AnalyzeTokenList() {
}
public AnalyzeTokenList(String name, AnalyzeResponse.AnalyzeToken[] tokens) {
this.name = name;
this.tokens = tokens;
}
public String getName() {
return name;
}
public AnalyzeResponse.AnalyzeToken[] getTokens() {
return tokens;
}
public static AnalyzeTokenList readAnalyzeTokenList(StreamInput in) throws IOException {
AnalyzeTokenList list = new AnalyzeTokenList();
list.readFrom(in);
return list;
}
public XContentBuilder toXContentWithoutObject(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.NAME, this.name);
builder.startArray(AnalyzeResponse.Fields.TOKENS);
for (AnalyzeResponse.AnalyzeToken token : tokens) {
token.toXContent(builder, params);
}
builder.endArray();
return builder;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Fields.NAME, this.name);
builder.startArray(AnalyzeResponse.Fields.TOKENS);
for (AnalyzeResponse.AnalyzeToken token : tokens) {
token.toXContent(builder, params);
}
builder.endArray();
builder.endObject();
return builder;
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
int size = in.readVInt();
if (size > 0) {
tokens = new AnalyzeResponse.AnalyzeToken[size];
for (int i = 0; i < size; i++) {
tokens[i] = AnalyzeResponse.AnalyzeToken.readAnalyzeToken(in);
}
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
if (tokens != null) {
out.writeVInt(tokens.length);
for (AnalyzeResponse.AnalyzeToken token : tokens) {
token.writeTo(out);
}
} else {
out.writeVInt(0);
}
}
}
public static class CharFilteredText implements Streamable, ToXContent {
private String name;
private String[] texts;
CharFilteredText() {
}
public CharFilteredText(String name, String[] texts) {
this.name = name;
if (texts != null) {
this.texts = texts;
} else {
this.texts = Strings.EMPTY_ARRAY;
}
}
public String getName() {
return name;
}
public String[] getTexts() {
return texts;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Fields.NAME, name);
builder.field(Fields.FILTERED_TEXT, texts);
builder.endObject();
return builder;
}
public static CharFilteredText readCharFilteredText(StreamInput in) throws IOException {
CharFilteredText text = new CharFilteredText();
text.readFrom(in);
return text;
}
@Override
public void readFrom(StreamInput in) throws IOException {
name = in.readString();
texts = in.readStringArray();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(name);
out.writeStringArray(texts);
}
}
}

View File

@@ -20,10 +20,15 @@ package org.elasticsearch.action.admin.indices.analyze;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeReflector;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;

@@ -33,6 +38,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexService;

@@ -46,8 +52,8 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
-import java.util.ArrayList;
import java.io.Reader;
-import java.util.List;
import java.util.*;

/**
 * Transport action used to execute analyze requests
@@ -222,6 +228,23 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
            throw new IllegalArgumentException("failed to find analyzer");
        }

        List<AnalyzeResponse.AnalyzeToken> tokens = null;
        DetailAnalyzeResponse detail = null;

        if (request.explain()) {
            detail = detailAnalyze(request, analyzer, field);
        } else {
            tokens = simpleAnalyze(request, analyzer, field);
        }

        if (closeAnalyzer) {
            analyzer.close();
        }

        return new AnalyzeResponse(tokens, detail);
    }

    private static List<AnalyzeResponse.AnalyzeToken> simpleAnalyze(AnalyzeRequest request, Analyzer analyzer, String field) {
        List<AnalyzeResponse.AnalyzeToken> tokens = new ArrayList<>();
        int lastPosition = -1;
        int lastOffset = 0;

@@ -238,7 +261,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
                if (increment > 0) {
                    lastPosition = lastPosition + increment;
                }
-               tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(), lastOffset + offset.endOffset(), type.type()));
                tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(), lastOffset + offset.endOffset(), type.type(), null));
            }

            stream.end();

@@ -251,11 +274,211 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
                throw new ElasticsearchException("failed to analyze", e);
            }
        }
        return tokens;
    }

-       if (closeAnalyzer) {
-           analyzer.close();
-       }
-
-       return new AnalyzeResponse(tokens);

    private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analyzer analyzer, String field) {
        DetailAnalyzeResponse detailResponse;
        final Set<String> includeAttributes = new HashSet<>();
        if (request.attributes() != null) {
            for (String attribute : request.attributes()) {
                includeAttributes.add(attribute.toLowerCase(Locale.ROOT));
            }
        }

        CustomAnalyzer customAnalyzer = null;
if (analyzer instanceof CustomAnalyzer) {
customAnalyzer = (CustomAnalyzer) analyzer;
} else if (analyzer instanceof NamedAnalyzer && ((NamedAnalyzer) analyzer).analyzer() instanceof CustomAnalyzer) {
customAnalyzer = (CustomAnalyzer) ((NamedAnalyzer) analyzer).analyzer();
}
if (customAnalyzer != null) {
// custom analyzer: break the chain apart into char filters, tokenizer and token filters
CharFilterFactory[] charFilterFactories = customAnalyzer.charFilters();
TokenizerFactory tokenizerFactory = customAnalyzer.tokenizerFactory();
TokenFilterFactory[] tokenFilterFactories = customAnalyzer.tokenFilters();
String[][] charFiltersTexts = new String[charFilterFactories != null ? charFilterFactories.length : 0][request.text().length];
TokenListCreator[] tokenFiltersTokenListCreator = new TokenListCreator[tokenFilterFactories != null ? tokenFilterFactories.length : 0];
TokenListCreator tokenizerTokenListCreator = new TokenListCreator();
for (int textIndex = 0; textIndex < request.text().length; textIndex++) {
String charFilteredSource = request.text()[textIndex];
Reader reader = new FastStringReader(charFilteredSource);
if (charFilterFactories != null) {
for (int charFilterIndex = 0; charFilterIndex < charFilterFactories.length; charFilterIndex++) {
reader = charFilterFactories[charFilterIndex].create(reader);
Reader readerForWriteOut = new FastStringReader(charFilteredSource);
readerForWriteOut = charFilterFactories[charFilterIndex].create(readerForWriteOut);
charFilteredSource = writeCharStream(readerForWriteOut);
charFiltersTexts[charFilterIndex][textIndex] = charFilteredSource;
}
}
// analyzing only tokenizer
Tokenizer tokenizer = tokenizerFactory.create();
tokenizer.setReader(reader);
tokenizerTokenListCreator.analyze(tokenizer, customAnalyzer, field, includeAttributes);
// analyzing each tokenfilter
if (tokenFilterFactories != null) {
for (int tokenFilterIndex = 0; tokenFilterIndex < tokenFilterFactories.length; tokenFilterIndex++) {
if (tokenFiltersTokenListCreator[tokenFilterIndex] == null) {
tokenFiltersTokenListCreator[tokenFilterIndex] = new TokenListCreator();
}
TokenStream stream = createStackedTokenStream(request.text()[textIndex],
charFilterFactories, tokenizerFactory, tokenFilterFactories, tokenFilterIndex + 1);
tokenFiltersTokenListCreator[tokenFilterIndex].analyze(stream, customAnalyzer, field, includeAttributes);
}
}
}
DetailAnalyzeResponse.CharFilteredText[] charFilteredLists = new DetailAnalyzeResponse.CharFilteredText[charFiltersTexts.length];
if (charFilterFactories != null) {
for (int charFilterIndex = 0; charFilterIndex < charFiltersTexts.length; charFilterIndex++) {
charFilteredLists[charFilterIndex] = new DetailAnalyzeResponse.CharFilteredText(
charFilterFactories[charFilterIndex].name(), charFiltersTexts[charFilterIndex]);
}
}
DetailAnalyzeResponse.AnalyzeTokenList[] tokenFilterLists = new DetailAnalyzeResponse.AnalyzeTokenList[tokenFiltersTokenListCreator.length];
if (tokenFilterFactories != null) {
for (int tokenFilterIndex = 0; tokenFilterIndex < tokenFiltersTokenListCreator.length; tokenFilterIndex++) {
tokenFilterLists[tokenFilterIndex] = new DetailAnalyzeResponse.AnalyzeTokenList(
tokenFilterFactories[tokenFilterIndex].name(), tokenFiltersTokenListCreator[tokenFilterIndex].getArrayTokens());
}
}
detailResponse = new DetailAnalyzeResponse(charFilteredLists, new DetailAnalyzeResponse.AnalyzeTokenList(tokenizerFactory.name(), tokenizerTokenListCreator.getArrayTokens()), tokenFilterLists);
} else {
String name;
if (analyzer instanceof NamedAnalyzer) {
name = ((NamedAnalyzer) analyzer).name();
} else {
name = analyzer.getClass().getName();
}
TokenListCreator tokenListCreator = new TokenListCreator();
for (String text : request.text()) {
tokenListCreator.analyze(analyzer.tokenStream(field, text), analyzer, field,
includeAttributes);
}
detailResponse = new DetailAnalyzeResponse(new DetailAnalyzeResponse.AnalyzeTokenList(name, tokenListCreator.getArrayTokens()));
}
return detailResponse;
}
private static TokenStream createStackedTokenStream(String source, CharFilterFactory[] charFilterFactories, TokenizerFactory tokenizerFactory, TokenFilterFactory[] tokenFilterFactories, int current) {
Reader reader = new FastStringReader(source);
for (CharFilterFactory charFilterFactory : charFilterFactories) {
reader = charFilterFactory.create(reader);
}
Tokenizer tokenizer = tokenizerFactory.create();
tokenizer.setReader(reader);
TokenStream tokenStream = tokenizer;
for (int i = 0; i < current; i++) {
tokenStream = tokenFilterFactories[i].create(tokenStream);
}
return tokenStream;
}
private static String writeCharStream(Reader input) {
final int BUFFER_SIZE = 1024;
char[] buf = new char[BUFFER_SIZE];
int len;
StringBuilder sb = new StringBuilder();
do {
try {
len = input.read(buf, 0, BUFFER_SIZE);
} catch (IOException e) {
throw new ElasticsearchException("failed to analyze (charFiltering)", e);
}
if (len > 0)
sb.append(buf, 0, len);
} while (len == BUFFER_SIZE);
return sb.toString();
}
private static class TokenListCreator {
int lastPosition = -1;
int lastOffset = 0;
List<AnalyzeResponse.AnalyzeToken> tokens;
TokenListCreator() {
tokens = new ArrayList<>();
}
private void analyze(TokenStream stream, Analyzer analyzer, String field, Set<String> includeAttributes) {
try {
stream.reset();
CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class);
TypeAttribute type = stream.addAttribute(TypeAttribute.class);
while (stream.incrementToken()) {
int increment = posIncr.getPositionIncrement();
if (increment > 0) {
lastPosition = lastPosition + increment;
}
tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(),
    lastOffset + offset.endOffset(), type.type(), extractExtendedAttributes(stream, includeAttributes)));
}
stream.end();
lastOffset += offset.endOffset();
lastPosition += posIncr.getPositionIncrement();
lastPosition += analyzer.getPositionIncrementGap(field);
lastOffset += analyzer.getOffsetGap(field);
} catch (IOException e) {
throw new ElasticsearchException("failed to analyze", e);
} finally {
IOUtils.closeWhileHandlingException(stream);
}
}
private AnalyzeResponse.AnalyzeToken[] getArrayTokens() {
return tokens.toArray(new AnalyzeResponse.AnalyzeToken[tokens.size()]);
}
}
/**
 * Extracts the additional (non-core) token attributes from the stream,
 * grouped by attribute key.
 *
 * @param stream current TokenStream
 * @param includeAttributes attribute keys to include; empty means include all
 * @return map of attribute key to value
 */
private static Map<String, Object> extractExtendedAttributes(TokenStream stream, final Set<String> includeAttributes) {
final Map<String, Object> extendedAttributes = new TreeMap<>();
stream.reflectWith(new AttributeReflector() {
@Override
public void reflect(Class<? extends Attribute> attClass, String key, Object value) {
if (CharTermAttribute.class.isAssignableFrom(attClass))
return;
if (PositionIncrementAttribute.class.isAssignableFrom(attClass))
return;
if (OffsetAttribute.class.isAssignableFrom(attClass))
return;
if (TypeAttribute.class.isAssignableFrom(attClass))
return;
if (includeAttributes == null || includeAttributes.isEmpty() || includeAttributes.contains(key.toLowerCase(Locale.ROOT))) {
if (value instanceof BytesRef) {
final BytesRef p = (BytesRef) value;
value = p.toString();
}
extendedAttributes.put(key, value);
}
}
});
return extendedAttributes;
    }
}
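To make extractExtendedAttributes() concrete, here is a self-contained, hedged sketch of the same reflectWith pattern against a plain Lucene stream; the analyzer and text are illustrative, and the transport action additionally skips the four core attributes and honors the include set:

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.KeywordAnalyzer;

public class ReflectAttributesSketch {
    public static void main(String[] args) throws IOException {
        try (TokenStream stream = new KeywordAnalyzer().tokenStream("f", "quick")) {
            stream.reset();
            while (stream.incrementToken()) {
                final Map<String, Object> attrs = new TreeMap<>();
                // Walk every attribute the stream carries and record it under its reflected key,
                // just as extractExtendedAttributes() does before filtering.
                stream.reflectWith((attClass, key, value) -> attrs.put(key, value));
                System.out.println(attrs); // e.g. includes "keyword", term/offset entries, ...
            }
            stream.end();
        }
    }
}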

View File

@@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.indices.flush;

import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;

@@ -29,8 +30,8 @@ public class ShardFlushRequest extends ReplicationRequest<ShardFlushRequest> {

    private FlushRequest request = new FlushRequest();

-   public ShardFlushRequest(FlushRequest request) {
-       super(request);
    public ShardFlushRequest(FlushRequest request, ShardId shardId) {
        super(request, shardId);
        this.request = request;
    }

@@ -53,5 +54,8 @@ public class ShardFlushRequest extends ReplicationRequest<ShardFlushRequest> {
        request.writeTo(out);
    }

    @Override
    public String toString() {
        return "flush {" + super.toString() + "}";
    }
}

View File

@@ -53,7 +53,7 @@ public class TransportFlushAction extends TransportBroadcastReplicationAction<Fl
    @Override
    protected ShardFlushRequest newShardRequest(FlushRequest request, ShardId shardId) {
-       return new ShardFlushRequest(request).setShardId(shardId);
        return new ShardFlushRequest(request, shardId);
    }

    @Override
View File

@@ -23,18 +23,15 @@ import org.elasticsearch.action.ReplicationResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
-import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -61,15 +58,15 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
    }

    @Override
-   protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
    protected Tuple<ReplicationResponse, ShardFlushRequest> shardOperationOnPrimary(MetaData metaData, ShardFlushRequest shardRequest) throws Throwable {
-       IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).getShard(shardRequest.shardId.id());
        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
-       indexShard.flush(shardRequest.request.getRequest());
        indexShard.flush(shardRequest.getRequest());
        logger.trace("{} flush request executed on primary", indexShard.shardId());
-       return new Tuple<>(new ReplicationResponse(), shardRequest.request);
        return new Tuple<>(new ReplicationResponse(), shardRequest);
    }

    @Override
-   protected void shardOperationOnReplica(ShardId shardId, ShardFlushRequest request) {
    protected void shardOperationOnReplica(ShardFlushRequest request) {
        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
        indexShard.flush(request.getRequest());
        logger.trace("{} flush request executed on replica", indexShard.shardId());

@@ -81,18 +78,13 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
    }

    @Override
-   protected ShardIterator shards(ClusterState clusterState, InternalRequest request) {
-       return clusterState.getRoutingTable().indicesRouting().get(request.concreteIndex()).getShards().get(request.request().shardId().getId()).shardsIt();
-   }
    protected ClusterBlockLevel globalBlockLevel() {
        return ClusterBlockLevel.METADATA_WRITE;
    }

    @Override
-   protected ClusterBlockException checkGlobalBlock(ClusterState state) {
-       return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
-   }
-
-   @Override
-   protected ClusterBlockException checkRequestBlock(ClusterState state, InternalRequest request) {
-       return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, new String[]{request.concreteIndex()});
-   }
    protected ClusterBlockLevel indexBlockLevel() {
        return ClusterBlockLevel.METADATA_WRITE;
    }

    @Override

View File

@@ -54,7 +54,7 @@ public class TransportRefreshAction extends TransportBroadcastReplicationAction<
    @Override
    protected ReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) {
-       return new ReplicationRequest(request).setShardId(shardId);
        return new ReplicationRequest(request, shardId);
    }

    @Override

@@ -24,13 +24,11 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.replication.ReplicationRequest;
 import org.elasticsearch.action.support.replication.TransportReplicationAction;
 import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
-import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
@@ -62,15 +60,16 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
     }

     @Override
-    protected Tuple<ReplicationResponse, ReplicationRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).getShard(shardRequest.shardId.id());
+    protected Tuple<ReplicationResponse, ReplicationRequest> shardOperationOnPrimary(MetaData metaData, ReplicationRequest shardRequest) throws Throwable {
+        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId().getIndex()).getShard(shardRequest.shardId().id());
         indexShard.refresh("api");
         logger.trace("{} refresh request executed on primary", indexShard.shardId());
-        return new Tuple<>(new ReplicationResponse(), shardRequest.request);
+        return new Tuple<>(new ReplicationResponse(), shardRequest);
     }

     @Override
-    protected void shardOperationOnReplica(ShardId shardId, ReplicationRequest request) {
+    protected void shardOperationOnReplica(ReplicationRequest request) {
+        final ShardId shardId = request.shardId();
         IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
         indexShard.refresh("api");
         logger.trace("{} refresh request executed on replica", indexShard.shardId());
@@ -82,18 +81,13 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
     }

     @Override
-    protected ShardIterator shards(ClusterState clusterState, InternalRequest request) {
-        return clusterState.getRoutingTable().indicesRouting().get(request.concreteIndex()).getShards().get(request.request().shardId().getId()).shardsIt();
+    protected ClusterBlockLevel globalBlockLevel() {
+        return ClusterBlockLevel.METADATA_WRITE;
     }

     @Override
-    protected ClusterBlockException checkGlobalBlock(ClusterState state) {
-        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
-    }
-
-    @Override
-    protected ClusterBlockException checkRequestBlock(ClusterState state, InternalRequest request) {
-        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, new String[]{request.concreteIndex()});
+    protected ClusterBlockLevel indexBlockLevel() {
+        return ClusterBlockLevel.METADATA_WRITE;
     }

     @Override


@@ -19,6 +19,7 @@
 package org.elasticsearch.action.bulk;

+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.delete.DeleteResponse;
@@ -27,6 +28,9 @@ import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.rest.RestStatus;

 import java.io.IOException;
@@ -35,7 +39,39 @@ import java.io.IOException;
  * Represents a single item response for an action executed as part of the bulk API. Holds the index/type/id
  * of the relevant action, and if it has failed or not (with the failure message in case it failed).
  */
-public class BulkItemResponse implements Streamable {
+public class BulkItemResponse implements Streamable, StatusToXContent {
+
+    @Override
+    public RestStatus status() {
+        return failure == null ? response.status() : failure.getStatus();
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject(opType);
+        if (failure == null) {
+            response.toXContent(builder, params);
+            builder.field(Fields.STATUS, response.status());
+        } else {
+            builder.field(Fields._INDEX, failure.getIndex());
+            builder.field(Fields._TYPE, failure.getType());
+            builder.field(Fields._ID, failure.getId());
+            builder.field(Fields.STATUS, failure.getStatus());
+            builder.startObject(Fields.ERROR);
+            ElasticsearchException.toXContent(builder, params, failure.getCause());
+            builder.endObject();
+        }
+        builder.endObject();
+        return builder;
+    }
+
+    static final class Fields {
+        static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
+        static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
+        static final XContentBuilderString _ID = new XContentBuilderString("_id");
+        static final XContentBuilderString STATUS = new XContentBuilderString("status");
+        static final XContentBuilderString ERROR = new XContentBuilderString("error");
+    }

 /**
  * Represents a failure.
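With BulkItemResponse now a StatusToXContent, the REST layer can render each bulk item directly from the response object: one object per item, keyed by the operation type, carrying a per-item status and, on failure, a structured error. Illustratively (all field values invented for the example), a conflicting index item would render along the lines of {"index": {"_index": "test", "_type": "doc", "_id": "1", "status": 409, "error": {...}}}, while a successful item embeds the wrapped response's own fields next to its status.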


@@ -40,10 +40,8 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
     public BulkShardRequest() {
     }

-    BulkShardRequest(BulkRequest bulkRequest, String index, int shardId, boolean refresh, BulkItemRequest[] items) {
-        super(bulkRequest);
-        this.index = index;
-        this.setShardId(new ShardId(index, shardId));
+    BulkShardRequest(BulkRequest bulkRequest, ShardId shardId, boolean refresh, BulkItemRequest[] items) {
+        super(bulkRequest, shardId);
         this.items = items;
         this.refresh = refresh;
     }
@@ -93,4 +91,9 @@ public class BulkShardRequest extends ReplicationRequest<BulkShardRequest> {
         }
         refresh = in.readBoolean();
     }
+
+    @Override
+    public String toString() {
+        return "shard bulk {" + super.toString() + "}";
+    }
 }
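The added toString composes with the ReplicationRequest#toString introduced later in this diff, which prints the resolved target shard, so a shard-level bulk request logs as its shard id inside a "shard bulk {...}" marker. The layering in isolation (a trivial sketch with invented names):

class BaseRequest {
    private final String target;

    BaseRequest(String target) {
        this.target = target;
    }

    @Override
    public String toString() {
        return target; // e.g. "[index][2]"
    }
}

class ShardBulkRequest extends BaseRequest {
    ShardBulkRequest(String target) {
        super(target);
    }

    @Override
    public String toString() {
        // Wraps the base representation, mirroring "shard bulk {" + super.toString() + "}" above.
        return "shard bulk {" + super.toString() + "}";
    }
}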


@@ -275,7 +275,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
                     list.add(new BulkItemRequest(i, new DeleteRequest(deleteRequest)));
                 }
             } else {
-                ShardId shardId = clusterService.operationRouting().deleteShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
+                ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
                 List<BulkItemRequest> list = requestsByShard.get(shardId);
                 if (list == null) {
                     list = new ArrayList<>();
@@ -312,7 +312,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
         for (Map.Entry<ShardId, List<BulkItemRequest>> entry : requestsByShard.entrySet()) {
             final ShardId shardId = entry.getKey();
             final List<BulkItemRequest> requests = entry.getValue();
-            BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId.index().name(), shardId.id(), bulkRequest.refresh(), requests.toArray(new BulkItemRequest[requests.size()]));
+            BulkShardRequest bulkShardRequest = new BulkShardRequest(bulkRequest, shardId, bulkRequest.refresh(), requests.toArray(new BulkItemRequest[requests.size()]));
             bulkShardRequest.consistencyLevel(bulkRequest.consistencyLevel());
             bulkShardRequest.timeout(bulkRequest.timeout());
             shardBulkAction.execute(bulkShardRequest, new ActionListener<BulkShardResponse>() {
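TransportBulkAction fans a bulk request out by first bucketing items per target shard and then sending one BulkShardRequest per bucket. The bucketing step in isolation (a self-contained sketch; shardFor is a stand-in, since Elasticsearch resolves the shard through OperationRouting as shown above):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class BulkFanOut {
    // Stand-in routing: hash a routing key onto a fixed shard count.
    static int shardFor(String routingKey, int numShards) {
        return Math.floorMod(routingKey.hashCode(), numShards);
    }

    static Map<Integer, List<String>> groupByShard(List<String> docIds, int numShards) {
        Map<Integer, List<String>> byShard = new HashMap<>();
        for (String id : docIds) {
            // One bucket per shard; each bucket later becomes one shard-level request.
            byShard.computeIfAbsent(shardFor(id, numShards), s -> new ArrayList<>()).add(id);
        }
        return byShard;
    }
}

computeIfAbsent creates each per-shard list lazily, which has the same effect as the explicit null check in the code above.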


@@ -35,12 +35,11 @@ import org.elasticsearch.action.update.UpdateHelper;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
-import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
@@ -51,8 +50,6 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.engine.VersionConflictEngineException;
-import org.elasticsearch.index.mapper.Mapping;
-import org.elasticsearch.index.mapper.SourceToParse;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.translog.Translog;
@@ -89,11 +86,6 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
         this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
     }

-    @Override
-    protected boolean checkWriteConsistency() {
-        return true;
-    }
-
     @Override
     protected TransportRequestOptions transportOptions() {
         return BulkAction.INSTANCE.transportOptions(settings);
@@ -110,15 +102,9 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
     }

     @Override
-    protected ShardIterator shards(ClusterState clusterState, InternalRequest request) {
-        return clusterState.routingTable().index(request.concreteIndex()).shard(request.request().shardId().id()).shardsIt();
-    }
-
-    @Override
-    protected Tuple<BulkShardResponse, BulkShardRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
-        final BulkShardRequest request = shardRequest.request;
+    protected Tuple<BulkShardResponse, BulkShardRequest> shardOperationOnPrimary(MetaData metaData, BulkShardRequest request) {
         final IndexService indexService = indicesService.indexServiceSafe(request.index());
-        final IndexShard indexShard = indexService.getShard(shardRequest.shardId.id());
+        final IndexShard indexShard = indexService.getShard(request.shardId().id());

         long[] preVersions = new long[request.items().length];
         VersionType[] preVersionTypes = new VersionType[request.items().length];
@@ -130,7 +116,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
                 preVersions[requestIndex] = indexRequest.version();
                 preVersionTypes[requestIndex] = indexRequest.versionType();
                 try {
-                    WriteResult<IndexResponse> result = shardIndexOperation(request, indexRequest, clusterState, indexShard, true);
+                    WriteResult<IndexResponse> result = shardIndexOperation(request, indexRequest, metaData, indexShard, true);
                     location = locationToSync(location, result.location);
                     // add the response
                     IndexResponse indexResponse = result.response();
@@ -148,9 +134,9 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
                         throw (ElasticsearchException) e;
                     }
                     if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
-                        logger.trace("{} failed to execute bulk item (index) {}", e, shardRequest.shardId, indexRequest);
+                        logger.trace("{} failed to execute bulk item (index) {}", e, request.shardId(), indexRequest);
                     } else {
-                        logger.debug("{} failed to execute bulk item (index) {}", e, shardRequest.shardId, indexRequest);
+                        logger.debug("{} failed to execute bulk item (index) {}", e, request.shardId(), indexRequest);
                     }
                     // if its a conflict failure, and we already executed the request on a primary (and we execute it
                     // again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
@@ -185,9 +171,9 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
                         throw (ElasticsearchException) e;
                     }
                     if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) {
-                        logger.trace("{} failed to execute bulk item (delete) {}", e, shardRequest.shardId, deleteRequest);
+                        logger.trace("{} failed to execute bulk item (delete) {}", e, request.shardId(), deleteRequest);
                     } else {
-                        logger.debug("{} failed to execute bulk item (delete) {}", e, shardRequest.shardId, deleteRequest);
+                        logger.debug("{} failed to execute bulk item (delete) {}", e, request.shardId(), deleteRequest);
                     }
                     // if its a conflict failure, and we already executed the request on a primary (and we execute it
                     // again, due to primary relocation and only processing up to N bulk items when the shard gets closed)
@@ -207,7 +193,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
                 for (int updateAttemptsCount = 0; updateAttemptsCount <= updateRequest.retryOnConflict(); updateAttemptsCount++) {
                     UpdateResult updateResult;
                     try {
-                        updateResult = shardUpdateOperation(clusterState, request, updateRequest, indexShard);
+                        updateResult = shardUpdateOperation(metaData, request, updateRequest, indexShard);
                     } catch (Throwable t) {
                         updateResult = new UpdateResult(null, null, false, t, null);
                     }
@@ -224,10 +210,10 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
                                 // add the response
                                 IndexResponse indexResponse = result.response();
                                 UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(),
                                         indexResponse.getType(), indexResponse.getId(), indexResponse.getSeqNo(), indexResponse.getVersion(), indexResponse.isCreated());
                                 if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
                                     Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
-                                    updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, shardRequest.request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
+                                    updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
                                 }
                                 item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), indexRequest);
                                 setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
@@ -237,8 +223,8 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
                                 DeleteResponse response = writeResult.response();
                                 DeleteRequest deleteRequest = updateResult.request();
                                 updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(),
                                         response.getId(), response.getSeqNo(), response.getVersion(), false);
-                                updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, shardRequest.request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null));
+                                updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null));
                                 // Replace the update request to the translated delete request to execute on the replica.
                                 item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest);
                                 setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, updateResponse));
@@ -275,16 +261,16 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
                         if (item.getPrimaryResponse() != null && isConflictException(t)) {
                             setResponse(item, item.getPrimaryResponse());
                         } else if (updateResult.result == null) {
-                            setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, new BulkItemResponse.Failure(shardRequest.request.index(), updateRequest.type(), updateRequest.id(), t)));
+                            setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE, new BulkItemResponse.Failure(request.index(), updateRequest.type(), updateRequest.id(), t)));
                         } else {
                             switch (updateResult.result.operation()) {
                                 case UPSERT:
                                 case INDEX:
                                     IndexRequest indexRequest = updateResult.request();
                                     if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
-                                        logger.trace("{} failed to execute bulk item (index) {}", t, shardRequest.shardId, indexRequest);
+                                        logger.trace("{} failed to execute bulk item (index) {}", t, request.shardId(), indexRequest);
                                     } else {
-                                        logger.debug("{} failed to execute bulk item (index) {}", t, shardRequest.shardId, indexRequest);
+                                        logger.debug("{} failed to execute bulk item (index) {}", t, request.shardId(), indexRequest);
                                     }
                                     setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_UPDATE,
                                             new BulkItemResponse.Failure(request.index(), indexRequest.type(), indexRequest.id(), t)));
@@ -292,9 +278,9 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
                                 case DELETE:
                                     DeleteRequest deleteRequest = updateResult.request();
                                     if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) {
-                                        logger.trace("{} failed to execute bulk item (delete) {}", t, shardRequest.shardId, deleteRequest);
+                                        logger.trace("{} failed to execute bulk item (delete) {}", t, request.shardId(), deleteRequest);
                                     } else {
-                                        logger.debug("{} failed to execute bulk item (delete) {}", t, shardRequest.shardId, deleteRequest);
+                                        logger.debug("{} failed to execute bulk item (delete) {}", t, request.shardId(), deleteRequest);
                                     }
                                     setResponse(item, new BulkItemResponse(item.id(), OP_TYPE_DELETE,
                                             new BulkItemResponse.Failure(request.index(), deleteRequest.type(), deleteRequest.id(), t)));
@@ -321,7 +307,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
         for (int i = 0; i < items.length; i++) {
             responses[i] = items[i].getPrimaryResponse();
         }
-        return new Tuple<>(new BulkShardResponse(shardRequest.shardId, responses), shardRequest.request);
+        return new Tuple<>(new BulkShardResponse(request.shardId(), responses), request);
     }

     private void setResponse(BulkItemRequest request, BulkItemResponse response) {
@@ -331,11 +317,11 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
         }
     }

-    private WriteResult<IndexResponse> shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, ClusterState clusterState,
+    private WriteResult<IndexResponse> shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, MetaData metaData,
                                                            IndexShard indexShard, boolean processed) throws Throwable {
         // validate, if routing is required, that we got routing
-        MappingMetaData mappingMd = clusterState.metaData().index(request.index()).mappingOrDefault(indexRequest.type());
+        MappingMetaData mappingMd = metaData.index(request.index()).mappingOrDefault(indexRequest.type());
         if (mappingMd != null && mappingMd.routing().required()) {
             if (indexRequest.routing() == null) {
                 throw new RoutingMissingException(request.index(), indexRequest.type(), indexRequest.id());
@@ -343,9 +329,8 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
         }

         if (!processed) {
-            indexRequest.process(clusterState.metaData(), mappingMd, allowIdGeneration, request.index());
+            indexRequest.process(metaData, mappingMd, allowIdGeneration, request.index());
         }
         return TransportIndexAction.executeIndexRequestOnPrimary(indexRequest, indexShard, mappingUpdatedAction);
     }
@@ -402,14 +387,14 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
     }

-    private UpdateResult shardUpdateOperation(ClusterState clusterState, BulkShardRequest bulkShardRequest, UpdateRequest updateRequest, IndexShard indexShard) {
+    private UpdateResult shardUpdateOperation(MetaData metaData, BulkShardRequest bulkShardRequest, UpdateRequest updateRequest, IndexShard indexShard) {
         UpdateHelper.Result translate = updateHelper.prepare(updateRequest, indexShard);
         switch (translate.operation()) {
             case UPSERT:
             case INDEX:
                 IndexRequest indexRequest = translate.action();
                 try {
-                    WriteResult result = shardIndexOperation(bulkShardRequest, indexRequest, clusterState, indexShard, false);
+                    WriteResult result = shardIndexOperation(bulkShardRequest, indexRequest, metaData, indexShard, false);
                     return new UpdateResult(translate, indexRequest, result);
                 } catch (Throwable t) {
                     t = ExceptionsHelper.unwrapCause(t);
@@ -422,7 +407,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
             case DELETE:
                 DeleteRequest deleteRequest = translate.action();
                 try {
-                    WriteResult result = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard);
+                    WriteResult<DeleteResponse> result = TransportDeleteAction.executeDeleteRequestOnPrimary(deleteRequest, indexShard);
                     return new UpdateResult(translate, deleteRequest, result);
                 } catch (Throwable t) {
                     t = ExceptionsHelper.unwrapCause(t);
@@ -443,7 +428,8 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha

     @Override
-    protected void shardOperationOnReplica(ShardId shardId, BulkShardRequest request) {
+    protected void shardOperationOnReplica(BulkShardRequest request) {
+        final ShardId shardId = request.shardId();
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
         IndexShard indexShard = indexService.getShard(shardId.id());
         Translog.Location location = null;
@@ -455,16 +441,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
             if (item.request() instanceof IndexRequest) {
                 IndexRequest indexRequest = (IndexRequest) item.request();
                 try {
-                    SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, indexRequest.source()).index(shardId.getIndex()).type(indexRequest.type()).id(indexRequest.id())
-                            .routing(indexRequest.routing()).parent(indexRequest.parent()).timestamp(indexRequest.timestamp()).ttl(indexRequest.ttl());
-                    final Engine.Index operation = indexShard.prepareIndexOnReplica(sourceToParse,
-                            indexRequest.seqNo(), indexRequest.version(), indexRequest.versionType());
-                    Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
-                    if (update != null) {
-                        throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
-                    }
-                    indexShard.index(operation);
+                    Engine.Index operation = TransportIndexAction.executeIndexRequestOnReplica(indexRequest, indexShard);
                     location = locationToSync(location, operation.getTranslogLocation());
                 } catch (Throwable e) {
                     // if its not an ignore replica failure, we need to make sure to bubble up the failure
@@ -476,8 +453,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
             } else if (item.request() instanceof DeleteRequest) {
                 DeleteRequest deleteRequest = (DeleteRequest) item.request();
                 try {
-                    Engine.Delete delete = indexShard.prepareDeleteOnReplica(deleteRequest.type(), deleteRequest.id(),
-                            deleteRequest.seqNo(), deleteRequest.version(), deleteRequest.versionType());
+                    Engine.Delete delete = TransportDeleteAction.executeDeleteRequestOnReplica(deleteRequest, indexShard);
                     indexShard.delete(delete);
                     location = locationToSync(location, delete.getTranslogLocation());
                 } catch (Throwable e) {
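One pattern worth calling out in the replica path above: per-item failures that are ignorable on a replica (ones the primary already accounted for) are swallowed, while anything else must bubble up and fail the whole shard operation. A self-contained sketch of that loop, with the ignorable-failure test as a stand-in for the real check:

import java.util.List;

class ReplicaBulkReplay {
    // Stand-in for "is this failure ignorable on a replica?", e.g. a
    // document-level conflict the primary already reported to the client.
    static boolean ignoreOnReplica(Exception e) {
        return e instanceof IllegalArgumentException;
    }

    static void replay(List<Runnable> operations) {
        for (Runnable op : operations) {
            try {
                op.run();
            } catch (Exception e) {
                // Item-level failures already handled by the primary are swallowed;
                // anything else must fail the replica so it can be re-synced.
                if (!ignoreOnReplica(e)) {
                    throw new IllegalStateException("replica replay failed", e);
                }
            }
        }
    }
}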


@@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;

 import java.io.IOException;
@@ -66,6 +67,14 @@ public class DeleteResponse extends DocWriteResponse {
         out.writeBoolean(found);
     }

+    @Override
+    public RestStatus status() {
+        if (found == false) {
+            return RestStatus.NOT_FOUND;
+        }
+        return super.status();
+    }
+
     static final class Fields {
         static final XContentBuilderString FOUND = new XContentBuilderString("found");
     }
@@ -76,4 +85,17 @@ public class DeleteResponse extends DocWriteResponse {
         super.toXContent(builder, params);
         return builder;
     }
+
+    @Override
+    public String toString() {
+        StringBuilder builder = new StringBuilder();
+        builder.append("DeleteResponse[");
+        builder.append("index=").append(getIndex());
+        builder.append(",type=").append(getType());
+        builder.append(",id=").append(getId());
+        builder.append(",version=").append(getVersion());
+        builder.append(",found=").append(found);
+        builder.append(",shards=").append(getShardInfo());
+        return builder.append("]").toString();
+    }
 }


@@ -34,7 +34,7 @@ import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
-import org.elasticsearch.cluster.routing.ShardIterator;
+import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
@@ -94,47 +94,41 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
     }

     @Override
-    protected void resolveRequest(final ClusterState state, final InternalRequest request, final ActionListener<DeleteResponse> listener) {
-        request.request().routing(state.metaData().resolveIndexRouting(request.request().routing(), request.request().index()));
-        if (state.metaData().hasIndex(request.concreteIndex())) {
+    protected void resolveRequest(final MetaData metaData, String concreteIndex, DeleteRequest request) {
+        request.routing(metaData.resolveIndexRouting(request.routing(), request.index()));
+        if (metaData.hasIndex(concreteIndex)) {
             // check if routing is required, if so, do a broadcast delete
-            MappingMetaData mappingMd = state.metaData().index(request.concreteIndex()).mappingOrDefault(request.request().type());
+            MappingMetaData mappingMd = metaData.index(concreteIndex).mappingOrDefault(request.type());
             if (mappingMd != null && mappingMd.routing().required()) {
-                if (request.request().routing() == null) {
-                    if (request.request().versionType() != VersionType.INTERNAL) {
+                if (request.routing() == null) {
+                    if (request.versionType() != VersionType.INTERNAL) {
                         // TODO: implement this feature
-                        throw new IllegalArgumentException("routing value is required for deleting documents of type [" + request.request().type()
-                                + "] while using version_type [" + request.request().versionType() + "]");
+                        throw new IllegalArgumentException("routing value is required for deleting documents of type [" + request.type()
                                + "] while using version_type [" + request.versionType() + "]");
                     }
-                    throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id());
+                    throw new RoutingMissingException(concreteIndex, request.type(), request.id());
                 }
             }
         }
+        ShardId shardId = clusterService.operationRouting().shardId(clusterService.state(), concreteIndex, request.id(), request.routing());
+        request.setShardId(shardId);
     }

     private void innerExecute(final DeleteRequest request, final ActionListener<DeleteResponse> listener) {
         super.doExecute(request, listener);
     }

-    @Override
-    protected boolean checkWriteConsistency() {
-        return true;
-    }
-
     @Override
     protected DeleteResponse newResponseInstance() {
         return new DeleteResponse();
     }

     @Override
-    protected Tuple<DeleteResponse, DeleteRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
-        DeleteRequest request = shardRequest.request;
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).getShard(shardRequest.shardId.id());
+    protected Tuple<DeleteResponse, DeleteRequest> shardOperationOnPrimary(MetaData metaData, DeleteRequest request) {
+        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
         final WriteResult<DeleteResponse> result = executeDeleteRequestOnPrimary(request, indexShard);
         processAfterWrite(request.refresh(), indexShard, result.location);
-        return new Tuple<>(result.response, shardRequest.request);
+        return new Tuple<>(result.response, request);
     }

     public static WriteResult<DeleteResponse> executeDeleteRequestOnPrimary(DeleteRequest request, IndexShard indexShard) {
@@ -146,23 +140,23 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
         request.seqNo(delete.seqNo());
         assert request.versionType().validateVersionForWrites(request.version());

-        return new WriteResult<>(new DeleteResponse(indexShard.shardId(), request.type(), request.id(),
-                delete.seqNo(), delete.version(), delete.found()), delete.getTranslogLocation());
+        return new WriteResult<>(
+                new DeleteResponse(indexShard.shardId(), request.type(), request.id(), delete.seqNo(), delete.version(), delete.found()),
+                delete.getTranslogLocation());
+    }
+
+    public static Engine.Delete executeDeleteRequestOnReplica(DeleteRequest request, IndexShard indexShard) {
+        Engine.Delete delete = indexShard.prepareDeleteOnReplica(request.type(), request.id(), request.seqNo(), request.version(), request.versionType());
+        indexShard.delete(delete);
+        return delete;
     }

     @Override
-    protected void shardOperationOnReplica(ShardId shardId, DeleteRequest request) {
+    protected void shardOperationOnReplica(DeleteRequest request) {
+        final ShardId shardId = request.shardId();
         IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
-        Engine.Delete delete = indexShard.prepareDeleteOnReplica(request.type(), request.id(),
-                request.seqNo(), request.version(), request.versionType());
-        indexShard.delete(delete);
+        Engine.Delete delete = executeDeleteRequestOnReplica(request, indexShard);
         processAfterWrite(request.refresh(), indexShard, delete.getTranslogLocation());
     }
-
-    @Override
-    protected ShardIterator shards(ClusterState clusterState, InternalRequest request) {
-        return clusterService.operationRouting()
-                .deleteShards(clusterService.state(), request.concreteIndex(), request.request().type(), request.request().id(), request.request().routing());
-    }
 }
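resolveRequest now pins the target shard onto the request up front via OperationRouting.shardId(...) instead of computing a shard iterator later in shards(). The routing idea itself is simple; a minimal stand-in (Elasticsearch's real OperationRouting hashes the routing value, or the document id when no routing is given, onto the index's shard count):

class SimpleRouting {
    // Stand-in for OperationRouting#shardId: route by the explicit routing
    // value when present, otherwise by the document id.
    static int shardId(String id, String routing, int numberOfShards) {
        String effectiveRouting = routing != null ? routing : id;
        return Math.floorMod(effectiveRouting.hashCode(), numberOfShards);
    }
}

Resolving the shard once at request time and carrying it inside the request means primary execution and replica fan-out agree on the same target, and it removes the need for each action to re-derive it in a shards() override.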


@@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;

 import java.io.IOException;
@@ -53,6 +54,14 @@ public class IndexResponse extends DocWriteResponse {
         return this.created;
     }

+    @Override
+    public RestStatus status() {
+        if (created) {
+            return RestStatus.CREATED;
+        }
+        return super.status();
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
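IndexResponse overrides status() the same way DeleteResponse does earlier in this diff: keep the base class default and specialize only the interesting case (201 Created here, 404 Not Found there). The pattern in isolation (a compact sketch with invented names):

class WriteResult {
    protected int status() {
        return 200; // default: OK
    }
}

class CreateResult extends WriteResult {
    private final boolean created;

    CreateResult(boolean created) {
        this.created = created;
    }

    @Override
    protected int status() {
        // Specialize the one interesting case, defer to the default otherwise.
        return created ? 201 : super.status();
    }
}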


@@ -36,7 +36,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.routing.ShardIterator;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
@@ -120,64 +119,63 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
     }

     @Override
-    protected void resolveRequest(ClusterState state, InternalRequest request, ActionListener<IndexResponse> indexResponseActionListener) {
-        MetaData metaData = clusterService.state().metaData();
+    protected void resolveRequest(MetaData metaData, String concreteIndex, IndexRequest request) {
         MappingMetaData mappingMd = null;
-        if (metaData.hasIndex(request.concreteIndex())) {
-            mappingMd = metaData.index(request.concreteIndex()).mappingOrDefault(request.request().type());
+        if (metaData.hasIndex(concreteIndex)) {
+            mappingMd = metaData.index(concreteIndex).mappingOrDefault(request.type());
         }
-        request.request().process(metaData, mappingMd, allowIdGeneration, request.concreteIndex());
+        request.process(metaData, mappingMd, allowIdGeneration, concreteIndex);
+        ShardId shardId = clusterService.operationRouting().shardId(clusterService.state(), concreteIndex, request.id(), request.routing());
+        request.setShardId(shardId);
     }

     private void innerExecute(final IndexRequest request, final ActionListener<IndexResponse> listener) {
         super.doExecute(request, listener);
     }

-    @Override
-    protected boolean checkWriteConsistency() {
-        return true;
-    }
-
     @Override
     protected IndexResponse newResponseInstance() {
         return new IndexResponse();
     }

     @Override
-    protected ShardIterator shards(ClusterState clusterState, InternalRequest request) {
-        return clusterService.operationRouting()
-                .indexShards(clusterService.state(), request.concreteIndex(), request.request().type(), request.request().id(), request.request().routing());
-    }
-
-    @Override
-    protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
-        final IndexRequest request = shardRequest.request;
+    protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Throwable {

         // validate, if routing is required, that we got routing
-        IndexMetaData indexMetaData = clusterState.metaData().index(shardRequest.shardId.getIndex());
+        IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex());
         MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
         if (mappingMd != null && mappingMd.routing().required()) {
             if (request.routing() == null) {
-                throw new RoutingMissingException(shardRequest.shardId.getIndex(), request.type(), request.id());
+                throw new RoutingMissingException(request.shardId().getIndex(), request.type(), request.id());
             }
         }

-        IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
-        IndexShard indexShard = indexService.getShard(shardRequest.shardId.id());
+        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
+        IndexShard indexShard = indexService.getShard(request.shardId().id());

         final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(request, indexShard, mappingUpdatedAction);
         final IndexResponse response = result.response;
         final Translog.Location location = result.location;
         processAfterWrite(request.refresh(), indexShard, location);
-        return new Tuple<>(response, shardRequest.request);
+        return new Tuple<>(response, request);
     }

     @Override
-    protected void shardOperationOnReplica(ShardId shardId, IndexRequest request) {
+    protected void shardOperationOnReplica(IndexRequest request) {
+        final ShardId shardId = request.shardId();
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
         IndexShard indexShard = indexService.getShard(shardId.id());
+        final Engine.Index operation = executeIndexRequestOnReplica(request, indexShard);
+        processAfterWrite(request.refresh(), indexShard, operation.getTranslogLocation());
+    }
+
+    /**
+     * Execute the given {@link IndexRequest} on a replica shard, throwing a
+     * {@link RetryOnReplicaException} if the operation needs to be re-tried.
+     */
+    public static Engine.Index executeIndexRequestOnReplica(IndexRequest request, IndexShard indexShard) {
+        final ShardId shardId = indexShard.shardId();
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).index(shardId.getIndex()).type(request.type()).id(request.id())
                 .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
@@ -187,15 +185,14 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
             throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update);
         }
         indexShard.index(operation);
-        processAfterWrite(request.refresh(), indexShard, operation.getTranslogLocation());
+        return operation;
     }

-    /** utility method to prepare indexing operations on the primary */
+    /** Utility method to prepare an index operation on primary shards */
     public static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard indexShard) {
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).index(request.index()).type(request.type()).id(request.id())
                 .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());
         return indexShard.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType());
     }

     /**
@@ -213,7 +210,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
             update = operation.parsedDoc().dynamicMappingsUpdate();
             if (update != null) {
                 throw new RetryOnPrimaryException(shardId,
-                        "Dynamics mappings are not available on the node that holds the primary yet");
+                        "Dynamic mappings are not available on the node that holds the primary yet");
             }
         }
         final boolean created = indexShard.index(operation);
@@ -229,3 +226,4 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
         return new WriteResult<>(new IndexResponse(shardId, request.type(), request.id(), request.seqNo(), request.version(), created), operation.getTranslogLocation());
     }
 }
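The extracted executeIndexRequestOnReplica refuses to index when parsing the document yields a dynamic-mapping update the replica has not received yet, throwing RetryOnReplicaException instead of applying a divergent mapping; the primary-side helper above throws RetryOnPrimaryException under the analogous condition. A self-contained sketch of that check-then-retry shape (exception type and field model invented for the example):

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class ReplicaIndexer {
    static class RetryLaterException extends RuntimeException {
        RetryLaterException(String message) {
            super(message);
        }
    }

    private final Set<String> knownFields = new HashSet<>(Arrays.asList("title", "body"));

    void indexOnReplica(Map<String, String> doc) {
        // A field the local mapping doesn't know yet means the dynamic-mapping
        // update from the primary hasn't arrived; tell the caller to retry later
        // rather than indexing with a divergent mapping.
        for (String field : doc.keySet()) {
            if (!knownFields.contains(field)) {
                throw new RetryLaterException("mapping for [" + field + "] not available yet");
            }
        }
        // ... apply the write to the local shard here ...
    }
}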


@@ -42,7 +42,12 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
     public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES);

-    ShardId internalShardId;
+    /**
+     * Target shard the request should execute on. In case of index and delete requests,
+     * the shard id gets resolved by the transport action before the request operation is performed,
+     * and at request creation time for shard-level bulk, refresh and flush requests.
+     */
+    protected ShardId shardId;

     long seqNo;
     long primaryTerm;
@@ -63,6 +68,15 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
         super(request);
     }

+    /**
+     * Creates a new request with a resolved shard id
+     */
+    public ReplicationRequest(ActionRequest request, ShardId shardId) {
+        super(request);
+        this.index = shardId.getIndex();
+        this.shardId = shardId;
+    }
+
     /**
      * Copy constructor that creates a new request that is a copy of the one provided as an argument.
      */
@@ -79,9 +93,9 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
         this.timeout = request.timeout();
         this.index = request.index();
         this.consistencyLevel = request.consistencyLevel();
-        this.internalShardId = request.internalShardId;
-        this.seqNo = request.seqNo;
-        this.primaryTerm = request.primaryTerm;
+        this.shardId = request.shardId();
+        this.seqNo = request.seqNo();
+        this.primaryTerm = request.primaryTerm();
     }

     /**
@@ -130,12 +144,12 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
     /**
      * @return the shardId of the shard where this operation should be executed on.
-     * can be null in case the shardId is determined by a single document (index, type, id) for example for index or delete request.
+     * can be null if the shardId has not yet been resolved
      */
     public
     @Nullable
     ShardId shardId() {
-        return internalShardId;
+        return shardId;
     }

     /**
@@ -183,9 +197,9 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
         if (in.readBoolean()) {
-            internalShardId = ShardId.readShardId(in);
+            shardId = ShardId.readShardId(in);
         } else {
-            internalShardId = null;
+            shardId = null;
         }
         consistencyLevel = WriteConsistencyLevel.fromId(in.readByte());
         timeout = TimeValue.readTimeValue(in);
@@ -197,9 +211,9 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        if (internalShardId != null) {
+        if (shardId != null) {
             out.writeBoolean(true);
-            internalShardId.writeTo(out);
+            shardId.writeTo(out);
         } else {
             out.writeBoolean(false);
         }
@@ -210,9 +224,21 @@ public class ReplicationRequest<T extends ReplicationRequest> extends ActionRequ
         out.writeVLong(primaryTerm);
     }

+    /**
+     * Sets the target shard id for the request. The shard id is set when an
+     * index/delete request is resolved by the transport action
+     */
     public T setShardId(ShardId shardId) {
-        this.internalShardId = shardId;
-        this.index = shardId.getIndex();
+        this.shardId = shardId;
         return (T) this;
     }
+
+    @Override
+    public String toString() {
+        if (shardId != null) {
+            return shardId.toString();
+        } else {
+            return index;
+        }
+    }
 }
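shardId travels over the wire as an optional field: a presence flag first, then the payload only when the field is set, and readFrom consumes exactly what writeTo produced, in the same order. The idiom in isolation, using plain java.io streams rather than Elasticsearch's StreamInput/StreamOutput (a minimal sketch):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

final class OptionalFieldCodec {
    // Writer: presence flag, then payload only when the value is set.
    static void writeOptionalInt(DataOutput out, Integer value) throws IOException {
        if (value != null) {
            out.writeBoolean(true);
            out.writeInt(value);
        } else {
            out.writeBoolean(false);
        }
    }

    // Reader: mirrors the writer exactly; any drift corrupts everything after it.
    static Integer readOptionalInt(DataInput in) throws IOException {
        if (in.readBoolean()) {
            return in.readInt();
        }
        return null;
    }
}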


@@ -19,7 +19,6 @@
 package org.elasticsearch.action.termvectors;

-import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.DocumentRequest;
 import org.elasticsearch.action.support.ActionFilters;
@@ -79,8 +78,8 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction<Mult
                         new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + termVectorsRequest.type() + "]/[" + termVectorsRequest.id() + "]"))));
                 continue;
             }
-            ShardId shardId = clusterService.operationRouting().getShards(clusterState, concreteSingleIndex,
-                    termVectorsRequest.type(), termVectorsRequest.id(), termVectorsRequest.routing(), null).shardId();
+            ShardId shardId = clusterService.operationRouting().shardId(clusterState, concreteSingleIndex,
                    termVectorsRequest.id(), termVectorsRequest.routing());
             MultiTermVectorsShardRequest shardRequest = shardRequests.get(shardId);
             if (shardRequest == null) {
                 shardRequest = new MultiTermVectorsShardRequest(request, shardId.index().name(), shardId.id());


@@ -176,7 +176,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                 @Override
                 public void onResponse(IndexResponse response) {
                     UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(),
                             response.getId(), response.getSeqNo(), response.getVersion(), response.isCreated());
                     if (request.fields() != null && request.fields().length > 0) {
                         Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true);
                         update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
@@ -214,7 +214,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                 @Override
                 public void onResponse(IndexResponse response) {
                     UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(),
                             response.getSeqNo(), response.getVersion(), response.isCreated());
                     update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes));
                     listener.onResponse(update);
                 }
@@ -243,7 +243,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                 @Override
                 public void onResponse(DeleteResponse response) {
                     UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(),
                             response.getId(), response.getSeqNo(), response.getVersion(), false);
                     update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null));
                     listener.onResponse(update);
                 }

View File

@@ -83,7 +83,7 @@ public class UpdateHelper extends AbstractComponent {
     @SuppressWarnings("unchecked")
     protected Result prepare(UpdateRequest request, final GetResult getResult) {
         long getDateNS = System.nanoTime();
-        final ShardId shardId = new ShardId(request.index(), request.shardId());
+        final ShardId shardId = new ShardId(getResult.getIndex(), request.shardId());
         if (!getResult.isExists()) {
             if (request.upsertRequest() == null && !request.docAsUpsert()) {
                 throw new DocumentMissingException(shardId, request.type(), request.id());
@@ -232,12 +232,12 @@ public class UpdateHelper extends AbstractComponent {
                     .consistencyLevel(request.consistencyLevel());
             return new Result(deleteRequest, Operation.DELETE, updatedSourceAsMap, updateSourceContentType);
         } else if ("none".equals(operation)) {
-            UpdateResponse update = new UpdateResponse(new ShardId(getResult.getIndex(), request.shardId()), getResult.getType(), getResult.getId(), getResult.getVersion(), false);
+            UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), false);
             update.setGetResult(extractGetResult(request, request.index(), getResult.getVersion(), updatedSourceAsMap, updateSourceContentType, getResult.internalSourceRef()));
             return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType);
         } else {
             logger.warn("Used update operation [{}] for script [{}], doing nothing...", operation, request.script.getScript());
-            UpdateResponse update = new UpdateResponse(new ShardId(getResult.getIndex(), request.shardId()), getResult.getType(), getResult.getId(), getResult.getVersion(), false);
+            UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), false);
             return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType);
         }
     }

View File

@@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.index.seqno.SequenceNumbersService;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
@@ -70,6 +71,14 @@ public class UpdateResponse extends DocWriteResponse {
     }
 
+    @Override
+    public RestStatus status() {
+        if (created) {
+            return RestStatus.CREATED;
+        }
+        return super.status();
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
@@ -105,4 +114,17 @@ public class UpdateResponse extends DocWriteResponse {
         }
         return builder;
     }
+
+    @Override
+    public String toString() {
+        StringBuilder builder = new StringBuilder();
+        builder.append("UpdateResponse[");
+        builder.append("index=").append(getIndex());
+        builder.append(",type=").append(getType());
+        builder.append(",id=").append(getId());
+        builder.append(",version=").append(getVersion());
+        builder.append(",created=").append(created);
+        builder.append(",shards=").append(getShardInfo());
+        return builder.append("]").toString();
+    }
 }
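
The status() override above makes an upsert that inserted a new document report HTTP 201 instead of the default status. A minimal sketch of the pattern with stand-in types (HttpStatus and the base class are illustrative, not the Elasticsearch API):

enum HttpStatus { OK, CREATED }

class BaseResponseSketch {
    HttpStatus status() {
        return HttpStatus.OK; // default for successful writes
    }
}

class UpdateResponseSketch extends BaseResponseSketch {
    private final boolean created;

    UpdateResponseSketch(boolean created) {
        this.created = created;
    }

    @Override
    HttpStatus status() {
        // report CREATED only when the update actually inserted a document
        return created ? HttpStatus.CREATED : super.status();
    }
}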

View File

@@ -176,7 +176,6 @@ public class ClusterModule extends AbstractModule {
         registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, Validator.TIME_NON_NEGATIVE);
         registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE);
         registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_SIZE_PER_SEC, Validator.BYTES_SIZE);
         registerClusterDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*", ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR);
         registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, Validator.INTEGER);
         registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, Validator.INTEGER);

View File

@@ -748,8 +748,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
             IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC,
             RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE,
             RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE,
-            RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC,
-            RecoverySettings.INDICES_RECOVERY_MAX_SIZE_PER_SEC));
+            RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC));
 
     /** All known time cluster settings. */

View File

@@ -25,7 +25,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.math.MathUtils;
@@ -55,19 +54,16 @@ public class OperationRouting extends AbstractComponent {
     }
 
     public ShardIterator indexShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing) {
-        return shards(clusterState, index, type, id, routing).shardsIt();
-    }
-
-    public ShardIterator deleteShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing) {
-        return shards(clusterState, index, type, id, routing).shardsIt();
+        return shards(clusterState, index, id, routing).shardsIt();
     }
 
     public ShardIterator getShards(ClusterState clusterState, String index, String type, String id, @Nullable String routing, @Nullable String preference) {
-        return preferenceActiveShardIterator(shards(clusterState, index, type, id, routing), clusterState.nodes().localNodeId(), clusterState.nodes(), preference);
+        return preferenceActiveShardIterator(shards(clusterState, index, id, routing), clusterState.nodes().localNodeId(), clusterState.nodes(), preference);
     }
 
     public ShardIterator getShards(ClusterState clusterState, String index, int shardId, @Nullable String preference) {
-        return preferenceActiveShardIterator(shards(clusterState, index, shardId), clusterState.nodes().localNodeId(), clusterState.nodes(), preference);
+        final IndexShardRoutingTable indexShard = clusterState.getRoutingTable().shardRoutingTable(index, shardId);
+        return preferenceActiveShardIterator(indexShard, clusterState.nodes().localNodeId(), clusterState.nodes(), preference);
     }
 
     public GroupShardsIterator broadcastDeleteShards(ClusterState clusterState, String index) {
@@ -102,7 +98,7 @@ public class OperationRouting extends AbstractComponent {
             final Set<String> effectiveRouting = routing.get(index);
             if (effectiveRouting != null) {
                 for (String r : effectiveRouting) {
-                    int shardId = shardId(clusterState, index, null, null, r);
+                    int shardId = generateShardId(clusterState, index, null, r);
                     IndexShardRoutingTable indexShard = indexRouting.shard(shardId);
                     if (indexShard == null) {
                         throw new ShardNotFoundException(new ShardId(index, shardId));
@@ -200,14 +196,6 @@ public class OperationRouting extends AbstractComponent {
         }
     }
 
-    public IndexMetaData indexMetaData(ClusterState clusterState, String index) {
-        IndexMetaData indexMetaData = clusterState.metaData().index(index);
-        if (indexMetaData == null) {
-            throw new IndexNotFoundException(index);
-        }
-        return indexMetaData;
-    }
-
     protected IndexRoutingTable indexRoutingTable(ClusterState clusterState, String index) {
         IndexRoutingTable indexRouting = clusterState.routingTable().index(index);
         if (indexRouting == null) {
@@ -216,25 +204,20 @@ public class OperationRouting extends AbstractComponent {
         return indexRouting;
     }
 
-    // either routing is set, or type/id are set
-    protected IndexShardRoutingTable shards(ClusterState clusterState, String index, String type, String id, String routing) {
-        int shardId = shardId(clusterState, index, type, id, routing);
-        return shards(clusterState, index, shardId);
+    protected IndexShardRoutingTable shards(ClusterState clusterState, String index, String id, String routing) {
+        int shardId = generateShardId(clusterState, index, id, routing);
+        return clusterState.getRoutingTable().shardRoutingTable(index, shardId);
     }
 
-    protected IndexShardRoutingTable shards(ClusterState clusterState, String index, int shardId) {
-        IndexShardRoutingTable indexShard = indexRoutingTable(clusterState, index).shard(shardId);
-        if (indexShard == null) {
-            throw new ShardNotFoundException(new ShardId(index, shardId));
-        }
-        return indexShard;
+    public ShardId shardId(ClusterState clusterState, String index, String id, @Nullable String routing) {
+        return new ShardId(index, generateShardId(clusterState, index, id, routing));
     }
 
-    @SuppressForbidden(reason = "Math#abs is trappy")
-    private int shardId(ClusterState clusterState, String index, String type, String id, @Nullable String routing) {
-        final IndexMetaData indexMetaData = indexMetaData(clusterState, index);
+    private int generateShardId(ClusterState clusterState, String index, String id, @Nullable String routing) {
+        IndexMetaData indexMetaData = clusterState.metaData().index(index);
+        if (indexMetaData == null) {
+            throw new IndexNotFoundException(index);
+        }
         final int hash;
         if (routing == null) {
             hash = Murmur3HashFunction.hash(id);
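
generateShardId above implements the usual routing scheme: hash the explicit routing value if present, otherwise the document id, then map the hash onto the index's shard count. A self-contained sketch under simplified assumptions (String.hashCode stands in for Murmur3HashFunction.hash, and the shard count is passed in directly):

final class ShardIdSketch {
    static int generateShardId(String id, String routing, int numberOfShards) {
        final int hash = routing == null ? id.hashCode() : routing.hashCode();
        // floorMod keeps the result in [0, numberOfShards) even for negative hashes,
        // sidestepping the Math.abs trap the removed @SuppressForbidden hinted at
        return Math.floorMod(hash, numberOfShards);
    }

    public static void main(String[] args) {
        System.out.println(generateShardId("doc-1", null, 5));      // routed by id
        System.out.println(generateShardId("doc-1", "user-42", 5)); // routed by routing value
    }
}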

View File

@@ -32,6 +32,8 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.util.iterable.Iterables;
 import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardNotFoundException;
 
 import java.io.IOException;
 import java.util.*;
@@ -89,6 +91,24 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
         return indicesRouting();
     }
 
+    /**
+     * All shards for the provided index and shard id
+     * @return All the shard routing entries for the given index and shard id
+     * @throws IndexNotFoundException if provided index does not exist
+     * @throws ShardNotFoundException if provided shard id is unknown
+     */
+    public IndexShardRoutingTable shardRoutingTable(String index, int shardId) {
+        IndexRoutingTable indexRouting = index(index);
+        if (indexRouting == null) {
+            throw new IndexNotFoundException(index);
+        }
+        IndexShardRoutingTable shard = indexRouting.shard(shardId);
+        if (shard == null) {
+            throw new ShardNotFoundException(new ShardId(index, shardId));
+        }
+        return shard;
+    }
+
     public RoutingTable validateRaiseException(MetaData metaData) throws RoutingValidationException {
         RoutingTableValidation validation = validate(metaData);
         if (!validation.valid()) {
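
shardRoutingTable above resolves in two steps and throws a distinct exception at each level, so callers can tell a missing index from a missing shard. A sketch of the same lookup-or-throw shape with plain maps standing in for the routing-table types:

import java.util.Map;

final class RoutingLookupSketch {
    static String shardRoutingTable(Map<String, Map<Integer, String>> table, String index, int shardId) {
        Map<Integer, String> indexRouting = table.get(index);
        if (indexRouting == null) {
            throw new IllegalArgumentException("no such index [" + index + "]"); // IndexNotFoundException in the real code
        }
        String shard = indexRouting.get(shardId);
        if (shard == null) {
            throw new IllegalArgumentException("no such shard [" + index + "][" + shardId + "]"); // ShardNotFoundException
        }
        return shard;
    }
}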

View File

@@ -191,15 +191,6 @@ public final class ShardRouting implements Streamable, ToXContent {
         return state == ShardRoutingState.RELOCATING;
     }
 
-    /**
-     * Returns <code>true</code> if this shard is a relocation target for another shard (i.e., was created with {@link #buildTargetRelocatingShard()}
-     *
-     */
-    public boolean isRelocationTarget() {
-        return state == ShardRoutingState.INITIALIZING && relocatingNodeId != null;
-    }
-
     /**
      * Returns <code>true</code> iff this shard is assigned to a node ie. not
      * {@link ShardRoutingState#UNASSIGNED unassigned}. Otherwise <code>false</code>
@@ -230,7 +221,7 @@ public final class ShardRouting implements Streamable, ToXContent {
     public ShardRouting buildTargetRelocatingShard() {
         assert relocating();
         return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primaryTerm, primary, ShardRoutingState.INITIALIZING, version, unassignedInfo,
                 AllocationId.newTargetRelocation(allocationId), true, expectedShardSize);
     }
 
     /**
@@ -569,29 +560,36 @@ public final class ShardRouting implements Streamable, ToXContent {
         return b;
     }
 
+    /**
+     * Returns <code>true</code> if this shard is a relocation target for another shard (i.e., was created with {@link #buildTargetRelocatingShard()}
+     */
+    public boolean isRelocationTarget() {
+        return state == ShardRoutingState.INITIALIZING && relocatingNodeId != null;
+    }
+
     /** returns true if the routing is the relocation target of the given routing */
     public boolean isRelocationTargetOf(ShardRouting other) {
         boolean b = this.allocationId != null && other.allocationId != null && this.state == ShardRoutingState.INITIALIZING &&
                 this.allocationId.getId().equals(other.allocationId.getRelocationId());
         assert b == false || other.state == ShardRoutingState.RELOCATING :
                 "ShardRouting is a relocation target but the source shard state isn't relocating. This [" + this + "], other [" + other + "]";
         assert b == false || other.allocationId.getId().equals(this.allocationId.getRelocationId()) :
                 "ShardRouting is a relocation target but the source id isn't equal to source's allocationId.getRelocationId. This [" + this + "], other [" + other + "]";
         assert b == false || other.currentNodeId().equals(this.relocatingNodeId) :
                 "ShardRouting is a relocation target but source current node id isn't equal to target relocating node. This [" + this + "], other [" + other + "]";
         assert b == false || this.currentNodeId().equals(other.relocatingNodeId) :
                 "ShardRouting is a relocation target but current node id isn't equal to source relocating node. This [" + this + "], other [" + other + "]";
         assert b == false || isSameShard(other) :
                 "ShardRouting is a relocation target but both routings are not of the same shard. This [" + this + "], other [" + other + "]";
         assert b == false || this.primary == other.primary :
                 "ShardRouting is a relocation target but primary flag is different. This [" + this + "], target [" + other + "]";
         assert b == false || this.primaryTerm == other.primaryTerm :
                 "ShardRouting is a relocation target but primary term is different. This [" + this + "], target [" + other + "]";
@@ -602,26 +600,26 @@ public final class ShardRouting implements Streamable, ToXContent {
     /** returns true if the routing is the relocation source for the given routing */
     public boolean isRelocationSourceOf(ShardRouting other) {
         boolean b = this.allocationId != null && other.allocationId != null && other.state == ShardRoutingState.INITIALIZING &&
                 other.allocationId.getId().equals(this.allocationId.getRelocationId());
         assert b == false || this.state == ShardRoutingState.RELOCATING :
                 "ShardRouting is a relocation source but shard state isn't relocating. This [" + this + "], other [" + other + "]";
         assert b == false || this.allocationId.getId().equals(other.allocationId.getRelocationId()) :
                 "ShardRouting is a relocation source but the allocation id isn't equal to other.allocationId.getRelocationId. This [" + this + "], other [" + other + "]";
         assert b == false || this.currentNodeId().equals(other.relocatingNodeId) :
                 "ShardRouting is a relocation source but current node isn't equal to other's relocating node. This [" + this + "], other [" + other + "]";
         assert b == false || other.currentNodeId().equals(this.relocatingNodeId) :
                 "ShardRouting is a relocation source but relocating node isn't equal to other's current node. This [" + this + "], other [" + other + "]";
         assert b == false || isSameShard(other) :
                 "ShardRouting is a relocation source but both routings are not of the same shard. This [" + this + "], target [" + other + "]";
         assert b == false || this.primary == other.primary :
                 "ShardRouting is a relocation source but primary flag is different. This [" + this + "], target [" + other + "]";
         assert b == false || this.primaryTerm == other.primaryTerm :
                 "ShardRouting is a relocation source but primary term is different. This [" + this + "], target [" + other + "]";
@@ -743,15 +741,15 @@ public final class ShardRouting implements Streamable, ToXContent {
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject()
                 .field("state", state())
                 .field("primary", primary())
                 .field("primary_term", primaryTerm())
                 .field("node", currentNodeId())
                 .field("relocating_node", relocatingNodeId())
                 .field("shard", shardId().id())
                 .field("index", shardId().index().name())
                 .field("version", version);
-        if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE){
+        if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE) {
             builder.field("expected_shard_size_in_bytes", expectedShardSize);
         }
         if (restoreSource() != null) {

View File

@@ -137,8 +137,7 @@ public class NetworkService extends AbstractComponent {
      * Resolves {@code publishHosts} to a single publish address. The fact that it returns
      * only one address is just a current limitation.
      * <p>
-     * If {@code publishHosts} resolves to more than one address, <b>then one is selected with magic</b>,
-     * and the user is warned (they can always just be more specific).
+     * If {@code publishHosts} resolves to more than one address, <b>then one is selected with magic</b>
      *
     * @param publishHosts list of hosts to publish as. this may contain special pseudo-hostnames
     *                     such as _local_ (see the documentation). if it is null, it will be populated
     *                     based on global default settings.
@@ -186,13 +185,12 @@ public class NetworkService extends AbstractComponent {
             }
         }
 
-        // 3. warn user if we end out with multiple publish addresses
+        // 3. if we end out with multiple publish addresses, select by preference.
+        //    don't warn the user, or they will get confused by bind_host vs publish_host etc.
         if (addresses.length > 1) {
             List<InetAddress> sorted = new ArrayList<>(Arrays.asList(addresses));
             NetworkUtils.sortAddresses(sorted);
             addresses = new InetAddress[] { sorted.get(0) };
-            logger.warn("publish host: {} resolves to multiple addresses, auto-selecting {{}} as single publish address",
-                    Arrays.toString(publishHosts), NetworkAddress.format(addresses[0]));
         }
         return addresses[0];
     }
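
The change above drops the warning and silently picks one publish address by a fixed preference order. A sketch of deterministic selection by sorting; the comparator here (shorter address bytes first, so IPv4 before IPv6, then textual order) is an assumption for illustration, not the order NetworkUtils.sortAddresses actually encodes:

import java.net.InetAddress;
import java.util.Arrays;
import java.util.Comparator;

final class PublishAddressSketch {
    static InetAddress selectPublishAddress(InetAddress[] addresses) {
        if (addresses.length > 1) {
            // sort by a stable preference and take the first, without warning the user
            Arrays.sort(addresses, Comparator
                    .comparing((InetAddress a) -> a.getAddress().length)
                    .thenComparing(InetAddress::getHostAddress));
        }
        return addresses[0];
    }
}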

View File

@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.common.util;
 
+import org.apache.lucene.util.ThreadInterruptedException;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.Nullable;
@@ -84,7 +85,7 @@ public class CancellableThreads {
         RuntimeException throwable = null;
         try {
             interruptable.run();
-        } catch (InterruptedException e) {
+        } catch (InterruptedException | ThreadInterruptedException e) {
             // assume this is us and ignore
         } catch (RuntimeException t) {
             throwable = t;
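
The widened catch above matters because Lucene signals interruption with its own unchecked ThreadInterruptedException rather than the checked InterruptedException. A sketch of the pattern; the nested exception class stands in for org.apache.lucene.util.ThreadInterruptedException:

final class InterruptSketch {
    static class ThreadInterruptedException extends RuntimeException {}

    interface Interruptable {
        void run() throws InterruptedException;
    }

    static void execute(Interruptable interruptable) {
        try {
            interruptable.run();
        } catch (InterruptedException | ThreadInterruptedException e) {
            // assume the cancellation path interrupted us and ignore
        }
    }
}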

View File

@@ -178,12 +178,6 @@ public interface XContentParser extends Releasable {
 
     NumberType numberType() throws IOException;
 
-    /**
-     * Is the number type estimated or not (i.e. an int might actually be a long, its just low enough
-     * to be an int).
-     */
-    boolean estimatedNumberType();
-
     short shortValue(boolean coerce) throws IOException;
 
     int intValue(boolean coerce) throws IOException;

View File

@@ -68,11 +68,6 @@ public class JsonXContentParser extends AbstractXContentParser {
         return convertNumberType(parser.getNumberType());
     }
 
-    @Override
-    public boolean estimatedNumberType() {
-        return true;
-    }
-
     @Override
     public String currentName() throws IOException {
         return parser.getCurrentName();

View File

@@ -560,44 +560,19 @@ class DocumentParser implements Closeable {
             return builder;
         } else if (token == XContentParser.Token.VALUE_NUMBER) {
             XContentParser.NumberType numberType = context.parser().numberType();
-            if (numberType == XContentParser.NumberType.INT) {
-                if (context.parser().estimatedNumberType()) {
-                    Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
-                    if (builder == null) {
-                        builder = MapperBuilders.longField(currentFieldName);
-                    }
-                    return builder;
-                } else {
-                    Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "integer");
-                    if (builder == null) {
-                        builder = MapperBuilders.integerField(currentFieldName);
-                    }
-                    return builder;
-                }
-            } else if (numberType == XContentParser.NumberType.LONG) {
+            if (numberType == XContentParser.NumberType.INT || numberType == XContentParser.NumberType.LONG) {
                 Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long");
                 if (builder == null) {
                     builder = MapperBuilders.longField(currentFieldName);
                 }
                 return builder;
-            } else if (numberType == XContentParser.NumberType.FLOAT) {
-                if (context.parser().estimatedNumberType()) {
-                    Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
-                    if (builder == null) {
-                        builder = MapperBuilders.doubleField(currentFieldName);
-                    }
-                    return builder;
-                } else {
-                    Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "float");
-                    if (builder == null) {
-                        builder = MapperBuilders.floatField(currentFieldName);
-                    }
-                    return builder;
-                }
-            } else if (numberType == XContentParser.NumberType.DOUBLE) {
+            } else if (numberType == XContentParser.NumberType.FLOAT || numberType == XContentParser.NumberType.DOUBLE) {
                 Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double");
                 if (builder == null) {
-                    builder = MapperBuilders.doubleField(currentFieldName);
+                    // no templates are defined, we use float by default instead of double
+                    // since this is much more space-efficient and should be enough most of
+                    // the time
+                    builder = MapperBuilders.floatField(currentFieldName);
                 }
                 return builder;
             }
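
With estimatedNumberType() gone, the dynamic mapper above collapses INT/LONG to "long" and FLOAT/DOUBLE to a template lookup that defaults to "float". A compact sketch of the decision (enum and method names are illustrative; templateType models the dynamic-template lookup result):

final class DynamicNumberSketch {
    enum NumberType { INT, LONG, FLOAT, DOUBLE }

    static String dynamicMappingType(NumberType numberType, String templateType) {
        if (templateType != null) {
            return templateType; // a matching dynamic template always wins
        }
        switch (numberType) {
            case INT:
            case LONG:
                return "long";
            case FLOAT:
            case DOUBLE:
                // float by default: more space-efficient and usually sufficient
                return "float";
            default:
                throw new AssertionError("not a number type: " + numberType);
        }
    }
}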
@@ -713,37 +688,64 @@ class DocumentParser implements Closeable {
         // The path of the dest field might be completely different from the current one so we need to reset it
         context = context.overridePath(new ContentPath(0));
 
+        String[] paths = Strings.splitStringToArray(field, '.');
+        String fieldName = paths[paths.length-1];
         ObjectMapper mapper = context.root();
-        String objectPath = "";
-        String fieldPath = field;
-        int posDot = field.lastIndexOf('.');
-        if (posDot > 0) {
-            objectPath = field.substring(0, posDot);
-            context.path().add(objectPath);
-            mapper = context.docMapper().objectMappers().get(objectPath);
-            fieldPath = field.substring(posDot + 1);
+        ObjectMapper[] mappers = new ObjectMapper[paths.length-1];
+        if (paths.length > 1) {
+            ObjectMapper parent = context.root();
+            for (int i = 0; i < paths.length-1; i++) {
+                mapper = context.docMapper().objectMappers().get(context.path().fullPathAsText(paths[i]));
+                if (mapper == null) {
+                    // One mapping is missing, check if we are allowed to create a dynamic one.
+                    ObjectMapper.Dynamic dynamic = parent.dynamic();
+                    if (dynamic == null) {
+                        dynamic = dynamicOrDefault(context.root().dynamic());
+                    }
+                    switch (dynamic) {
+                        case STRICT:
+                            throw new StrictDynamicMappingException(parent.fullPath(), paths[i]);
+                        case TRUE:
+                            Mapper.Builder builder = context.root().findTemplateBuilder(context, paths[i], "object");
+                            if (builder == null) {
+                                // if this is a non root object, then explicitly set the dynamic behavior if set
+                                if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) {
+                                    ((ObjectMapper.Builder) builder).dynamic(parent.dynamic());
+                                }
+                                builder = MapperBuilders.object(paths[i]).enabled(true).pathType(parent.pathType());
+                            }
+                            Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path());
+                            mapper = (ObjectMapper) builder.build(builderContext);
+                            if (mapper.nested() != ObjectMapper.Nested.NO) {
+                                throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().fullPathAsText(paths[i]) + "]) through `copy_to`");
+                            }
+                            break;
+                        case FALSE:
+                            // Maybe we should log something to tell the user that the copy_to is ignored in this case.
+                            break;
+                        default:
+                            throw new AssertionError("Unexpected dynamic type " + dynamic);
+                    }
+                }
+                context.path().add(paths[i]);
+                mappers[i] = mapper;
+                parent = mapper;
+            }
         }
-        if (mapper == null) {
-            //TODO: Create an object dynamically?
-            throw new MapperParsingException("attempt to copy value to non-existing object [" + field + "]");
-        }
-        ObjectMapper update = parseDynamicValue(context, mapper, fieldPath, context.parser().currentToken());
+        ObjectMapper update = parseDynamicValue(context, mapper, fieldName, context.parser().currentToken());
         assert update != null; // we are parsing a dynamic value so we necessarily created a new mapping
-        // propagate the update to the root
-        while (objectPath.length() > 0) {
-            String parentPath = "";
-            ObjectMapper parent = context.root();
-            posDot = objectPath.lastIndexOf('.');
-            if (posDot > 0) {
-                parentPath = objectPath.substring(0, posDot);
-                parent = context.docMapper().objectMappers().get(parentPath);
-            }
-            if (parent == null) {
-                throw new IllegalStateException("[" + objectPath + "] has no parent for path [" + parentPath + "]");
-            }
-            update = parent.mappingUpdate(update);
-            objectPath = parentPath;
+
+        if (paths.length > 1) {
+            for (int i = paths.length - 2; i >= 0; i--) {
+                ObjectMapper parent = context.root();
+                if (i > 0) {
+                    parent = mappers[i-1];
+                }
+                assert parent != null;
+                update = parent.mappingUpdate(update);
+            }
         }
         context.addDynamicMappingsUpdate(update);
     }
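
The rewritten copy_to handling above walks the dotted target path segment by segment, dynamically creating each missing parent object instead of rejecting the copy. A sketch of the walk with nested maps standing in for ObjectMapper instances:

import java.util.HashMap;
import java.util.Map;

final class CopyToPathSketch {
    @SuppressWarnings("unchecked")
    static void copyTo(Map<String, Object> root, String field, Object value) {
        String[] paths = field.split("\\.");
        Map<String, Object> parent = root;
        for (int i = 0; i < paths.length - 1; i++) {
            Object child = parent.get(paths[i]);
            if (child == null) {
                // the real code consults the dynamic setting (strict/true/false) here
                child = new HashMap<String, Object>();
                parent.put(paths[i], child);
            }
            parent = (Map<String, Object>) child;
        }
        parent.put(paths[paths.length - 1], value);
    }
}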

View File

@@ -370,15 +370,6 @@ public abstract class FieldMapper extends Mapper {
             return;
         }
         FieldMapper fieldMergeWith = (FieldMapper) mergeWith;
-        List<String> subConflicts = new ArrayList<>(); // TODO: just expose list from MergeResult?
-        fieldType().checkTypeName(fieldMergeWith.fieldType(), subConflicts);
-        if (subConflicts.isEmpty() == false) {
-            // return early if field types don't match
-            assert subConflicts.size() == 1;
-            mergeResult.addConflict(subConflicts.get(0));
-            return;
-        }
-
         multiFields.merge(mergeWith, mergeResult);
 
         if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) {

View File

@@ -154,12 +154,9 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
         MappedFieldTypeReference ref = fullNameToFieldType.get(fieldMapper.fieldType().names().fullName());
         if (ref != null) {
             List<String> conflicts = new ArrayList<>();
-            ref.get().checkTypeName(fieldMapper.fieldType(), conflicts);
-            if (conflicts.isEmpty()) { // only check compat if they are the same type
-                final Set<String> types = fullNameToTypes.get(fieldMapper.fieldType().names().fullName());
-                boolean strict = beStrict(type, types, updateAllTypes);
-                ref.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
-            }
+            final Set<String> types = fullNameToTypes.get(fieldMapper.fieldType().names().fullName());
+            boolean strict = beStrict(type, types, updateAllTypes);
+            ref.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
             if (conflicts.isEmpty() == false) {
                 throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().names().fullName() + "] conflicts with existing mapping in other types:\n" + conflicts.toString());
             }
@@ -169,12 +166,9 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
         MappedFieldTypeReference indexNameRef = indexNameToFieldType.get(fieldMapper.fieldType().names().indexName());
         if (indexNameRef != null) {
             List<String> conflicts = new ArrayList<>();
-            indexNameRef.get().checkTypeName(fieldMapper.fieldType(), conflicts);
-            if (conflicts.isEmpty()) { // only check compat if they are the same type
-                final Set<String> types = indexNameToTypes.get(fieldMapper.fieldType().names().indexName());
-                boolean strict = beStrict(type, types, updateAllTypes);
-                indexNameRef.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
-            }
+            final Set<String> types = indexNameToTypes.get(fieldMapper.fieldType().names().indexName());
+            boolean strict = beStrict(type, types, updateAllTypes);
+            indexNameRef.get().checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
             if (conflicts.isEmpty() == false) {
                 throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().names().fullName() + "] conflicts with mapping with the same index name in other types" + conflicts.toString());
             }

View File

@@ -229,9 +229,9 @@ public abstract class MappedFieldType extends FieldType {
     public abstract String typeName();
 
     /** Checks this type is the same type as other. Adds a conflict if they are different. */
-    public final void checkTypeName(MappedFieldType other, List<String> conflicts) {
+    private final void checkTypeName(MappedFieldType other) {
         if (typeName().equals(other.typeName()) == false) {
-            conflicts.add("mapper [" + names().fullName() + "] cannot be changed from type [" + typeName() + "] to [" + other.typeName() + "]");
+            throw new IllegalArgumentException("mapper [" + names().fullName() + "] cannot be changed from type [" + typeName() + "] to [" + other.typeName() + "]");
         } else if (getClass() != other.getClass()) {
             throw new IllegalStateException("Type names equal for class " + getClass().getSimpleName() + " and " + other.getClass().getSimpleName());
         }
@@ -243,6 +243,8 @@ public abstract class MappedFieldType extends FieldType {
      * Otherwise, only properties which must never change in an index are checked.
      */
     public void checkCompatibility(MappedFieldType other, List<String> conflicts, boolean strict) {
+        checkTypeName(other);
+
         boolean indexed = indexOptions() != IndexOptions.NONE;
         boolean mergeWithIndexed = other.indexOptions() != IndexOptions.NONE;
         // TODO: should be validating if index options go "up" (but "down" is ok)
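
The refactoring above folds the type-name check into checkCompatibility and makes a type change fail fast with IllegalArgumentException instead of accumulating a conflict string. A sketch of the shape with simplified types:

import java.util.List;

abstract class FieldTypeSketch {
    abstract String typeName();

    private void checkTypeName(FieldTypeSketch other) {
        if (!typeName().equals(other.typeName())) {
            throw new IllegalArgumentException("cannot change type [" + typeName() + "] to [" + other.typeName() + "]");
        }
    }

    void checkCompatibility(FieldTypeSketch other, List<String> conflicts) {
        checkTypeName(other); // fail fast before the finer-grained property checks
        // ... property-level checks append human-readable entries to conflicts ...
    }
}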

View File

@@ -134,6 +134,26 @@ public abstract class Mapper implements ToXContent, Iterable<Mapper> {
         public ParseFieldMatcher parseFieldMatcher() {
             return parseFieldMatcher;
         }
+
+        public boolean isWithinMultiField() { return false; }
+
+        protected Function<String, TypeParser> typeParsers() { return typeParsers; }
+
+        protected Function<String, SimilarityProvider> similarityLookupService() { return similarityLookupService; }
+
+        public ParserContext createMultiFieldContext(ParserContext in) {
+            return new MultiFieldParserContext(in) {
+                @Override
+                public boolean isWithinMultiField() { return true; }
+            };
+        }
+
+        static class MultiFieldParserContext extends ParserContext {
+            MultiFieldParserContext(ParserContext in) {
+                super(in.type(), in.analysisService, in.similarityLookupService(), in.mapperService(), in.typeParsers(), in.indexVersionCreated(), in.parseFieldMatcher());
+            }
+        }
     }
 
     Mapper.Builder<?,?> parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException;
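
createMultiFieldContext above is a small decorator: a copy of the current parser context that answers true to isWithinMultiField(), letting nested parsers reject settings such as copy_to. A sketch of the same shape (names mirror the diff, not the full Elasticsearch types):

class ParserContextSketch {
    public boolean isWithinMultiField() {
        return false;
    }

    public ParserContextSketch createMultiFieldContext() {
        // same context, with the multi-field flag flipped via an override
        return new ParserContextSketch() {
            @Override
            public boolean isWithinMultiField() {
                return true;
            }
        };
    }
}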

View File

@@ -46,7 +46,7 @@ import java.util.Map;
 
 import static org.apache.lucene.index.IndexOptions.NONE;
 import static org.elasticsearch.index.mapper.MapperBuilders.stringField;
-import static org.elasticsearch.index.mapper.core.TypeParsers.parseField;
+import static org.elasticsearch.index.mapper.core.TypeParsers.parseTextField;
 import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
 
 public class StringFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
@@ -159,7 +159,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc
         @Override
         public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
             StringFieldMapper.Builder builder = stringField(name);
-            parseField(builder, name, node, parserContext);
+            parseTextField(builder, name, node, parserContext);
             for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
                 Map.Entry<String, Object> entry = iterator.next();
                 String propName = Strings.toUnderscoreCase(entry.getKey());

View File

@@ -25,6 +25,7 @@ import org.elasticsearch.Version;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.joda.FormatDateTimeFormatter;
 import org.elasticsearch.common.joda.Joda;
+import org.elasticsearch.common.logging.ESLoggerFactory;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.loader.SettingsLoader;
 import org.elasticsearch.index.analysis.NamedAnalyzer;
@@ -181,34 +182,17 @@ public class TypeParsers {
         }
     }
 
-    public static void parseField(FieldMapper.Builder builder, String name, Map<String, Object> fieldNode, Mapper.TypeParser.ParserContext parserContext) {
+    private static void parseAnalyzersAndTermVectors(FieldMapper.Builder builder, String name, Map<String, Object> fieldNode, Mapper.TypeParser.ParserContext parserContext) {
         NamedAnalyzer indexAnalyzer = builder.fieldType().indexAnalyzer();
         NamedAnalyzer searchAnalyzer = builder.fieldType().searchAnalyzer();
         for (Iterator<Map.Entry<String, Object>> iterator = fieldNode.entrySet().iterator(); iterator.hasNext();) {
             Map.Entry<String, Object> entry = iterator.next();
             final String propName = Strings.toUnderscoreCase(entry.getKey());
             final Object propNode = entry.getValue();
-            if (propName.equals("index_name") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
-                builder.indexName(propNode.toString());
-                iterator.remove();
-            } else if (propName.equals("store")) {
-                builder.store(parseStore(name, propNode.toString()));
-                iterator.remove();
-            } else if (propName.equals("index")) {
-                parseIndex(name, propNode.toString(), builder);
-                iterator.remove();
-            } else if (propName.equals("tokenized")) {
-                builder.tokenized(nodeBooleanValue(propNode));
-                iterator.remove();
-            } else if (propName.equals(DOC_VALUES)) {
-                builder.docValues(nodeBooleanValue(propNode));
-                iterator.remove();
-            } else if (propName.equals("term_vector")) {
+            if (propName.equals("term_vector")) {
                 parseTermVector(name, propNode.toString(), builder);
                 iterator.remove();
-            } else if (propName.equals("boost")) {
-                builder.boost(nodeFloatValue(propNode));
-                iterator.remove();
             } else if (propName.equals("store_term_vectors")) {
                 builder.storeTermVectors(nodeBooleanValue(propNode));
                 iterator.remove();
@@ -221,6 +205,69 @@ public class TypeParsers {
             } else if (propName.equals("store_term_vector_payloads")) {
                 builder.storeTermVectorPayloads(nodeBooleanValue(propNode));
                 iterator.remove();
+            } else if (propName.equals("analyzer") || // for backcompat, reading old indexes, remove for v3.0
+                    propName.equals("index_analyzer") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
+                NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
+                if (analyzer == null) {
+                    throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
+                }
+                indexAnalyzer = analyzer;
+                iterator.remove();
+            } else if (propName.equals("search_analyzer")) {
+                NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
+                if (analyzer == null) {
+                    throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
+                }
+                searchAnalyzer = analyzer;
+                iterator.remove();
+            }
+        }
+
+        if (indexAnalyzer == null) {
+            if (searchAnalyzer != null) {
+                throw new MapperParsingException("analyzer on field [" + name + "] must be set when search_analyzer is set");
+            }
+        } else if (searchAnalyzer == null) {
+            searchAnalyzer = indexAnalyzer;
+        }
+        builder.indexAnalyzer(indexAnalyzer);
+        builder.searchAnalyzer(searchAnalyzer);
+    }
+
+    /**
+     * Parse text field attributes. In addition to {@link #parseField common attributes}
+     * this will parse analysis and term-vectors related settings.
+     */
+    public static void parseTextField(FieldMapper.Builder builder, String name, Map<String, Object> fieldNode, Mapper.TypeParser.ParserContext parserContext) {
+        parseField(builder, name, fieldNode, parserContext);
+        parseAnalyzersAndTermVectors(builder, name, fieldNode, parserContext);
+    }
+
+    /**
+     * Parse common field attributes such as {@code doc_values} or {@code store}.
+     */
+    public static void parseField(FieldMapper.Builder builder, String name, Map<String, Object> fieldNode, Mapper.TypeParser.ParserContext parserContext) {
+        Version indexVersionCreated = parserContext.indexVersionCreated();
+        for (Iterator<Map.Entry<String, Object>> iterator = fieldNode.entrySet().iterator(); iterator.hasNext();) {
+            Map.Entry<String, Object> entry = iterator.next();
+            final String propName = Strings.toUnderscoreCase(entry.getKey());
+            final Object propNode = entry.getValue();
+            if (propName.equals("index_name") && indexVersionCreated.before(Version.V_2_0_0_beta1)) {
+                builder.indexName(propNode.toString());
+                iterator.remove();
+            } else if (propName.equals("store")) {
+                builder.store(parseStore(name, propNode.toString()));
+                iterator.remove();
+            } else if (propName.equals("index")) {
+                parseIndex(name, propNode.toString(), builder);
+                iterator.remove();
+            } else if (propName.equals(DOC_VALUES)) {
+                builder.docValues(nodeBooleanValue(propNode));
+                iterator.remove();
+            } else if (propName.equals("boost")) {
+                builder.boost(nodeFloatValue(propNode));
+                iterator.remove();
             } else if (propName.equals("omit_norms")) {
                 builder.omitNorms(nodeBooleanValue(propNode));
                 iterator.remove();
@@ -242,7 +289,7 @@ public class TypeParsers {
                 iterator.remove();
             } else if (propName.equals("omit_term_freq_and_positions")) {
                 final IndexOptions op = nodeBooleanValue(propNode) ? IndexOptions.DOCS : IndexOptions.DOCS_AND_FREQS_AND_POSITIONS;
-                if (parserContext.indexVersionCreated().onOrAfter(Version.V_1_0_0_RC2)) {
+                if (indexVersionCreated.onOrAfter(Version.V_1_0_0_RC2)) {
                    throw new ElasticsearchParseException("'omit_term_freq_and_positions' is not supported anymore - use ['index_options' : 'docs'] instead");
                 }
                 // deprecated option for BW compat
@@ -251,29 +298,13 @@ public class TypeParsers {
             } else if (propName.equals("index_options")) {
                 builder.indexOptions(nodeIndexOptionValue(propNode));
                 iterator.remove();
-            } else if (propName.equals("analyzer") || // for backcompat, reading old indexes, remove for v3.0
-                    propName.equals("index_analyzer") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
-                NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
-                if (analyzer == null) {
-                    throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
-                }
-                indexAnalyzer = analyzer;
-                iterator.remove();
-            } else if (propName.equals("search_analyzer")) {
-                NamedAnalyzer analyzer = parserContext.analysisService().analyzer(propNode.toString());
-                if (analyzer == null) {
-                    throw new MapperParsingException("analyzer [" + propNode.toString() + "] not found for field [" + name + "]");
-                }
-                searchAnalyzer = analyzer;
-                iterator.remove();
             } else if (propName.equals("include_in_all")) {
                 builder.includeInAll(nodeBooleanValue(propNode));
                 iterator.remove();
-            } else if (propName.equals("postings_format") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
+            } else if (propName.equals("postings_format") && indexVersionCreated.before(Version.V_2_0_0_beta1)) {
                 // ignore for old indexes
                 iterator.remove();
-            } else if (propName.equals("doc_values_format") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
+            } else if (propName.equals("doc_values_format") && indexVersionCreated.before(Version.V_2_0_0_beta1)) {
                 // ignore for old indexes
                 iterator.remove();
             } else if (propName.equals("similarity")) {
@@ -284,23 +315,28 @@ public class TypeParsers {
                 builder.fieldDataSettings(settings);
                 iterator.remove();
             } else if (propName.equals("copy_to")) {
-                parseCopyFields(propNode, builder);
+                if (parserContext.isWithinMultiField()) {
+                    if (indexVersionCreated.after(Version.V_2_1_0) ||
+                            (indexVersionCreated.after(Version.V_2_0_1) && indexVersionCreated.before(Version.V_2_1_0))) {
+                        throw new MapperParsingException("copy_to in multi fields is not allowed. Found the copy_to in field [" + name + "] which is within a multi field.");
+                    } else {
+                        ESLoggerFactory.getLogger("mapping [" + parserContext.type() + "]").warn("Found a copy_to in field [" + name + "] which is within a multi field. This feature has been removed and the copy_to will be removed from the mapping.");
+                    }
+                } else {
+                    parseCopyFields(propNode, builder);
+                }
                 iterator.remove();
             }
         }
-
-        if (indexAnalyzer == null) {
-            if (searchAnalyzer != null) {
-                throw new MapperParsingException("analyzer on field [" + name + "] must be set when search_analyzer is set");
-            }
-        } else if (searchAnalyzer == null) {
-            searchAnalyzer = indexAnalyzer;
-        }
-        builder.indexAnalyzer(indexAnalyzer);
-        builder.searchAnalyzer(searchAnalyzer);
+        if (indexVersionCreated.before(Version.V_2_2_0)) {
+            // analyzer, search_analyzer, term_vectors were accepted on all fields
+            // before 2.2, even though it made little sense
+            parseAnalyzersAndTermVectors(builder, name, fieldNode, parserContext);
+        }
     }
 
     public static boolean parseMultiField(FieldMapper.Builder builder, String name, Mapper.TypeParser.ParserContext parserContext, String propName, Object propNode) {
+        parserContext = parserContext.createMultiFieldContext(parserContext);
         if (propName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
             builder.multiFieldPathType(parsePathType(name, propNode.toString()));
             return true;

View File

@ -49,7 +49,7 @@ import java.util.Map;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeMapValue;
import static org.elasticsearch.index.mapper.core.TypeParsers.parseTextField;
/**
*
@ -134,7 +134,7 @@ public class AllFieldMapper extends MetadataFieldMapper {
}
}
parseTextField(builder, builder.name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
String fieldName = Strings.toUnderscoreCase(entry.getKey());

View File

@ -18,25 +18,15 @@
*/
package org.elasticsearch.index.query;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.HasContextAndHeaders;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.script.*;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import static org.elasticsearch.common.Strings.hasLength;
/**
* In the simplest case, parse template string and variables from the request,
* compile the template and execute the template against the given variables.

View File

@ -220,7 +220,7 @@ public class IndexShard extends AbstractIndexShardComponent {
this.indexCache = indexCache;
this.indexingService = new ShardIndexingService(shardId, indexSettings);
this.getService = new ShardGetService(indexSettings, this, mapperService);
this.termVectorsService = provider.getTermVectorsService();
this.searchService = new ShardSearchStats(settings);
this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings);
this.indicesQueryCache = provider.getIndicesQueryCache();
@ -239,7 +239,7 @@ public class IndexShard extends AbstractIndexShardComponent {
this.checkIndexOnStartup = settings.get("index.shard.check_on_startup", "false");
this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, getFromSettings(logger, settings, Translog.Durabilty.REQUEST),
provider.getBigArrays(), threadPool);
final QueryCachingPolicy cachingPolicy;
// the query cache is a node-level thing, however we want the most popular filters
// to be computed on a per-shard basis
@ -395,7 +395,7 @@ public class IndexShard extends AbstractIndexShardComponent {
* Marks the shard as recovering based on a recovery state, fails with an exception if recovering is not allowed to be set.
*/
public IndexShardState markAsRecovering(String reason, RecoveryState recoveryState) throws IndexShardStartedException,
IndexShardRelocatedException, IndexShardRecoveringException, IndexShardClosedException {
synchronized (mutex) {
if (state == IndexShardState.CLOSED) {
throw new IndexShardClosedException(shardId);
@ -521,8 +521,9 @@ public class IndexShard extends AbstractIndexShardComponent {
return prepareDelete(type, id, uid, SequenceNumbersService.UNASSIGNED_SEQ_NO, version, versionType, Engine.Operation.Origin.PRIMARY);
}
public Engine.Delete prepareDeleteOnReplica(String type, String id, long seqNo, long version, VersionType versionType) {
if (shardRouting.primary() && shardRouting.isRelocationTarget() == false) {
throw new IllegalIndexShardStateException(shardId, state, "shard is not a replica");
}
final DocumentMapper documentMapper = docMapper(type).getDocumentMapper();
@ -535,6 +536,7 @@ public class IndexShard extends AbstractIndexShardComponent {
return new Engine.Delete(type, id, uid, seqNo, version, versionType, origin, startTime, false);
}
public void delete(Engine.Delete delete) {
ensureWriteAllowed(delete);
markLastWrite();
@ -692,7 +694,7 @@ public class IndexShard extends AbstractIndexShardComponent {
logger.trace("force merge with {}", forceMerge); logger.trace("force merge with {}", forceMerge);
} }
getEngine().forceMerge(forceMerge.flush(), forceMerge.maxNumSegments(), getEngine().forceMerge(forceMerge.flush(), forceMerge.maxNumSegments(),
forceMerge.onlyExpungeDeletes(), false, false); forceMerge.onlyExpungeDeletes(), false, false);
} }
/**
@ -706,8 +708,8 @@ public class IndexShard extends AbstractIndexShardComponent {
org.apache.lucene.util.Version previousVersion = minimumCompatibleVersion();
// we just want to upgrade the segments, not actually force merge to a single segment
getEngine().forceMerge(true, // we need to flush at the end to make sure the upgrade is durable
Integer.MAX_VALUE, // we just want to upgrade the segments, not actually optimize to a single segment
false, true, upgrade.upgradeOnlyAncientSegments());
org.apache.lucene.util.Version version = minimumCompatibleVersion();
if (logger.isTraceEnabled()) {
logger.trace("upgraded segment {} from version {} to version {}", previousVersion, version);
@ -941,7 +943,7 @@ public class IndexShard extends AbstractIndexShardComponent {
public boolean ignoreRecoveryAttempt() {
IndexShardState state = state(); // one time volatile read
return state == IndexShardState.POST_RECOVERY || state == IndexShardState.RECOVERING || state == IndexShardState.STARTED ||
state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED;
}
public void readAllowed() throws IllegalIndexShardStateException {
@ -1046,7 +1048,7 @@ public class IndexShard extends AbstractIndexShardComponent {
long iwBytesUsed = engine.indexWriterRAMBytesUsed();
String message = LoggerMessageFormat.format("updating index_buffer_size from [{}] to [{}]; IndexWriter now using [{}] bytes",
preValue, shardIndexingBufferSize, iwBytesUsed);
if (iwBytesUsed > shardIndexingBufferSize.bytes()) {
// our allowed buffer was changed to less than we are currently using; we ask IW to refresh
@ -1476,9 +1478,9 @@ public class IndexShard extends AbstractIndexShardComponent {
writeReason = "routing changed from " + currentRouting + " to " + newRouting; writeReason = "routing changed from " + currentRouting + " to " + newRouting;
} else { } else {
logger.trace("skip writing shard state, has been written before; previous version: [" + logger.trace("skip writing shard state, has been written before; previous version: [" +
currentRouting.version() + "] current version [" + newRouting.version() + "]"); currentRouting.version() + "] current version [" + newRouting.version() + "]");
assert currentRouting.version() <= newRouting.version() : "version should not go backwards for shardID: " + shardId + assert currentRouting.version() <= newRouting.version() : "version should not go backwards for shardID: " + shardId +
" previous version: [" + currentRouting.version() + "] current version [" + newRouting.version() + "]"; " previous version: [" + currentRouting.version() + "] current version [" + newRouting.version() + "]";
return; return;
} }
final ShardStateMetaData newShardStateMetadata = new ShardStateMetaData(newRouting.version(), newRouting.primary(), getIndexUUID(), newRouting.allocationId()); final ShardStateMetaData newShardStateMetadata = new ShardStateMetaData(newRouting.version(), newRouting.primary(), getIndexUUID(), newRouting.allocationId());
@ -1510,9 +1512,8 @@ public class IndexShard extends AbstractIndexShardComponent {
};
final Engine.Warmer engineWarmer = (searcher, toLevel) -> warmer.warm(searcher, this, idxSettings, toLevel);
return new EngineConfig(shardId,
threadPool, indexingService, indexSettings, engineWarmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig,
mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig, inactiveTime);
}
private static class IndexShardOperationCounter extends AbstractRefCounted {
@ -1536,14 +1537,29 @@ public class IndexShard extends AbstractIndexShardComponent {
}
}
/**
* increments the ongoing operations counter on a primary shard. Returns the primary term of this shard.
*/
public long incrementOperationCounterOnPrimary() {
if (shardRouting.primary() == false) {
throw new IllegalIndexShardStateException(shardId, state, "shard is not a primary");
}
indexShardOperationCounter.incRef();
return shardRouting.primaryTerm();
}
/**
* increments the ongoing operations counter. If the given primary term is lower than the one in {@link #shardRouting}
* an {@link IllegalIndexShardStateException} is thrown.
*/
public void incrementOperationCounterOnReplica(long opPrimaryTerm) {
if (shardRouting.primaryTerm() > opPrimaryTerm) {
throw new IllegalIndexShardStateException(shardId, state, "operation term [{}] is too old (current [{}])", opPrimaryTerm, shardRouting.primaryTerm());
}
if (shardRouting.primary() && shardRouting.isRelocationTarget() == false) {
throw new IllegalIndexShardStateException(shardId, state, "shard is not a replica");
}
indexShardOperationCounter.incRef();
}
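
The counter API above now splits into an explicit primary path, which returns the term the operation runs under, and a replica path, which rejects operations stamped with a stale term. A self-contained model of the idea follows; every name here is hypothetical, and in the real class the checks live on the shard routing:

import java.util.concurrent.atomic.AtomicInteger;

// Hypothetical, self-contained model of the two increment paths above.
final class OperationCounterSketch {
    private final AtomicInteger ongoingOps = new AtomicInteger();
    private final boolean primary;
    private final long primaryTerm;

    OperationCounterSketch(boolean primary, long primaryTerm) {
        this.primary = primary;
        this.primaryTerm = primaryTerm;
    }

    // Primary path: increment and return the term the operation was started under.
    long incrementOnPrimary() {
        if (!primary) {
            throw new IllegalStateException("shard is not a primary");
        }
        ongoingOps.incrementAndGet();
        return primaryTerm;
    }

    // Replica path: reject operations stamped with a term older than our own.
    void incrementOnReplica(long opPrimaryTerm) {
        if (primaryTerm > opPrimaryTerm) {
            throw new IllegalStateException("operation term " + opPrimaryTerm
                    + " is too old (current " + primaryTerm + ")");
        }
        ongoingOps.incrementAndGet();
    }
}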

View File

@ -19,7 +19,6 @@
package org.elasticsearch.indices.memory;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -213,23 +212,19 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
}
/** returns true if shard exists and is available for updates */
protected boolean shardAvailable(IndexShard shard) {
// shadow replica doesn't have an indexing buffer
return shard.canIndex() && CAN_UPDATE_INDEX_BUFFER_STATES.contains(shard.state());
}
/** set new indexing and translog buffers on this shard. this may cause the shard to refresh to free up heap. */
protected void updateShardBuffers(IndexShard shard, ByteSizeValue shardIndexingBufferSize, ByteSizeValue shardTranslogBufferSize) {
try {
shard.updateBufferSize(shardIndexingBufferSize, shardTranslogBufferSize);
} catch (EngineClosedException | FlushNotAllowedEngineException e) {
// ignore
} catch (Exception e) {
logger.warn("failed to set shard {} index buffer to [{}]", e, shard.shardId(), shardIndexingBufferSize);
}
}
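
The rewritten catch chain relies on Java 7 multi-catch to treat the two "shard went away" exceptions identically. A minimal standalone illustration, with hypothetical exception types standing in for the Elasticsearch ones:

// Hypothetical exceptions standing in for EngineClosedException and
// FlushNotAllowedEngineException from the hunk above.
class ClosedSketchException extends RuntimeException {}
class NotAllowedSketchException extends RuntimeException {}

final class MultiCatchSketch {
    static void updateBuffer(Runnable update) {
        try {
            update.run();
        } catch (ClosedSketchException | NotAllowedSketchException e) {
            // both mean "the shard went away mid-update": safe to ignore
        } catch (Exception e) {
            System.err.println("failed to set index buffer: " + e);
        }
    }
}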

View File

@ -75,12 +75,6 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb", "SMALL_FILE_CUTOFF_BYTES").bytes();
/**
* Use {@link #INDICES_RECOVERY_MAX_BYTES_PER_SEC} instead
*/
@Deprecated
public static final String INDICES_RECOVERY_MAX_SIZE_PER_SEC = "indices.recovery.max_size_per_sec";
private volatile ByteSizeValue fileChunkSize;
private volatile boolean compress;
@ -105,9 +99,9 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
public RecoverySettings(Settings settings, NodeSettingsService nodeSettingsService) {
super(settings);
this.fileChunkSize = settings.getAsBytesSize(INDICES_RECOVERY_FILE_CHUNK_SIZE, new ByteSizeValue(512, ByteSizeUnit.KB));
this.translogOps = settings.getAsInt(INDICES_RECOVERY_TRANSLOG_OPS, 1000);
this.translogSize = settings.getAsBytesSize(INDICES_RECOVERY_TRANSLOG_SIZE, new ByteSizeValue(512, ByteSizeUnit.KB));
this.compress = settings.getAsBoolean(INDICES_RECOVERY_COMPRESS, true);
this.retryDelayStateSync = settings.getAsTime(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(500));
@ -124,14 +118,14 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
);
this.concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, 3);
this.concurrentStreamPool = EsExecutors.newScaling("recovery_stream", 0, concurrentStreams, 60, TimeUnit.SECONDS,
EsExecutors.daemonThreadFactory(settings, "[recovery_stream]"));
this.concurrentSmallFileStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 2);
this.concurrentSmallFileStreamPool = EsExecutors.newScaling("small_file_recovery_stream", 0, concurrentSmallFileStreams, 60,
TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[small_file_recovery_stream]"));
this.maxBytesPerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(40, ByteSizeUnit.MB));
if (maxBytesPerSec.bytes() <= 0) {
rateLimiter = null;
} else {
@ -206,7 +200,7 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
ByteSizeValue maxSizePerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec);
if (!Objects.equals(maxSizePerSec, RecoverySettings.this.maxBytesPerSec)) {
logger.info("updating [{}] from [{}] to [{}]", INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec, maxSizePerSec);
RecoverySettings.this.maxBytesPerSec = maxSizePerSec;
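
All of the lookups above drop their legacy fallback keys (the old index.shard.recovery.* and indices.recovery.max_size_per_sec names) in favor of a constant plus a plain default. A standalone sketch of the before/after lookup shape, using a plain Map as a hypothetical stand-in for Settings:

import java.util.HashMap;
import java.util.Map;

// Plain-Map stand-in for Settings, to contrast the two lookup styles.
final class SettingsLookupSketch {
    // Old style: primary key, then a legacy fallback key, then the default.
    static int getWithLegacyFallback(Map<String, Integer> settings) {
        return settings.getOrDefault("indices.recovery.translog_ops",
                settings.getOrDefault("index.shard.recovery.translog_ops", 1000));
    }

    // New style after this commit's cleanup: one key, one default.
    static int getSimple(Map<String, Integer> settings) {
        return settings.getOrDefault("indices.recovery.translog_ops", 1000);
    }

    public static void main(String[] args) {
        Map<String, Integer> legacyOnly = new HashMap<>();
        legacyOnly.put("index.shard.recovery.translog_ops", 500);
        System.out.println(getWithLegacyFallback(legacyOnly)); // 500
        System.out.println(getSimple(legacyOnly));             // 1000: legacy key ignored
    }
}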

View File

@ -134,12 +134,12 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
logger.trace("will retry recovery with id [{}] in [{}]", reason, recoveryStatus.recoveryId(), retryAfter); logger.trace("will retry recovery with id [{}] in [{}]", reason, recoveryStatus.recoveryId(), retryAfter);
retryRecovery(recoveryStatus, retryAfter, currentRequest); retryRecovery(recoveryStatus, retryAfter, currentRequest);
} }
protected void retryRecovery(final RecoveryStatus recoveryStatus, final String reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) { protected void retryRecovery(final RecoveryStatus recoveryStatus, final String reason, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {
logger.trace("will retry recovery with id [{}] in [{}] (reason [{}])", recoveryStatus.recoveryId(), retryAfter, reason); logger.trace("will retry recovery with id [{}] in [{}] (reason [{}])", recoveryStatus.recoveryId(), retryAfter, reason);
retryRecovery(recoveryStatus, retryAfter, currentRequest); retryRecovery(recoveryStatus, retryAfter, currentRequest);
} }
private void retryRecovery(final RecoveryStatus recoveryStatus, TimeValue retryAfter, final StartRecoveryRequest currentRequest) { private void retryRecovery(final RecoveryStatus recoveryStatus, TimeValue retryAfter, final StartRecoveryRequest currentRequest) {
try { try {
recoveryStatus.resetRecovery(); recoveryStatus.resetRecovery();
@ -208,11 +208,15 @@ public class RecoveryTarget extends AbstractComponent implements IndexEventListe
} catch (CancellableThreads.ExecutionCancelledException e) {
logger.trace("recovery cancelled", e);
} catch (Throwable e) {
if (logger.isTraceEnabled()) {
logger.trace("[{}][{}] Got exception on recovery", e, request.shardId().index().name(), request.shardId().id());
}
Throwable cause = ExceptionsHelper.unwrapCause(e);
if (cause instanceof CancellableThreads.ExecutionCancelledException) {
// this can also come from the source wrapped in a RemoteTransportException
onGoingRecoveries.failRecovery(recoveryStatus.recoveryId(), new RecoveryFailedException(request, "source has canceled the recovery", cause), false);
return;
}
if (cause instanceof RecoveryEngineException) {
// unwrap an exception that was thrown as part of the recovery
cause = cause.getCause();
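
The new branch above catches a cancellation that arrives from the remote source wrapped in a transport exception, which is why it inspects the unwrapped cause rather than the top-level throwable. A standalone sketch of walking a cause chain for that case, with a hypothetical exception type standing in for ExecutionCancelledException:

// Hypothetical marker for "the remote side cancelled"; it stands in for the
// ExecutionCancelledException that may arrive wrapped in a transport exception.
class CancelledSketchException extends RuntimeException {}

final class CauseUnwrapSketch {
    // Returns true if any exception in the cause chain signals cancellation,
    // even when a remote-transport layer wrapped it before it got here.
    static boolean isCancelled(Throwable t) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            if (cur instanceof CancelledSketchException) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Throwable wrapped = new RuntimeException("remote", new CancelledSketchException());
        System.out.println(isCancelled(wrapped)); // true
    }
}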

View File

@ -1,3 +1,22 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.monitor;
import java.lang.management.OperatingSystemMXBean;

View File

@ -128,14 +128,13 @@ public class Node implements Releasable {
* @param preparedSettings Base settings to configure the node with
*/
public Node(Settings preparedSettings) {
this(InternalSettingsPreparer.prepareEnvironment(preparedSettings, null), Version.CURRENT, Collections.<Class<? extends Plugin>>emptyList());
}
protected Node(Environment tmpEnv, Version version, Collection<Class<? extends Plugin>> classpathPlugins) {
Settings tmpSettings = settingsBuilder().put(tmpEnv.settings())
.put(Client.CLIENT_TYPE_SETTING, CLIENT_TYPE).build();
tmpSettings = TribeService.processSettings(tmpSettings);
ESLogger logger = Loggers.getLogger(Node.class, tmpSettings.get("name"));
logger.info("version[{}], pid[{}], build[{}/{}]", version, JvmInfo.jvmInfo().pid(), Build.CURRENT.shortHash(), Build.CURRENT.date());

View File

@ -83,6 +83,7 @@ public class PluginManager {
"discovery-gce", "discovery-gce",
"discovery-multicast", "discovery-multicast",
"lang-javascript", "lang-javascript",
"lang-plan-a",
"lang-python", "lang-python",
"mapper-attachments", "mapper-attachments",
"mapper-murmur3", "mapper-murmur3",

View File

@ -316,7 +316,8 @@ public class PluginsService extends AbstractComponent {
// gather urls for jar files
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(module, "*.jar")) {
for (Path jar : jarStream) {
// normalize with toRealPath to get symlinks out of our hair
bundle.urls.add(jar.toRealPath().toUri().toURL());
}
}
bundles.add(bundle);
@ -357,7 +358,8 @@ public class PluginsService extends AbstractComponent {
// a jvm plugin: gather urls for jar files
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
for (Path jar : jarStream) {
// normalize with toRealPath to get symlinks out of our hair
urls.add(jar.toRealPath().toUri().toURL());
}
}
}
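
Both hunks apply the same fix: resolve symlinks with toRealPath() before turning a jar path into a classloader URL, so the same file reached through a link maps to one URL. A minimal standalone demonstration using only java.nio.file (note that creating symlinks may require extra privileges on Windows):

import java.io.IOException;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;

final class RealPathDemo {
    public static void main(String[] args) throws IOException {
        Path dir = Files.createTempDirectory("plugins");
        Path target = Files.createFile(dir.resolve("plugin.jar"));
        Path link = Files.createSymbolicLink(dir.resolve("plugin-link.jar"), target);

        // Without normalization the symlink produces its own URL...
        URL viaLink = link.toUri().toURL();
        // ...with toRealPath() it collapses onto the real file.
        URL normalized = link.toRealPath().toUri().toURL();

        System.out.println(viaLink.equals(target.toUri().toURL()));                  // false
        System.out.println(normalized.equals(target.toRealPath().toUri().toURL()));  // true
    }
}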

View File

@ -20,6 +20,7 @@
package org.elasticsearch.rest.action.admin.indices.alias;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.AliasAction;
@ -30,9 +31,10 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.action.support.AcknowledgedRestListener;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.cluster.metadata.AliasAction.newAddAliasAction;
import static org.elasticsearch.rest.RestRequest.Method.POST;
/**
@ -75,8 +77,8 @@ public class RestIndicesAliasesAction extends BaseRestHandler {
} else {
throw new IllegalArgumentException("Alias action [" + action + "] not supported");
}
String[] indices = null;
String[] aliases = null;
Map<String, Object> filter = null;
String routing = null;
boolean routingSet = false;
@ -90,9 +92,9 @@ public class RestIndicesAliasesAction extends BaseRestHandler {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if ("index".equals(currentFieldName)) {
indices = new String[] { parser.text() };
} else if ("alias".equals(currentFieldName)) {
aliases = new String[] { parser.text() };
} else if ("routing".equals(currentFieldName)) {
routing = parser.textOrNull();
routingSet = true;
@ -103,6 +105,23 @@ public class RestIndicesAliasesAction extends BaseRestHandler {
searchRouting = parser.textOrNull();
searchRoutingSet = true;
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("indices".equals(currentFieldName)) {
List<String> indexNames = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
String index = parser.text();
indexNames.add(index);
}
indices = indexNames.toArray(new String[indexNames.size()]);
}
if ("aliases".equals(currentFieldName)) {
List<String> aliasNames = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
String alias = parser.text();
aliasNames.add(alias);
}
aliases = aliasNames.toArray(new String[aliasNames.size()]);
}
} else if (token == XContentParser.Token.START_OBJECT) {
if ("filter".equals(currentFieldName)) {
filter = parser.mapOrdered();
@ -111,19 +130,19 @@ public class RestIndicesAliasesAction extends BaseRestHandler {
}
if (type == AliasAction.Type.ADD) {
AliasActions aliasActions = new AliasActions(type, indices, aliases);
if (routingSet) {
aliasActions.routing(routing);
}
if (indexRoutingSet) {
aliasActions.indexRouting(indexRouting);
}
if (searchRoutingSet) {
aliasActions.searchRouting(searchRouting);
}
indicesAliasesRequest.addAliasAction(aliasActions);
} else if (type == AliasAction.Type.REMOVE) {
indicesAliasesRequest.removeAlias(indices, aliases);
}
}
}
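
The parsing change lets an action body carry either a singular index/alias string or an indices/aliases array, and both forms normalize to a String[]. A standalone sketch of that normalization, with hypothetical helper names and no XContent dependency:

import java.util.Arrays;
import java.util.List;

final class AliasBodySketch {
    // Both the singular and the plural form end up as a String[], mirroring
    // how the handler above fills indices/aliases from either shape.
    static String[] fromSingle(String value) {
        return new String[] { value };
    }

    static String[] fromArray(List<String> values) {
        return values.toArray(new String[values.size()]);
    }

    public static void main(String[] args) {
        System.out.println(fromSingle("logs-2015").length);             // "index": "logs-2015"
        System.out.println(fromArray(Arrays.asList("a", "b")).length);  // "indices": ["a", "b"]
    }
}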

View File

@ -21,7 +21,8 @@ package org.elasticsearch.rest.action.admin.indices.analyze;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -47,6 +48,17 @@ import static org.elasticsearch.rest.RestRequest.Method.POST;
*/
public class RestAnalyzeAction extends BaseRestHandler {
public static class Fields {
public static final ParseField ANALYZER = new ParseField("analyzer");
public static final ParseField TEXT = new ParseField("text");
public static final ParseField FIELD = new ParseField("field");
public static final ParseField TOKENIZER = new ParseField("tokenizer");
public static final ParseField TOKEN_FILTERS = new ParseField("token_filters", "filters");
public static final ParseField CHAR_FILTERS = new ParseField("char_filters");
public static final ParseField EXPLAIN = new ParseField("explain");
public static final ParseField ATTRIBUTES = new ParseField("attributes");
}
@Inject
public RestAnalyzeAction(Settings settings, RestController controller, Client client) {
super(settings, controller, client);
@ -68,6 +80,8 @@ public class RestAnalyzeAction extends BaseRestHandler {
analyzeRequest.tokenizer(request.param("tokenizer"));
analyzeRequest.tokenFilters(request.paramAsStringArray("token_filters", request.paramAsStringArray("filters", analyzeRequest.tokenFilters())));
analyzeRequest.charFilters(request.paramAsStringArray("char_filters", analyzeRequest.charFilters()));
analyzeRequest.explain(request.paramAsBoolean("explain", false));
analyzeRequest.attributes(request.paramAsStringArray("attributes", analyzeRequest.attributes()));
if (RestActions.hasBodyContent(request)) {
XContentType type = RestActions.guessBodyContentType(request);
@ -78,14 +92,14 @@ public class RestAnalyzeAction extends BaseRestHandler {
}
} else {
// NOTE: if rest request with xcontent body has request parameters, the parameters do not override xcontent values
buildFromContent(RestActions.getRestContent(request), analyzeRequest, parseFieldMatcher);
}
}
client.admin().indices().analyze(analyzeRequest, new RestToXContentListener<AnalyzeResponse>(channel));
}
public static void buildFromContent(BytesReference content, AnalyzeRequest analyzeRequest, ParseFieldMatcher parseFieldMatcher) {
try (XContentParser parser = XContentHelper.createParser(content)) {
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new IllegalArgumentException("Malformed content, must start with an object");
@ -95,9 +109,9 @@ public class RestAnalyzeAction extends BaseRestHandler {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (parseFieldMatcher.match(currentFieldName, Fields.TEXT) && token == XContentParser.Token.VALUE_STRING) {
analyzeRequest.text(parser.text());
} else if (parseFieldMatcher.match(currentFieldName, Fields.TEXT) && token == XContentParser.Token.START_ARRAY) {
List<String> texts = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token.isValue() == false) {
@ -105,14 +119,14 @@ public class RestAnalyzeAction extends BaseRestHandler {
}
texts.add(parser.text());
}
analyzeRequest.text(texts.toArray(new String[texts.size()]));
} else if (parseFieldMatcher.match(currentFieldName, Fields.ANALYZER) && token == XContentParser.Token.VALUE_STRING) {
analyzeRequest.analyzer(parser.text());
} else if (parseFieldMatcher.match(currentFieldName, Fields.FIELD) && token == XContentParser.Token.VALUE_STRING) {
analyzeRequest.field(parser.text());
} else if (parseFieldMatcher.match(currentFieldName, Fields.TOKENIZER) && token == XContentParser.Token.VALUE_STRING) {
analyzeRequest.tokenizer(parser.text());
} else if (parseFieldMatcher.match(currentFieldName, Fields.TOKEN_FILTERS) && token == XContentParser.Token.START_ARRAY) {
List<String> filters = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token.isValue() == false) {
@ -120,8 +134,8 @@ public class RestAnalyzeAction extends BaseRestHandler {
}
filters.add(parser.text());
}
analyzeRequest.tokenFilters(filters.toArray(new String[filters.size()]));
} else if (parseFieldMatcher.match(currentFieldName, Fields.CHAR_FILTERS) && token == XContentParser.Token.START_ARRAY) {
List<String> charFilters = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token.isValue() == false) {
@ -129,7 +143,18 @@ public class RestAnalyzeAction extends BaseRestHandler {
}
charFilters.add(parser.text());
}
analyzeRequest.charFilters(charFilters.toArray(new String[charFilters.size()]));
} else if (parseFieldMatcher.match(currentFieldName, Fields.EXPLAIN) && token == XContentParser.Token.VALUE_BOOLEAN) {
analyzeRequest.explain(parser.booleanValue());
} else if (parseFieldMatcher.match(currentFieldName, Fields.ATTRIBUTES) && token == XContentParser.Token.START_ARRAY){
List<String> attributes = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token.isValue() == false) {
throw new IllegalArgumentException(currentFieldName + " array element should only contain attribute name");
}
attributes.add(parser.text());
}
analyzeRequest.attributes(attributes.toArray(new String[attributes.size()]));
} else {
throw new IllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] ");
}
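
Body fields are now matched through ParseField, which gives each field a canonical name plus deprecated aliases (here token_filters with the legacy filters). A standalone model of that matching; this mini class is hypothetical and only mirrors the match/deprecation idea:

import java.util.Arrays;

// Minimal stand-in for the ParseField/ParseFieldMatcher pair used above.
final class ParseFieldSketch {
    final String canonical;
    final String[] deprecated;

    ParseFieldSketch(String canonical, String... deprecated) {
        this.canonical = canonical;
        this.deprecated = deprecated;
    }

    // In strict mode a deprecated alias is rejected; in lenient mode it matches.
    boolean match(String candidate, boolean strict) {
        if (canonical.equals(candidate)) {
            return true;
        }
        boolean isDeprecated = Arrays.asList(deprecated).contains(candidate);
        if (isDeprecated && strict) {
            throw new IllegalArgumentException(
                    "deprecated field [" + candidate + "], use [" + canonical + "]");
        }
        return isDeprecated;
    }

    public static void main(String[] args) {
        ParseFieldSketch tokenFilters = new ParseFieldSketch("token_filters", "filters");
        System.out.println(tokenFilters.match("filters", false)); // true (legacy alias)
    }
}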

View File

@ -41,7 +41,6 @@ import org.elasticsearch.rest.action.support.RestBuilderListener;
import org.elasticsearch.script.Script.ScriptField;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.script.Template;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;
import java.util.Map;
@ -89,7 +88,7 @@ public class RestRenderSearchTemplateAction extends BaseRestHandler {
throw new ElasticsearchParseException("failed to parse request. unknown field [{}] of type [{}]", currentFieldName, token);
}
}
template = new Template(templateId, ScriptType.INDEXED, Template.DEFAULT_LANG, null, params);
}
renderSearchTemplateRequest = new RenderSearchTemplateRequest();
renderSearchTemplateRequest.template(template);

View File

@ -19,16 +19,11 @@
package org.elasticsearch.rest.action.bulk;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.Strings;
@ -96,38 +91,7 @@ public class RestBulkAction extends BaseRestHandler {
builder.startArray(Fields.ITEMS);
for (BulkItemResponse itemResponse : response) {
builder.startObject();
itemResponse.toXContent(builder, request);
if (itemResponse.isFailed()) {
builder.field(Fields._INDEX, itemResponse.getIndex());
builder.field(Fields._TYPE, itemResponse.getType());
builder.field(Fields._ID, itemResponse.getId());
builder.field(Fields.STATUS, itemResponse.getFailure().getStatus().getStatus());
builder.startObject(Fields.ERROR);
ElasticsearchException.toXContent(builder, request, itemResponse.getFailure().getCause());
builder.endObject();
} else {
final DocWriteResponse docResponse = itemResponse.getResponse();
docResponse.toXContent(builder, request);
RestStatus status = docResponse.getShardInfo().status();
if (docResponse instanceof DeleteResponse) {
DeleteResponse deleteResponse = (DeleteResponse) docResponse;
if (deleteResponse.isFound() == false) {
status = RestStatus.NOT_FOUND;
}
} else if (docResponse instanceof IndexResponse) {
IndexResponse indexResponse = (IndexResponse) docResponse;
if (indexResponse.isCreated()) {
status = RestStatus.CREATED;
}
} else if (docResponse instanceof UpdateResponse) {
UpdateResponse updateResponse = (UpdateResponse) docResponse;
if (updateResponse.isCreated()) {
status = RestStatus.CREATED;
}
}
builder.field(Fields.STATUS, status.getStatus());
}
builder.endObject();
builder.endObject();
}
builder.endArray();
@ -141,11 +105,6 @@ public class RestBulkAction extends BaseRestHandler {
static final class Fields {
static final XContentBuilderString ITEMS = new XContentBuilderString("items");
static final XContentBuilderString ERRORS = new XContentBuilderString("errors");
static final XContentBuilderString _INDEX = new XContentBuilderString("_index");
static final XContentBuilderString _TYPE = new XContentBuilderString("_type");
static final XContentBuilderString _ID = new XContentBuilderString("_id");
static final XContentBuilderString STATUS = new XContentBuilderString("status");
static final XContentBuilderString ERROR = new XContentBuilderString("error");
static final XContentBuilderString TOOK = new XContentBuilderString("took");
}
}

View File

@ -30,9 +30,9 @@ import org.elasticsearch.index.VersionType;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.rest.action.support.RestBuilderListener;
import org.elasticsearch.rest.action.support.RestStatusToXContentListener;
import static org.elasticsearch.rest.RestRequest.Method.DELETE;
import static org.elasticsearch.rest.RestStatus.NOT_FOUND;
/**
*
@ -60,18 +60,6 @@ public class RestDeleteAction extends BaseRestHandler {
deleteRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel));
}
client.delete(deleteRequest, new RestStatusToXContentListener<>(channel));
@Override
public RestResponse buildResponse(DeleteResponse result, XContentBuilder builder) throws Exception {
builder.startObject();
result.toXContent(builder, request);
builder.endObject();
RestStatus status = result.getShardInfo().status();
if (!result.isFound()) {
status = NOT_FOUND;
}
return new BytesRestResponse(status, builder);
}
});
}
}

View File

@ -30,6 +30,7 @@ import org.elasticsearch.index.VersionType;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.rest.action.support.RestBuilderListener;
import org.elasticsearch.rest.action.support.RestStatusToXContentListener;
import java.io.IOException;
@ -98,17 +99,6 @@ public class RestIndexAction extends BaseRestHandler {
if (consistencyLevel != null) {
indexRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel));
}
client.index(indexRequest, new RestStatusToXContentListener<>(channel));
@Override
public RestResponse buildResponse(IndexResponse response, XContentBuilder builder) throws Exception {
builder.startObject();
response.toXContent(builder, request);
RestStatus status = response.getShardInfo().status();
if (response.isCreated()) {
status = CREATED;
}
return new BytesRestResponse(status, builder);
}
});
}
}

View File

@ -75,20 +75,26 @@ public class RestTable {
BytesStreamOutput bytesOut = channel.bytesOutput();
UTF8StreamWriter out = new UTF8StreamWriter().setOutput(bytesOut);
int lastHeader = headers.size() - 1;
if (verbose) {
for (int col = 0; col < headers.size(); col++) {
DisplayHeader header = headers.get(col);
boolean isLastColumn = col == lastHeader;
pad(new Table.Cell(header.display, table.findHeaderByName(header.name)), width[col], request, out, isLastColumn);
if (!isLastColumn) {
out.append(" ");
}
}
out.append("\n");
}
for (int row = 0; row < table.getRows().size(); row++) {
for (int col = 0; col < headers.size(); col++) {
DisplayHeader header = headers.get(col);
boolean isLastColumn = col == lastHeader;
pad(table.getAsMap().get(header.name).get(row), width[col], request, out, isLastColumn);
if (!isLastColumn) {
out.append(" ");
}
}
out.append("\n");
}
@ -236,6 +242,10 @@ public class RestTable {
}
public static void pad(Table.Cell cell, int width, RestRequest request, UTF8StreamWriter out) throws IOException {
pad(cell, width, request, out, false);
}
public static void pad(Table.Cell cell, int width, RestRequest request, UTF8StreamWriter out, boolean isLast) throws IOException {
String sValue = renderValue(request, cell.value);
int length = sValue == null ? 0 : sValue.length();
byte leftOver = (byte) (width - length);
@ -254,8 +264,11 @@ public class RestTable {
if (sValue != null) {
out.append(sValue);
}
// Ignores the leftover spaces if the cell is the last of the column.
if (!isLast) {
for (byte i = 0; i < leftOver; i++) {
out.append(" ");
}
}
}
}
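
The change trims the padding that used to trail the last column of _cat output. A self-contained sketch of the same fixed-width rule, with hypothetical row data:

final class CatPaddingSketch {
    // Pads every cell to its column width, but skips the trailing pad (and the
    // separator space) for the last column so lines carry no dangling blanks.
    static String renderRow(String[] cells, int[] widths) {
        StringBuilder line = new StringBuilder();
        for (int col = 0; col < cells.length; col++) {
            boolean isLast = col == cells.length - 1;
            line.append(cells[col]);
            if (!isLast) {
                for (int i = cells[col].length(); i < widths[col]; i++) {
                    line.append(' ');
                }
                line.append(' ');
            }
        }
        return line.toString();
    }

    public static void main(String[] args) {
        String row = renderRow(new String[] {"index", "green"}, new int[] {10, 6});
        System.out.println("[" + row + "]"); // no trailing spaces after "green"
    }
}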

View File

@ -24,7 +24,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction;
import org.elasticsearch.script.Template;
import static org.elasticsearch.rest.RestRequest.Method.DELETE;
@ -38,6 +38,6 @@ public class RestDeleteSearchTemplateAction extends RestDeleteIndexedScriptActio
@Override
protected String getScriptLang(RestRequest request) {
return Template.DEFAULT_LANG;
}
}

View File

@ -25,7 +25,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.action.script.RestGetIndexedScriptAction;
import org.elasticsearch.script.Template;
import static org.elasticsearch.rest.RestRequest.Method.GET;
@ -42,7 +42,7 @@ public class RestGetSearchTemplateAction extends RestGetIndexedScriptAction {
@Override
protected String getScriptLang(RestRequest request) {
return Template.DEFAULT_LANG;
}
@Override

View File

@ -23,7 +23,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction;
import org.elasticsearch.script.Template;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestRequest.Method.PUT;
@ -59,6 +59,6 @@ public class RestPutSearchTemplateAction extends RestPutIndexedScriptAction {
@Override
protected String getScriptLang(RestRequest request) {
return Template.DEFAULT_LANG;
}
}

View File

@ -32,6 +32,7 @@ import org.elasticsearch.index.VersionType;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.rest.action.support.RestBuilderListener;
import org.elasticsearch.rest.action.support.RestStatusToXContentListener;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptParameterParser;
import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue;
@ -40,7 +41,6 @@ import java.util.HashMap;
import java.util.Map; import java.util.Map;
import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestStatus.CREATED;
/** /**
*/ */
@ -115,18 +115,6 @@ public class RestUpdateAction extends BaseRestHandler {
} }
} }
client.update(updateRequest, new RestBuilderListener<UpdateResponse>(channel) { client.update(updateRequest, new RestStatusToXContentListener<>(channel));
@Override
public RestResponse buildResponse(UpdateResponse response, XContentBuilder builder) throws Exception {
builder.startObject();
response.toXContent(builder, request);
builder.endObject();
RestStatus status = response.getShardInfo().status();
if (response.isCreated()) {
status = CREATED;
}
return new BytesRestResponse(status, builder);
}
});
} }
} }
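
The deleted anonymous RestBuilderListener hand-rolled the status decision (CREATED vs. the shard-info status); RestStatusToXContentListener centralizes that. A rough, dependency-free sketch of what such a status-aware listener does (simplified types, not the real Elasticsearch interfaces):

    import java.util.function.Function;

    // Sketch: one listener derives both HTTP status and body from the response,
    // so individual REST actions no longer duplicate the status logic.
    class StatusToBodyListener<R> {
        private final Function<R, Integer> status; // e.g. 201 when created, else shard status
        private final Function<R, String> body;    // e.g. JSON rendering of R

        StatusToBodyListener(Function<R, Integer> status, Function<R, String> body) {
            this.status = status;
            this.body = body;
        }

        void onResponse(R response) {
            System.out.println(status.apply(response) + " " + body.apply(response));
        }
    }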

View File

@ -22,9 +22,7 @@ package org.elasticsearch.script;
import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.MapBinder; import org.elasticsearch.common.inject.multibindings.MapBinder;
import org.elasticsearch.common.inject.multibindings.Multibinder; import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
@ -75,13 +73,6 @@ public class ScriptModule extends AbstractModule {
Multibinder<ScriptEngineService> multibinder = Multibinder.newSetBinder(binder(), ScriptEngineService.class); Multibinder<ScriptEngineService> multibinder = Multibinder.newSetBinder(binder(), ScriptEngineService.class);
multibinder.addBinding().to(NativeScriptEngineService.class); multibinder.addBinding().to(NativeScriptEngineService.class);
try {
Class.forName("com.github.mustachejava.Mustache");
multibinder.addBinding().to(MustacheScriptEngineService.class).asEagerSingleton();
} catch (Throwable t) {
Loggers.getLogger(ScriptService.class, settings).debug("failed to load mustache", t);
}
for (Class<? extends ScriptEngineService> scriptEngine : scriptEngines) { for (Class<? extends ScriptEngineService> scriptEngine : scriptEngines) {
multibinder.addBinding().to(scriptEngine).asEagerSingleton(); multibinder.addBinding().to(scriptEngine).asEagerSingleton();
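
With the hard-coded mustache registration deleted, every engine reaches ScriptModule the same way: through the set binder. A dependency-free sketch of that registration pattern (Guice's Multibinder collects bindings into an injectable Set; a plain set stands in for it here):

    import java.util.LinkedHashSet;
    import java.util.Set;

    // Sketch: the module keeps a uniform registry and no longer special-cases any engine.
    class ScriptEngineRegistry {
        private final Set<Class<?>> engines = new LinkedHashSet<>();

        // analogous to multibinder.addBinding().to(engineClass).asEagerSingleton()
        void register(Class<?> engineClass) {
            engines.add(engineClass);
        }

        Set<Class<?>> engines() {
            return engines;
        }
    }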

View File

@ -29,13 +29,15 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.script.ScriptService.ScriptType; import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;
import java.io.IOException; import java.io.IOException;
import java.util.Collections; import java.util.Collections;
import java.util.Map; import java.util.Map;
public class Template extends Script { public class Template extends Script {
/** Default templating language */
public static final String DEFAULT_LANG = "mustache";
private XContentType contentType; private XContentType contentType;
@ -51,7 +53,7 @@ public class Template extends Script {
* The inline template. * The inline template.
*/ */
public Template(String template) { public Template(String template) {
super(template, MustacheScriptEngineService.NAME); super(template, DEFAULT_LANG);
} }
/** /**
@ -73,7 +75,7 @@ public class Template extends Script {
*/ */
public Template(String template, ScriptType type, @Nullable String lang, @Nullable XContentType xContentType, public Template(String template, ScriptType type, @Nullable String lang, @Nullable XContentType xContentType,
@Nullable Map<String, Object> params) { @Nullable Map<String, Object> params) {
super(template, type, lang == null ? MustacheScriptEngineService.NAME : lang, params); super(template, type, lang == null ? DEFAULT_LANG : lang, params);
this.contentType = xContentType; this.contentType = xContentType;
} }
@ -120,16 +122,16 @@ public class Template extends Script {
} }
public static Script parse(Map<String, Object> config, boolean removeMatchedEntries, ParseFieldMatcher parseFieldMatcher) { public static Script parse(Map<String, Object> config, boolean removeMatchedEntries, ParseFieldMatcher parseFieldMatcher) {
return new TemplateParser(Collections.emptyMap(), MustacheScriptEngineService.NAME).parse(config, removeMatchedEntries, parseFieldMatcher); return new TemplateParser(Collections.emptyMap(), DEFAULT_LANG).parse(config, removeMatchedEntries, parseFieldMatcher);
} }
public static Template parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException { public static Template parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException {
return new TemplateParser(Collections.emptyMap(), MustacheScriptEngineService.NAME).parse(parser, parseFieldMatcher); return new TemplateParser(Collections.emptyMap(), DEFAULT_LANG).parse(parser, parseFieldMatcher);
} }
@Deprecated @Deprecated
public static Template parse(XContentParser parser, Map<String, ScriptType> additionalTemplateFieldNames, ParseFieldMatcher parseFieldMatcher) throws IOException { public static Template parse(XContentParser parser, Map<String, ScriptType> additionalTemplateFieldNames, ParseFieldMatcher parseFieldMatcher) throws IOException {
return new TemplateParser(additionalTemplateFieldNames, MustacheScriptEngineService.NAME).parse(parser, parseFieldMatcher); return new TemplateParser(additionalTemplateFieldNames, DEFAULT_LANG).parse(parser, parseFieldMatcher);
} }
@Deprecated @Deprecated
@ -172,7 +174,7 @@ public class Template extends Script {
@Override @Override
protected Template createSimpleScript(XContentParser parser) throws IOException { protected Template createSimpleScript(XContentParser parser) throws IOException {
return new Template(String.valueOf(parser.objectText()), ScriptType.INLINE, MustacheScriptEngineService.NAME, contentType, null); return new Template(String.valueOf(parser.objectText()), ScriptType.INLINE, DEFAULT_LANG, contentType, null);
} }
@Override @Override
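
The constructor and parse changes above all funnel through the same fallback: a null language resolves to DEFAULT_LANG. A one-method sketch of that rule, runnable on its own:

    // Sketch of the fallback applied by Template's constructors and parsers.
    class LangFallbackDemo {
        static final String DEFAULT_LANG = "mustache"; // Template.DEFAULT_LANG

        static String langOrDefault(String lang) {
            return lang == null ? DEFAULT_LANG : lang;
        }

        public static void main(String[] args) {
            System.out.println(langOrDefault(null));     // mustache
            System.out.println(langOrDefault("groovy")); // groovy
        }
    }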

View File

@ -43,7 +43,7 @@ import java.util.Map;
/** /**
* *
*/ */
public class StatsAggegator extends NumericMetricsAggregator.MultiValue { public class StatsAggregator extends NumericMetricsAggregator.MultiValue {
final ValuesSource.Numeric valuesSource; final ValuesSource.Numeric valuesSource;
final ValueFormatter formatter; final ValueFormatter formatter;
@ -54,10 +54,10 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue {
DoubleArray maxes; DoubleArray maxes;
public StatsAggegator(String name, ValuesSource.Numeric valuesSource, ValueFormatter formatter, public StatsAggregator(String name, ValuesSource.Numeric valuesSource, ValueFormatter formatter,
AggregationContext context, AggregationContext context,
Aggregator parent, List<PipelineAggregator> pipelineAggregators, Aggregator parent, List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData) throws IOException { Map<String, Object> metaData) throws IOException {
super(name, context, parent, pipelineAggregators, metaData); super(name, context, parent, pipelineAggregators, metaData);
this.valuesSource = valuesSource; this.valuesSource = valuesSource;
if (valuesSource != null) { if (valuesSource != null) {
@ -164,14 +164,14 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue {
@Override @Override
protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException { List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
return new StatsAggegator(name, null, config.formatter(), aggregationContext, parent, pipelineAggregators, metaData); return new StatsAggregator(name, null, config.formatter(), aggregationContext, parent, pipelineAggregators, metaData);
} }
@Override @Override
protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent,
boolean collectsFromSingleBucket, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) boolean collectsFromSingleBucket, List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData)
throws IOException { throws IOException {
return new StatsAggegator(name, valuesSource, config.formatter(), aggregationContext, parent, pipelineAggregators, metaData); return new StatsAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, pipelineAggregators, metaData);
} }
} }

View File

@ -34,6 +34,6 @@ public class StatsParser extends NumericValuesSourceMetricsAggregatorParser<Inte
@Override @Override
protected AggregatorFactory createFactory(String aggregationName, ValuesSourceConfig<ValuesSource.Numeric> config) { protected AggregatorFactory createFactory(String aggregationName, ValuesSourceConfig<ValuesSource.Numeric> config) {
return new StatsAggegator.Factory(aggregationName, config); return new StatsAggregator.Factory(aggregationName, config);
} }
} }

View File

@ -21,6 +21,7 @@ package org.elasticsearch.search.highlight;
import org.apache.lucene.search.highlight.SimpleFragmenter; import org.apache.lucene.search.highlight.SimpleFragmenter;
import org.apache.lucene.search.highlight.SimpleSpanFragmenter; import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
@ -35,7 +36,29 @@ import java.util.Objects;
* This abstract class holds parameters shared by {@link HighlightBuilder} and {@link HighlightBuilder.Field} * This abstract class holds parameters shared by {@link HighlightBuilder} and {@link HighlightBuilder.Field}
* and provides the common setters, equality, hashCode calculation and common serialization * and provides the common setters, equality, hashCode calculation and common serialization
*/ */
public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterBuilder> { public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterBuilder<?>> {
public static final ParseField PRE_TAGS_FIELD = new ParseField("pre_tags");
public static final ParseField POST_TAGS_FIELD = new ParseField("post_tags");
public static final ParseField FIELDS_FIELD = new ParseField("fields");
public static final ParseField ORDER_FIELD = new ParseField("order");
public static final ParseField TAGS_SCHEMA_FIELD = new ParseField("tags_schema");
public static final ParseField HIGHLIGHT_FILTER_FIELD = new ParseField("highlight_filter");
public static final ParseField FRAGMENT_SIZE_FIELD = new ParseField("fragment_size");
public static final ParseField FRAGMENT_OFFSET_FIELD = new ParseField("fragment_offset");
public static final ParseField NUMBER_OF_FRAGMENTS_FIELD = new ParseField("number_of_fragments");
public static final ParseField ENCODER_FIELD = new ParseField("encoder");
public static final ParseField REQUIRE_FIELD_MATCH_FIELD = new ParseField("require_field_match");
public static final ParseField BOUNDARY_MAX_SCAN_FIELD = new ParseField("boundary_max_scan");
public static final ParseField BOUNDARY_CHARS_FIELD = new ParseField("boundary_chars");
public static final ParseField TYPE_FIELD = new ParseField("type");
public static final ParseField FRAGMENTER_FIELD = new ParseField("fragmenter");
public static final ParseField NO_MATCH_SIZE_FIELD = new ParseField("no_match_size");
public static final ParseField FORCE_SOURCE_FIELD = new ParseField("force_source");
public static final ParseField PHRASE_LIMIT_FIELD = new ParseField("phrase_limit");
public static final ParseField OPTIONS_FIELD = new ParseField("options");
public static final ParseField HIGHLIGHT_QUERY_FIELD = new ParseField("highlight_query");
public static final ParseField MATCHED_FIELDS_FIELD = new ParseField("matched_fields");
protected String[] preTags; protected String[] preTags;
@ -49,7 +72,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
protected String fragmenter; protected String fragmenter;
protected QueryBuilder highlightQuery; protected QueryBuilder<?> highlightQuery;
protected String order; protected String order;
@ -102,7 +125,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
} }
/** /**
* Set the fragment size in characters, defaults to {@link HighlighterParseElement#DEFAULT_FRAGMENT_CHAR_SIZE} * Set the fragment size in characters, defaults to {@link HighlightBuilder#DEFAULT_FRAGMENT_CHAR_SIZE}
*/ */
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public HB fragmentSize(Integer fragmentSize) { public HB fragmentSize(Integer fragmentSize) {
@ -118,7 +141,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
} }
/** /**
* Set the number of fragments, defaults to {@link HighlighterParseElement#DEFAULT_NUMBER_OF_FRAGMENTS} * Set the number of fragments, defaults to {@link HighlightBuilder#DEFAULT_NUMBER_OF_FRAGMENTS}
*/ */
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public HB numOfFragments(Integer numOfFragments) { public HB numOfFragments(Integer numOfFragments) {
@ -175,7 +198,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
* Sets a query to be used for highlighting instead of the search query. * Sets a query to be used for highlighting instead of the search query.
*/ */
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public HB highlightQuery(QueryBuilder highlightQuery) { public HB highlightQuery(QueryBuilder<?> highlightQuery) {
this.highlightQuery = highlightQuery; this.highlightQuery = highlightQuery;
return (HB) this; return (HB) this;
} }
@ -183,7 +206,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
/** /**
* @return the value set by {@link #highlightQuery(QueryBuilder)} * @return the value set by {@link #highlightQuery(QueryBuilder)}
*/ */
public QueryBuilder highlightQuery() { public QueryBuilder<?> highlightQuery() {
return this.highlightQuery; return this.highlightQuery;
} }
@ -347,52 +370,52 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
void commonOptionsToXContent(XContentBuilder builder) throws IOException { void commonOptionsToXContent(XContentBuilder builder) throws IOException {
if (preTags != null) { if (preTags != null) {
builder.array("pre_tags", preTags); builder.array(PRE_TAGS_FIELD.getPreferredName(), preTags);
} }
if (postTags != null) { if (postTags != null) {
builder.array("post_tags", postTags); builder.array(POST_TAGS_FIELD.getPreferredName(), postTags);
} }
if (fragmentSize != null) { if (fragmentSize != null) {
builder.field("fragment_size", fragmentSize); builder.field(FRAGMENT_SIZE_FIELD.getPreferredName(), fragmentSize);
} }
if (numOfFragments != null) { if (numOfFragments != null) {
builder.field("number_of_fragments", numOfFragments); builder.field(NUMBER_OF_FRAGMENTS_FIELD.getPreferredName(), numOfFragments);
} }
if (highlighterType != null) { if (highlighterType != null) {
builder.field("type", highlighterType); builder.field(TYPE_FIELD.getPreferredName(), highlighterType);
} }
if (fragmenter != null) { if (fragmenter != null) {
builder.field("fragmenter", fragmenter); builder.field(FRAGMENTER_FIELD.getPreferredName(), fragmenter);
} }
if (highlightQuery != null) { if (highlightQuery != null) {
builder.field("highlight_query", highlightQuery); builder.field(HIGHLIGHT_QUERY_FIELD.getPreferredName(), highlightQuery);
} }
if (order != null) { if (order != null) {
builder.field("order", order); builder.field(ORDER_FIELD.getPreferredName(), order);
} }
if (highlightFilter != null) { if (highlightFilter != null) {
builder.field("highlight_filter", highlightFilter); builder.field(HIGHLIGHT_FILTER_FIELD.getPreferredName(), highlightFilter);
} }
if (boundaryMaxScan != null) { if (boundaryMaxScan != null) {
builder.field("boundary_max_scan", boundaryMaxScan); builder.field(BOUNDARY_MAX_SCAN_FIELD.getPreferredName(), boundaryMaxScan);
} }
if (boundaryChars != null) { if (boundaryChars != null) {
builder.field("boundary_chars", boundaryChars); builder.field(BOUNDARY_CHARS_FIELD.getPreferredName(), new String(boundaryChars));
} }
if (options != null && options.size() > 0) { if (options != null && options.size() > 0) {
builder.field("options", options); builder.field(OPTIONS_FIELD.getPreferredName(), options);
} }
if (forceSource != null) { if (forceSource != null) {
builder.field("force_source", forceSource); builder.field(FORCE_SOURCE_FIELD.getPreferredName(), forceSource);
} }
if (requireFieldMatch != null) { if (requireFieldMatch != null) {
builder.field("require_field_match", requireFieldMatch); builder.field(REQUIRE_FIELD_MATCH_FIELD.getPreferredName(), requireFieldMatch);
} }
if (noMatchSize != null) { if (noMatchSize != null) {
builder.field("no_match_size", noMatchSize); builder.field(NO_MATCH_SIZE_FIELD.getPreferredName(), noMatchSize);
} }
if (phraseLimit != null) { if (phraseLimit != null) {
builder.field("phrase_limit", phraseLimit); builder.field(PHRASE_LIMIT_FIELD.getPreferredName(), phraseLimit);
} }
} }
@ -405,7 +428,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
} }
/** /**
* internal hashCode calculation to overwrite for the implementing classes. * fields only present in subclass should contribute to hashCode in the implementation
*/ */
protected abstract int doHashCode(); protected abstract int doHashCode();
@ -439,7 +462,7 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
} }
/** /**
* internal equals to overwrite for the implementing classes. * fields only present in subclass should be checked for equality in the implementation
*/ */
protected abstract boolean doEquals(HB other); protected abstract boolean doEquals(HB other);
@ -506,4 +529,4 @@ public abstract class AbstractHighlighterBuilder<HB extends AbstractHighlighterB
} }
out.writeOptionalBoolean(requireFieldMatch); out.writeOptionalBoolean(requireFieldMatch);
} }
} }
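
The string literals in commonOptionsToXContent are now routed through ParseField constants, so serialization and parsing agree on one spelling per option. A dependency-free sketch of the idea (the real ParseField also tracks deprecated alternate spellings, matched through ParseFieldMatcher):

    import java.util.Arrays;

    // Sketch: one constant owns an option's canonical name plus legacy spellings.
    class FieldName {
        private final String preferred;
        private final String[] deprecated;

        FieldName(String preferred, String... deprecated) {
            this.preferred = preferred;
            this.deprecated = deprecated;
        }

        String getPreferredName() {
            return preferred;
        }

        // a parser accepts either the canonical or a legacy spelling
        boolean match(String candidate) {
            return preferred.equals(candidate) || Arrays.asList(deprecated).contains(candidate);
        }
    }

    class FieldNameDemo {
        static final FieldName FRAGMENT_SIZE = new FieldName("fragment_size", "fragmentSize");

        public static void main(String[] args) {
            System.out.println(FRAGMENT_SIZE.getPreferredName()); // used when writing XContent
            System.out.println(FRAGMENT_SIZE.match("fragmentSize")); // true when parsing old requests
        }
    }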

View File

@ -19,19 +19,31 @@
package org.elasticsearch.search.highlight; package org.elasticsearch.search.highlight;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.vectorhighlight.SimpleBoundaryScanner;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions;
import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions.Builder;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List; import java.util.List;
import java.util.Objects; import java.util.Objects;
import java.util.Set;
/** /**
* A builder for search highlighting. Settings can control how large fields * A builder for search highlighting. Settings can control how large fields
@ -43,6 +55,53 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
public static final HighlightBuilder PROTOTYPE = new HighlightBuilder(); public static final HighlightBuilder PROTOTYPE = new HighlightBuilder();
public static final String HIGHLIGHT_ELEMENT_NAME = "highlight";
/** default for whether to highlight fields based on the source even if stored separately */
public static final boolean DEFAULT_FORCE_SOURCE = false;
/** default for whether a field should be highlighted only if a query matches that field */
public static final boolean DEFAULT_REQUIRE_FIELD_MATCH = true;
/** default for whether <tt>fvh</tt> should provide highlighting on filter clauses */
public static final boolean DEFAULT_HIGHLIGHT_FILTER = false;
/** default for highlight fragments being ordered by score */
public static final boolean DEFAULT_SCORE_ORDERED = false;
/** the default encoder setting */
public static final String DEFAULT_ENCODER = "default";
/** default for the maximum number of phrases the fvh will consider */
public static final int DEFAULT_PHRASE_LIMIT = 256;
/** default for fragment size when there are no matches */
public static final int DEFAULT_NO_MATCH_SIZE = 0;
/** the default number of fragments for highlighting */
public static final int DEFAULT_NUMBER_OF_FRAGMENTS = 5;
/** the default fragment size in characters */
public static final int DEFAULT_FRAGMENT_CHAR_SIZE = 100;
/** the default opening tag */
public static final String[] DEFAULT_PRE_TAGS = new String[]{"<em>"};
/** the default closing tag */
public static final String[] DEFAULT_POST_TAGS = new String[]{"</em>"};
/** the default opening tags when <tt>tag_schema = "styled"</tt> */
public static final String[] DEFAULT_STYLED_PRE_TAG = {
"<em class=\"hlt1\">", "<em class=\"hlt2\">", "<em class=\"hlt3\">",
"<em class=\"hlt4\">", "<em class=\"hlt5\">", "<em class=\"hlt6\">",
"<em class=\"hlt7\">", "<em class=\"hlt8\">", "<em class=\"hlt9\">",
"<em class=\"hlt10\">"
};
/** the default closing tags when <tt>tag_schema = "styled"</tt> */
public static final String[] DEFAULT_STYLED_POST_TAGS = {"</em>"};
/**
* a {@link FieldOptions.Builder} with default settings
*/
public final static Builder defaultFieldOptions() {
return new SearchContextHighlight.FieldOptions.Builder()
.preTags(DEFAULT_PRE_TAGS).postTags(DEFAULT_POST_TAGS).scoreOrdered(DEFAULT_SCORE_ORDERED).highlightFilter(DEFAULT_HIGHLIGHT_FILTER)
.requireFieldMatch(DEFAULT_REQUIRE_FIELD_MATCH).forceSource(DEFAULT_FORCE_SOURCE).fragmentCharSize(DEFAULT_FRAGMENT_CHAR_SIZE).numberOfFragments(DEFAULT_NUMBER_OF_FRAGMENTS)
.encoder(DEFAULT_ENCODER).boundaryMaxScan(SimpleBoundaryScanner.DEFAULT_MAX_SCAN)
.boundaryChars(SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS)
.noMatchSize(DEFAULT_NO_MATCH_SIZE).phraseLimit(DEFAULT_PHRASE_LIMIT);
}
private final List<Field> fields = new ArrayList<>(); private final List<Field> fields = new ArrayList<>();
private String encoder; private String encoder;
@ -115,12 +174,12 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
public HighlightBuilder tagsSchema(String schemaName) { public HighlightBuilder tagsSchema(String schemaName) {
switch (schemaName) { switch (schemaName) {
case "default": case "default":
preTags(HighlighterParseElement.DEFAULT_PRE_TAGS); preTags(DEFAULT_PRE_TAGS);
postTags(HighlighterParseElement.DEFAULT_POST_TAGS); postTags(DEFAULT_POST_TAGS);
break; break;
case "styled": case "styled":
preTags(HighlighterParseElement.STYLED_PRE_TAG); preTags(DEFAULT_STYLED_PRE_TAG);
postTags(HighlighterParseElement.STYLED_POST_TAGS); postTags(DEFAULT_STYLED_POST_TAGS);
break; break;
default: default:
throw new IllegalArgumentException("Unknown tag schema ["+ schemaName +"]"); throw new IllegalArgumentException("Unknown tag schema ["+ schemaName +"]");
@ -164,24 +223,220 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("highlight"); builder.startObject(HIGHLIGHT_ELEMENT_NAME);
innerXContent(builder); innerXContent(builder);
builder.endObject(); builder.endObject();
return builder; return builder;
} }
/**
* Creates a new {@link HighlightBuilder} from the highlighter held by the {@link QueryParseContext}
* in {@link org.elasticsearch.common.xcontent.XContent} format
*
* @param parseContext
* the input parse context. The state on the parser contained in
* this context will be changed as a side effect of this method
* call
* @return the new {@link HighlightBuilder}
*/
public static HighlightBuilder fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
XContentParser.Token token;
String topLevelFieldName = null;
HighlightBuilder highlightBuilder = new HighlightBuilder();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
topLevelFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if (parseContext.parseFieldMatcher().match(topLevelFieldName, PRE_TAGS_FIELD)) {
List<String> preTagsList = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
preTagsList.add(parser.text());
}
highlightBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()]));
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, POST_TAGS_FIELD)) {
List<String> postTagsList = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
postTagsList.add(parser.text());
}
highlightBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()]));
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FIELDS_FIELD)) {
highlightBuilder.useExplicitFieldOrder(true);
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.START_OBJECT) {
String highlightFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
if (highlightFieldName != null) {
throw new ParsingException(parser.getTokenLocation(), "If highlighter fields is an array it must contain objects containing a single field");
}
highlightFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
highlightBuilder.field(Field.fromXContent(highlightFieldName, parseContext));
}
}
} else {
throw new ParsingException(parser.getTokenLocation(), "If highlighter fields is an array it must contain objects containing a single field");
}
}
} else {
throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", topLevelFieldName);
}
} else if (token.isValue()) {
if (parseContext.parseFieldMatcher().match(topLevelFieldName, ORDER_FIELD)) {
highlightBuilder.order(parser.text());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, TAGS_SCHEMA_FIELD)) {
highlightBuilder.tagsSchema(parser.text());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, HIGHLIGHT_FILTER_FIELD)) {
highlightBuilder.highlightFilter(parser.booleanValue());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FRAGMENT_SIZE_FIELD)) {
highlightBuilder.fragmentSize(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, NUMBER_OF_FRAGMENTS_FIELD)) {
highlightBuilder.numOfFragments(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, ENCODER_FIELD)) {
highlightBuilder.encoder(parser.text());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, REQUIRE_FIELD_MATCH_FIELD)) {
highlightBuilder.requireFieldMatch(parser.booleanValue());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, BOUNDARY_MAX_SCAN_FIELD)) {
highlightBuilder.boundaryMaxScan(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, BOUNDARY_CHARS_FIELD)) {
highlightBuilder.boundaryChars(parser.text().toCharArray());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, TYPE_FIELD)) {
highlightBuilder.highlighterType(parser.text());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FRAGMENTER_FIELD)) {
highlightBuilder.fragmenter(parser.text());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, NO_MATCH_SIZE_FIELD)) {
highlightBuilder.noMatchSize(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FORCE_SOURCE_FIELD)) {
highlightBuilder.forceSource(parser.booleanValue());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, PHRASE_LIMIT_FIELD)) {
highlightBuilder.phraseLimit(parser.intValue());
} else {
throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", topLevelFieldName);
}
} else if (token == XContentParser.Token.START_OBJECT && topLevelFieldName != null) {
if (parseContext.parseFieldMatcher().match(topLevelFieldName, OPTIONS_FIELD)) {
highlightBuilder.options(parser.map());
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FIELDS_FIELD)) {
String highlightFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
highlightFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
highlightBuilder.field(Field.fromXContent(highlightFieldName, parseContext));
}
}
} else if (parseContext.parseFieldMatcher().match(topLevelFieldName, HIGHLIGHT_QUERY_FIELD)) {
highlightBuilder.highlightQuery(parseContext.parseInnerQueryBuilder());
} else {
throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", topLevelFieldName);
}
} else if (topLevelFieldName != null) {
throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, topLevelFieldName);
}
}
if (highlightBuilder.preTags() != null && highlightBuilder.postTags() == null) {
throw new ParsingException(parser.getTokenLocation(), "Highlighter global preTags are set, but global postTags are not set");
}
return highlightBuilder;
}
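
For orientation, the loop above walks a standard highlight section; a body like the following exercises its array branch (pre_tags/post_tags), its value branch (order), and its object branch (fields). Illustrative request fragment only, embedded here as a Java string so it is runnable:

    class HighlightDslExample {
        public static void main(String[] args) {
            String body = "{"
                    + " \"pre_tags\": [\"<em>\"], \"post_tags\": [\"</em>\"]," // START_ARRAY branch
                    + " \"order\": \"score\","                                 // token.isValue() branch
                    + " \"fields\": { \"title\": { \"fragment_size\": 150 } }" // START_OBJECT branch
                    + " }";
            System.out.println(body);
        }
    }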
public SearchContextHighlight build(QueryShardContext context) throws IOException {
// create template global options that are later merged with any partial field options
final SearchContextHighlight.FieldOptions.Builder globalOptionsBuilder = new SearchContextHighlight.FieldOptions.Builder();
globalOptionsBuilder.encoder(this.encoder);
transferOptions(this, globalOptionsBuilder, context);
// overwrite unset global options by default values
globalOptionsBuilder.merge(defaultFieldOptions().build());
// create field options
Collection<org.elasticsearch.search.highlight.SearchContextHighlight.Field> fieldOptions = new ArrayList<>();
for (Field field : this.fields) {
final SearchContextHighlight.FieldOptions.Builder fieldOptionsBuilder = new SearchContextHighlight.FieldOptions.Builder();
fieldOptionsBuilder.fragmentOffset(field.fragmentOffset);
if (field.matchedFields != null) {
Set<String> matchedFields = new HashSet<String>(field.matchedFields.length);
Collections.addAll(matchedFields, field.matchedFields);
fieldOptionsBuilder.matchedFields(matchedFields);
}
transferOptions(field, fieldOptionsBuilder, context);
fieldOptions.add(new SearchContextHighlight.Field(field.name(), fieldOptionsBuilder.merge(globalOptionsBuilder.build()).build()));
}
return new SearchContextHighlight(fieldOptions);
}
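
build(...) layers options in one direction: built-in defaults fill whatever the global section left unset, and the merged global options fill whatever each field left unset (the "overwrite unset global options" comment above). A toy model of that fill-unset merge, assuming merge(other) only copies slots that are still null:

    // Toy sketch of the three-layer option resolution in build():
    // field options <- global options <- built-in defaults (unset slots only).
    class Options {
        Integer fragmentSize;  // null means "unset"
        Integer numFragments;

        Options merge(Options other) { // fill my unset slots from `other`
            if (fragmentSize == null) fragmentSize = other.fragmentSize;
            if (numFragments == null) numFragments = other.numFragments;
            return this;
        }
    }

    class MergeDemo {
        public static void main(String[] args) {
            Options defaults = new Options();
            defaults.fragmentSize = 100; defaults.numFragments = 5; // the DEFAULT_* constants above

            Options global = new Options();
            global.numFragments = 2;        // set in the "highlight" section
            global.merge(defaults);         // -> 100 / 2

            Options field = new Options();
            field.fragmentSize = 150;       // per-field override
            field.merge(global);            // -> 150 / 2
            System.out.println(field.fragmentSize + " / " + field.numFragments);
        }
    }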
/**
* Transfers field options present in the input {@link AbstractHighlighterBuilder} to the receiving
* {@link FieldOptions.Builder}, effectively overwriting existing settings
* @param highlighterBuilder highlight builder with the input options
* @param targetOptionsBuilder the receiving options builder
* @param context needed to convert {@link QueryBuilder} to {@link Query}
* @throws IOException on errors parsing any optional nested highlight query
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
private static void transferOptions(AbstractHighlighterBuilder highlighterBuilder, SearchContextHighlight.FieldOptions.Builder targetOptionsBuilder, QueryShardContext context) throws IOException {
targetOptionsBuilder.preTags(highlighterBuilder.preTags);
targetOptionsBuilder.postTags(highlighterBuilder.postTags);
targetOptionsBuilder.scoreOrdered("score".equals(highlighterBuilder.order));
if (highlighterBuilder.highlightFilter != null) {
targetOptionsBuilder.highlightFilter(highlighterBuilder.highlightFilter);
}
if (highlighterBuilder.fragmentSize != null) {
targetOptionsBuilder.fragmentCharSize(highlighterBuilder.fragmentSize);
}
if (highlighterBuilder.numOfFragments != null) {
targetOptionsBuilder.numberOfFragments(highlighterBuilder.numOfFragments);
}
if (highlighterBuilder.requireFieldMatch != null) {
targetOptionsBuilder.requireFieldMatch(highlighterBuilder.requireFieldMatch);
}
if (highlighterBuilder.boundaryMaxScan != null) {
targetOptionsBuilder.boundaryMaxScan(highlighterBuilder.boundaryMaxScan);
}
targetOptionsBuilder.boundaryChars(convertCharArray(highlighterBuilder.boundaryChars));
targetOptionsBuilder.highlighterType(highlighterBuilder.highlighterType);
targetOptionsBuilder.fragmenter(highlighterBuilder.fragmenter);
if (highlighterBuilder.noMatchSize != null) {
targetOptionsBuilder.noMatchSize(highlighterBuilder.noMatchSize);
}
if (highlighterBuilder.forceSource != null) {
targetOptionsBuilder.forceSource(highlighterBuilder.forceSource);
}
if (highlighterBuilder.phraseLimit != null) {
targetOptionsBuilder.phraseLimit(highlighterBuilder.phraseLimit);
}
targetOptionsBuilder.options(highlighterBuilder.options);
if (highlighterBuilder.highlightQuery != null) {
targetOptionsBuilder.highlightQuery(highlighterBuilder.highlightQuery.toQuery(context));
}
}
private static Character[] convertCharArray(char[] array) {
if (array == null) {
return null;
}
Character[] charArray = new Character[array.length];
for (int i = 0; i < array.length; i++) {
charArray[i] = array[i];
}
return charArray;
}
public void innerXContent(XContentBuilder builder) throws IOException { public void innerXContent(XContentBuilder builder) throws IOException {
// first write common options // first write common options
commonOptionsToXContent(builder); commonOptionsToXContent(builder);
// special options for top-level highlighter // special options for top-level highlighter
if (encoder != null) { if (encoder != null) {
builder.field("encoder", encoder); builder.field(ENCODER_FIELD.getPreferredName(), encoder);
} }
if (fields.size() > 0) { if (fields.size() > 0) {
if (useExplicitFieldOrder) { if (useExplicitFieldOrder) {
builder.startArray("fields"); builder.startArray(FIELDS_FIELD.getPreferredName());
} else { } else {
builder.startObject("fields"); builder.startObject(FIELDS_FIELD.getPreferredName());
} }
for (Field field : fields) { for (Field field : fields) {
if (useExplicitFieldOrder) { if (useExplicitFieldOrder) {
@ -205,7 +460,7 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
try { try {
XContentBuilder builder = XContentFactory.jsonBuilder(); XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint(); builder.prettyPrint();
toXContent(builder, ToXContent.EMPTY_PARAMS); toXContent(builder, EMPTY_PARAMS);
return builder.string(); return builder.string();
} catch (Exception e) { } catch (Exception e) {
return "{ \"error\" : \"" + ExceptionsHelper.detailedMessage(e) + "\"}"; return "{ \"error\" : \"" + ExceptionsHelper.detailedMessage(e) + "\"}";
@ -286,14 +541,90 @@ public class HighlightBuilder extends AbstractHighlighterBuilder<HighlightBuilde
commonOptionsToXContent(builder); commonOptionsToXContent(builder);
// write special field-highlighter options // write special field-highlighter options
if (fragmentOffset != -1) { if (fragmentOffset != -1) {
builder.field("fragment_offset", fragmentOffset); builder.field(FRAGMENT_OFFSET_FIELD.getPreferredName(), fragmentOffset);
} }
if (matchedFields != null) { if (matchedFields != null) {
builder.field("matched_fields", matchedFields); builder.field(MATCHED_FIELDS_FIELD.getPreferredName(), matchedFields);
} }
builder.endObject(); builder.endObject();
} }
private static HighlightBuilder.Field fromXContent(String fieldname, QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
XContentParser.Token token;
final HighlightBuilder.Field field = new HighlightBuilder.Field(fieldname);
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_ARRAY) {
if (parseContext.parseFieldMatcher().match(currentFieldName, PRE_TAGS_FIELD)) {
List<String> preTagsList = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
preTagsList.add(parser.text());
}
field.preTags(preTagsList.toArray(new String[preTagsList.size()]));
} else if (parseContext.parseFieldMatcher().match(currentFieldName, POST_TAGS_FIELD)) {
List<String> postTagsList = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
postTagsList.add(parser.text());
}
field.postTags(postTagsList.toArray(new String[postTagsList.size()]));
} else if (parseContext.parseFieldMatcher().match(currentFieldName, MATCHED_FIELDS_FIELD)) {
List<String> matchedFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
matchedFields.add(parser.text());
}
field.matchedFields(matchedFields.toArray(new String[matchedFields.size()]));
} else {
throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", currentFieldName);
}
} else if (token.isValue()) {
if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) {
field.fragmentSize(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, NUMBER_OF_FRAGMENTS_FIELD)) {
field.numOfFragments(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_OFFSET_FIELD)) {
field.fragmentOffset(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) {
field.highlightFilter(parser.booleanValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) {
field.order(parser.text());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, REQUIRE_FIELD_MATCH_FIELD)) {
field.requireFieldMatch(parser.booleanValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_MAX_SCAN_FIELD)) {
field.boundaryMaxScan(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_CHARS_FIELD)) {
field.boundaryChars(parser.text().toCharArray());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, TYPE_FIELD)) {
field.highlighterType(parser.text());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENTER_FIELD)) {
field.fragmenter(parser.text());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, NO_MATCH_SIZE_FIELD)) {
field.noMatchSize(parser.intValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, FORCE_SOURCE_FIELD)) {
field.forceSource(parser.booleanValue());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, PHRASE_LIMIT_FIELD)) {
field.phraseLimit(parser.intValue());
} else {
throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", currentFieldName);
}
} else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) {
if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_QUERY_FIELD)) {
field.highlightQuery(parseContext.parseInnerQueryBuilder());
} else if (parseContext.parseFieldMatcher().match(currentFieldName, OPTIONS_FIELD)) {
field.options(parser.map());
} else {
throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", currentFieldName);
}
} else if (currentFieldName != null) {
throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, currentFieldName);
}
}
return field;
}
@Override @Override
protected int doHashCode() { protected int doHashCode() {
return Objects.hash(name, fragmentOffset, Arrays.hashCode(matchedFields)); return Objects.hash(name, fragmentOffset, Arrays.hashCode(matchedFields));

View File

@ -19,7 +19,6 @@
package org.elasticsearch.search.highlight; package org.elasticsearch.search.highlight;
import org.apache.lucene.search.vectorhighlight.SimpleBoundaryScanner;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardContext;
@ -52,39 +51,6 @@ import java.util.Set;
*/ */
public class HighlighterParseElement implements SearchParseElement { public class HighlighterParseElement implements SearchParseElement {
/** default for whether to highlight fields based on the source even if stored separately */
public static final boolean DEFAULT_FORCE_SOURCE = false;
/** default for whether a field should be highlighted only if a query matches that field */
public static final boolean DEFAULT_REQUIRE_FIELD_MATCH = true;
/** default for whether <tt>fvh</tt> should provide highlighting on filter clauses */
public static final boolean DEFAULT_HIGHLIGHT_FILTER = false;
/** default for highlight fragments being ordered by score */
public static final boolean DEFAULT_SCORE_ORDERED = false;
/** the default encoder setting */
public static final String DEFAULT_ENCODER = "default";
/** default for the maximum number of phrases the fvh will consider */
public static final int DEFAULT_PHRASE_LIMIT = 256;
/** default for fragment size when there are no matches */
public static final int DEFAULT_NO_MATCH_SIZE = 0;
/** the default number of fragments for highlighting */
public static final int DEFAULT_NUMBER_OF_FRAGMENTS = 5;
/** the default number of fragments size in characters */
public static final int DEFAULT_FRAGMENT_CHAR_SIZE = 100;
/** the default opening tag */
public static final String[] DEFAULT_PRE_TAGS = new String[]{"<em>"};
/** the default closing tag */
public static final String[] DEFAULT_POST_TAGS = new String[]{"</em>"};
/** the default opening tags when <tt>tag_schema = "styled"</tt> */
public static final String[] STYLED_PRE_TAG = {
"<em class=\"hlt1\">", "<em class=\"hlt2\">", "<em class=\"hlt3\">",
"<em class=\"hlt4\">", "<em class=\"hlt5\">", "<em class=\"hlt6\">",
"<em class=\"hlt7\">", "<em class=\"hlt8\">", "<em class=\"hlt9\">",
"<em class=\"hlt10\">"
};
/** the default closing tags when <tt>tag_schema = "styled"</tt> */
public static final String[] STYLED_POST_TAGS = {"</em>"};
@Override @Override
public void parse(XContentParser parser, SearchContext context) throws Exception { public void parse(XContentParser parser, SearchContext context) throws Exception {
try { try {
@ -99,12 +65,7 @@ public class HighlighterParseElement implements SearchParseElement {
String topLevelFieldName = null; String topLevelFieldName = null;
final List<Tuple<String, SearchContextHighlight.FieldOptions.Builder>> fieldsOptions = new ArrayList<>(); final List<Tuple<String, SearchContextHighlight.FieldOptions.Builder>> fieldsOptions = new ArrayList<>();
final SearchContextHighlight.FieldOptions.Builder globalOptionsBuilder = new SearchContextHighlight.FieldOptions.Builder() final SearchContextHighlight.FieldOptions.Builder globalOptionsBuilder = HighlightBuilder.defaultFieldOptions();
.preTags(DEFAULT_PRE_TAGS).postTags(DEFAULT_POST_TAGS).scoreOrdered(DEFAULT_SCORE_ORDERED).highlightFilter(DEFAULT_HIGHLIGHT_FILTER)
.requireFieldMatch(DEFAULT_REQUIRE_FIELD_MATCH).forceSource(DEFAULT_FORCE_SOURCE).fragmentCharSize(DEFAULT_FRAGMENT_CHAR_SIZE).numberOfFragments(DEFAULT_NUMBER_OF_FRAGMENTS)
.encoder(DEFAULT_ENCODER).boundaryMaxScan(SimpleBoundaryScanner.DEFAULT_MAX_SCAN)
.boundaryChars(SimpleBoundaryScanner.DEFAULT_BOUNDARY_CHARS)
.noMatchSize(DEFAULT_NO_MATCH_SIZE).phraseLimit(DEFAULT_PHRASE_LIMIT);
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) { if (token == XContentParser.Token.FIELD_NAME) {
@ -147,8 +108,8 @@ public class HighlighterParseElement implements SearchParseElement {
} else if ("tags_schema".equals(topLevelFieldName) || "tagsSchema".equals(topLevelFieldName)) { } else if ("tags_schema".equals(topLevelFieldName) || "tagsSchema".equals(topLevelFieldName)) {
String schema = parser.text(); String schema = parser.text();
if ("styled".equals(schema)) { if ("styled".equals(schema)) {
globalOptionsBuilder.preTags(STYLED_PRE_TAG); globalOptionsBuilder.preTags(HighlightBuilder.DEFAULT_STYLED_PRE_TAG);
globalOptionsBuilder.postTags(STYLED_POST_TAGS); globalOptionsBuilder.postTags(HighlightBuilder.DEFAULT_STYLED_POST_TAGS);
} }
} else if ("highlight_filter".equals(topLevelFieldName) || "highlightFilter".equals(topLevelFieldName)) { } else if ("highlight_filter".equals(topLevelFieldName) || "highlightFilter".equals(topLevelFieldName)) {
globalOptionsBuilder.highlightFilter(parser.booleanValue()); globalOptionsBuilder.highlightFilter(parser.booleanValue());
@ -211,7 +172,7 @@ public class HighlighterParseElement implements SearchParseElement {
return new SearchContextHighlight(fields); return new SearchContextHighlight(fields);
} }
protected SearchContextHighlight.FieldOptions.Builder parseFields(XContentParser parser, QueryShardContext queryShardContext) throws IOException { private static SearchContextHighlight.FieldOptions.Builder parseFields(XContentParser parser, QueryShardContext queryShardContext) throws IOException {
XContentParser.Token token; XContentParser.Token token;
final SearchContextHighlight.FieldOptions.Builder fieldOptionsBuilder = new SearchContextHighlight.FieldOptions.Builder(); final SearchContextHighlight.FieldOptions.Builder fieldOptionsBuilder = new SearchContextHighlight.FieldOptions.Builder();

View File

@ -53,6 +53,10 @@ public class SearchContextHighlight {
this.globalForceSource = globalForceSource; this.globalForceSource = globalForceSource;
} }
boolean globalForceSource() {
return this.globalForceSource;
}
public boolean forceSource(Field field) { public boolean forceSource(Field field) {
if (globalForceSource) { if (globalForceSource) {
return true; return true;

View File

@ -0,0 +1,37 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.tribe;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.Node;
import org.elasticsearch.plugins.Plugin;
import java.util.Collections;
/**
* An internal node that connects to a remote cluster, as part of a tribe node.
*/
class TribeClientNode extends Node {
TribeClientNode(Settings settings) {
super(new Environment(settings), Version.CURRENT, Collections.<Class<? extends Plugin>>emptyList());
}
}

View File

@ -132,14 +132,14 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
nodesSettings.remove("on_conflict"); // remove prefix settings that don't indicate a client nodesSettings.remove("on_conflict"); // remove prefix settings that don't indicate a client
for (Map.Entry<String, Settings> entry : nodesSettings.entrySet()) { for (Map.Entry<String, Settings> entry : nodesSettings.entrySet()) {
Settings.Builder sb = Settings.builder().put(entry.getValue()); Settings.Builder sb = Settings.builder().put(entry.getValue());
sb.put("node.name", settings.get("name") + "/" + entry.getKey()); sb.put("name", settings.get("name") + "/" + entry.getKey());
sb.put("path.home", settings.get("path.home")); // pass through ES home dir sb.put("path.home", settings.get("path.home")); // pass through ES home dir
sb.put(TRIBE_NAME, entry.getKey()); sb.put(TRIBE_NAME, entry.getKey());
sb.put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true);
if (sb.get("http.enabled") == null) { if (sb.get("http.enabled") == null) {
sb.put("http.enabled", false); sb.put("http.enabled", false);
} }
nodes.add(NodeBuilder.nodeBuilder().settings(sb).client(true).build()); sb.put("node.client", true);
nodes.add(new TribeClientNode(sb.build()));
} }
String[] blockIndicesWrite = Strings.EMPTY_ARRAY; String[] blockIndicesWrite = Strings.EMPTY_ARRAY;
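
The loop above stamps out one client-node Settings per tribe entry: it prefixes the node name, records the tribe key, disables HTTP unless explicitly enabled, and forces client mode. A dependency-free sketch of that derivation (a map stands in for Settings.Builder; the "tribe.name" key is an assumption for what the TRIBE_NAME constant holds):

    import java.util.LinkedHashMap;
    import java.util.Map;

    class TribeClientSettingsDemo {
        static Map<String, Object> tribeClientSettings(String parentNodeName, String tribeKey,
                                                       Map<String, Object> tribeConfig) {
            Map<String, Object> sb = new LinkedHashMap<>(tribeConfig);
            sb.put("name", parentNodeName + "/" + tribeKey); // e.g. "node1/t1"
            sb.put("tribe.name", tribeKey);                  // assumed value of TRIBE_NAME
            if (!sb.containsKey("http.enabled")) {
                sb.put("http.enabled", false);               // tribe clients expose no HTTP by default
            }
            sb.put("node.client", true);                     // the node joins purely as a client
            return sb;
        }

        public static void main(String[] args) {
            System.out.println(tribeClientSettings("node1", "t1", new LinkedHashMap<>()));
        }
    }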

View File

@ -44,6 +44,7 @@ OFFICIAL PLUGINS
- discovery-gce - discovery-gce
- discovery-multicast - discovery-multicast
- lang-javascript - lang-javascript
- lang-plan-a
- lang-python - lang-python
- mapper-attachments - mapper-attachments
- mapper-murmur3 - mapper-murmur3

View File

@ -105,7 +105,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
public void testBulkProcessorConcurrentRequests() throws Exception { public void testBulkProcessorConcurrentRequests() throws Exception {
int bulkActions = randomIntBetween(10, 100); int bulkActions = randomIntBetween(10, 100);
int numDocs = randomIntBetween(bulkActions, bulkActions + 100); int numDocs = randomIntBetween(bulkActions, bulkActions + 100);
int concurrentRequests = randomIntBetween(0, 10); int concurrentRequests = randomIntBetween(0, 7);
int expectedBulkActions = numDocs / bulkActions; int expectedBulkActions = numDocs / bulkActions;
@ -141,7 +141,7 @@ public class BulkProcessorIT extends ESIntegTestCase {
Set<String> ids = new HashSet<>(); Set<String> ids = new HashSet<>();
for (BulkItemResponse bulkItemResponse : listener.bulkItems) { for (BulkItemResponse bulkItemResponse : listener.bulkItems) {
assertThat(bulkItemResponse.isFailed(), equalTo(false)); assertThat(bulkItemResponse.getFailureMessage(), bulkItemResponse.isFailed(), equalTo(false));
assertThat(bulkItemResponse.getIndex(), equalTo("test")); assertThat(bulkItemResponse.getIndex(), equalTo("test"));
assertThat(bulkItemResponse.getType(), equalTo("test")); assertThat(bulkItemResponse.getType(), equalTo("test"));
//with concurrent requests > 1 we can't rely on the order of the bulk requests //with concurrent requests > 1 we can't rely on the order of the bulk requests
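
The assertion tweak threads the bulk item's failure message into Hamcrest's reason argument, so a red run explains why the item failed instead of printing a bare boolean mismatch. Minimal illustration of that overload (plain Hamcrest, outside the test harness):

    import static org.hamcrest.MatcherAssert.assertThat;
    import static org.hamcrest.Matchers.equalTo;

    class AssertReasonDemo {
        public static void main(String[] args) {
            boolean failed = false;
            String failureMessage = null; // would be bulkItemResponse.getFailureMessage()

            // reason-first overload: the message is printed only on mismatch
            assertThat(failureMessage, failed, equalTo(false));
        }
    }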

View File

@ -20,6 +20,7 @@ package org.elasticsearch.action.support.replication;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ReplicationResponse;
import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.ReplicationResponse; import org.elasticsearch.action.ReplicationResponse;
import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.UnavailableShardsException;

View File

@ -28,7 +28,6 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockException;
@ -46,9 +45,11 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.IndexShardNotStartedException; import org.elasticsearch.index.shard.IndexShardNotStartedException;
import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.cluster.TestClusterService;
@@ -128,22 +129,22 @@ public class TransportReplicationActionTests extends ESTestCase {
         ClusterBlocks.Builder block = ClusterBlocks.builder()
                 .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
         clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block));
-        TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
-        assertFalse("primary phase should stop execution", primaryPhase.checkBlocks());
+        TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(request, listener);
+        reroutePhase.run();
         assertListenerThrows("primary phase should fail operation", listener, ClusterBlockException.class);

         block = ClusterBlocks.builder()
                 .addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL));
         clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block));
         listener = new PlainActionFuture<>();
-        primaryPhase = action.new PrimaryPhase(new Request().timeout("5ms"), listener);
-        assertFalse("primary phase should stop execution on retryable block", primaryPhase.checkBlocks());
+        reroutePhase = action.new ReroutePhase(new Request().timeout("5ms"), listener);
+        reroutePhase.run();
         assertListenerThrows("failed to timeout on retryable block", listener, ClusterBlockException.class);

         listener = new PlainActionFuture<>();
-        primaryPhase = action.new PrimaryPhase(new Request(), listener);
-        assertFalse("primary phase should stop execution on retryable block", primaryPhase.checkBlocks());
+        reroutePhase = action.new ReroutePhase(new Request(), listener);
+        reroutePhase.run();
         assertFalse("primary phase should wait on retryable block", listener.isDone());

         block = ClusterBlocks.builder()
@@ -168,25 +169,47 @@ public class TransportReplicationActionTests extends ESTestCase {
         Request request = new Request(shardId).timeout("1ms");
         PlainActionFuture<Response> listener = new PlainActionFuture<>();
-        TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
-        primaryPhase.run();
+        TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(request, listener);
+        reroutePhase.run();
         assertListenerThrows("unassigned primary didn't cause a timeout", listener, UnavailableShardsException.class);

         request = new Request(shardId);
         listener = new PlainActionFuture<>();
-        primaryPhase = action.new PrimaryPhase(request, listener);
-        primaryPhase.run();
+        reroutePhase = action.new ReroutePhase(request, listener);
+        reroutePhase.run();
         assertFalse("unassigned primary didn't cause a retry", listener.isDone());

         clusterService.setState(state(index, true, ShardRoutingState.STARTED));
         logger.debug("--> primary assigned state:\n{}", clusterService.state().prettyPrint());

-        listener.get();
-        assertTrue("request wasn't processed on primary, despite of it being assigned", request.processedOnPrimary.get());
+        final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
+        final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId();
+        final List<CapturingTransport.CapturedRequest> capturedRequests = transport.capturedRequestsByTargetNode().get(primaryNodeId);
+        assertThat(capturedRequests, notNullValue());
+        assertThat(capturedRequests.size(), equalTo(1));
+        assertThat(capturedRequests.get(0).action, equalTo("testAction[p]"));
         assertIndexShardCounter(1);
     }
-    public void testRoutingToPrimary() {
+    public void testUnknownIndexOrShardOnReroute() throws InterruptedException {
+        final String index = "test";
+        // no replicas in oder to skip the replication part
+        clusterService.setState(state(index, true,
+                randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED));
+        logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
+        Request request = new Request(new ShardId("unknown_index", 0)).timeout("1ms");
+        PlainActionFuture<Response> listener = new PlainActionFuture<>();
+        TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(request, listener);
+        reroutePhase.run();
+        assertListenerThrows("must throw index not found exception", listener, IndexNotFoundException.class);
+
+        request = new Request(new ShardId(index, 10)).timeout("1ms");
+        listener = new PlainActionFuture<>();
+        reroutePhase = action.new ReroutePhase(request, listener);
+        reroutePhase.run();
+        assertListenerThrows("must throw shard not found exception", listener, ShardNotFoundException.class);
+    }
+
+    public void testRoutePhaseExecutesRequest() {
         final String index = "test";
         final ShardId shardId = new ShardId(index, 0);
@@ -199,25 +222,126 @@ public class TransportReplicationActionTests extends ESTestCase {
         Request request = new Request(shardId);
         PlainActionFuture<Response> listener = new PlainActionFuture<>();

-        TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
-        assertTrue(primaryPhase.checkBlocks());
-        primaryPhase.routeRequestOrPerformLocally(shardRoutingTable.primaryShard(), shardRoutingTable.shardsIt());
-        if (primaryNodeId.equals(clusterService.localNode().id())) {
-            logger.info("--> primary is assigned locally, testing for execution");
-            assertTrue("request failed to be processed on a local primary", request.processedOnPrimary.get());
-            if (transport.capturedRequests().length > 0) {
-                assertIndexShardCounter(2);
-            } else {
-                assertIndexShardCounter(1);
-            }
+        TransportReplicationAction.ReroutePhase reroutePhase = action.new ReroutePhase(request, listener);
+        reroutePhase.run();
+        assertThat(request.shardId(), equalTo(shardId));
+        logger.info("--> primary is assigned to [{}], checking request forwarded", primaryNodeId);
+        final List<CapturingTransport.CapturedRequest> capturedRequests = transport.capturedRequestsByTargetNode().get(primaryNodeId);
+        assertThat(capturedRequests, notNullValue());
+        assertThat(capturedRequests.size(), equalTo(1));
+        if (clusterService.state().nodes().localNodeId().equals(primaryNodeId)) {
+            assertThat(capturedRequests.get(0).action, equalTo("testAction[p]"));
         } else {
-            logger.info("--> primary is assigned to [{}], checking request forwarded", primaryNodeId);
-            final List<CapturingTransport.CapturedRequest> capturedRequests = transport.capturedRequestsByTargetNode().get(primaryNodeId);
-            assertThat(capturedRequests, notNullValue());
-            assertThat(capturedRequests.size(), equalTo(1));
             assertThat(capturedRequests.get(0).action, equalTo("testAction"));
-            assertIndexShardUninitialized();
         }
+        assertIndexShardUninitialized();
+    }
+
+    public void testPrimaryPhaseExecutesRequest() throws InterruptedException, ExecutionException {
+        final String index = "test";
+        final ShardId shardId = new ShardId(index, 0);
+        clusterService.setState(state(index, true, ShardRoutingState.STARTED, ShardRoutingState.STARTED));
+        Request request = new Request(shardId).timeout("1ms");
+        PlainActionFuture<Response> listener = new PlainActionFuture<>();
+        TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
+        primaryPhase.run();
+        assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
+        final String replicaNodeId = clusterService.state().getRoutingTable().shardRoutingTable(index, shardId.id()).replicaShards().get(0).currentNodeId();
+        final List<CapturingTransport.CapturedRequest> requests = transport.capturedRequestsByTargetNode().get(replicaNodeId);
+        assertThat(requests, notNullValue());
+        assertThat(requests.size(), equalTo(1));
+        assertThat("replica request was not sent", requests.get(0).action, equalTo("testAction[r]"));
+    }
+
+    public void testAddedReplicaAfterPrimaryOperation() {
+        final String index = "test";
+        final ShardId shardId = new ShardId(index, 0);
+        // start with no replicas
+        clusterService.setState(stateWithStartedPrimary(index, true, 0));
+        logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
+        final ClusterState stateWithAddedReplicas = state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED);
+        final Action actionWithAddedReplicaAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
+            @Override
+            protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable {
+                final Tuple<Response, Request> operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest);
+                // add replicas after primary operation
+                ((TestClusterService) clusterService).setState(stateWithAddedReplicas);
+                logger.debug("--> state after primary operation:\n{}", clusterService.state().prettyPrint());
+                return operationOnPrimary;
+            }
+        };
+
+        Request request = new Request(shardId);
+        PlainActionFuture<Response> listener = new PlainActionFuture<>();
+        TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = actionWithAddedReplicaAfterPrimaryOp.new PrimaryPhase(request, createTransportChannel(listener));
+        primaryPhase.run();
+        assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
+        for (ShardRouting replica : stateWithAddedReplicas.getRoutingTable().shardRoutingTable(index, shardId.id()).replicaShards()) {
+            List<CapturingTransport.CapturedRequest> requests = transport.capturedRequestsByTargetNode().get(replica.currentNodeId());
+            assertThat(requests, notNullValue());
+            assertThat(requests.size(), equalTo(1));
+            assertThat("replica request was not sent", requests.get(0).action, equalTo("testAction[r]"));
+        }
+    }
+
+    public void testRelocatingReplicaAfterPrimaryOperation() {
+        final String index = "test";
+        final ShardId shardId = new ShardId(index, 0);
+        // start with a replica
+        clusterService.setState(state(index, true, ShardRoutingState.STARTED, randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.STARTED));
+        logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
+        final ClusterState stateWithRelocatingReplica = state(index, true, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING);
+        final Action actionWithRelocatingReplicasAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
+            @Override
+            protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable {
+                final Tuple<Response, Request> operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest);
+                // set replica to relocating
+                ((TestClusterService) clusterService).setState(stateWithRelocatingReplica);
+                logger.debug("--> state after primary operation:\n{}", clusterService.state().prettyPrint());
+                return operationOnPrimary;
+            }
+        };
+
+        Request request = new Request(shardId);
+        PlainActionFuture<Response> listener = new PlainActionFuture<>();
+        TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = actionWithRelocatingReplicasAfterPrimaryOp.new PrimaryPhase(request, createTransportChannel(listener));
+        primaryPhase.run();
+        assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
+        ShardRouting relocatingReplicaShard = stateWithRelocatingReplica.getRoutingTable().shardRoutingTable(index, shardId.id()).replicaShards().get(0);
+        for (String node : new String[] {relocatingReplicaShard.currentNodeId(), relocatingReplicaShard.relocatingNodeId()}) {
+            List<CapturingTransport.CapturedRequest> requests = transport.capturedRequestsByTargetNode().get(node);
+            assertThat(requests, notNullValue());
+            assertThat(requests.size(), equalTo(1));
+            assertThat("replica request was not sent to replica", requests.get(0).action, equalTo("testAction[r]"));
+        }
+    }
+
+    public void testIndexDeletedAfterPrimaryOperation() {
+        final String index = "test";
+        final ShardId shardId = new ShardId(index, 0);
+        clusterService.setState(state(index, true, ShardRoutingState.STARTED, ShardRoutingState.STARTED));
+        logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
+        final ClusterState stateWithDeletedIndex = state(index + "_new", true, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING);
+        final Action actionWithDeletedIndexAfterPrimaryOp = new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
+            @Override
+            protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable {
+                final Tuple<Response, Request> operationOnPrimary = super.shardOperationOnPrimary(metaData, shardRequest);
+                // delete index after primary op
+                ((TestClusterService) clusterService).setState(stateWithDeletedIndex);
+                logger.debug("--> state after primary operation:\n{}", clusterService.state().prettyPrint());
+                return operationOnPrimary;
+            }
+        };
+
+        Request request = new Request(shardId);
+        PlainActionFuture<Response> listener = new PlainActionFuture<>();
+        TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = actionWithDeletedIndexAfterPrimaryOp.new PrimaryPhase(request, createTransportChannel(listener));
+        primaryPhase.run();
+        assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true));
+        assertThat("replication phase should be skipped if index gets deleted after primary operation", transport.capturedRequestsByTargetNode().size(), equalTo(0));
+    }
     public void testWriteConsistency() throws ExecutionException, InterruptedException {
@@ -262,10 +386,9 @@ public class TransportReplicationActionTests extends ESTestCase {
         final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());

         PlainActionFuture<Response> listener = new PlainActionFuture<>();
-        TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
+        TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
         if (passesWriteConsistency) {
-            assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard()), nullValue());
+            assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard().shardId()), nullValue());
             primaryPhase.run();
             assertTrue("operations should have been perform, consistency level is met", request.processedOnPrimary.get());
             if (assignedReplicas > 0) {
@@ -274,14 +397,18 @@ public class TransportReplicationActionTests extends ESTestCase {
                 assertIndexShardCounter(1);
             }
         } else {
-            assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard()), notNullValue());
+            assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard().shardId()), notNullValue());
             primaryPhase.run();
             assertFalse("operations should not have been perform, consistency level is *NOT* met", request.processedOnPrimary.get());
+            assertListenerThrows("should throw exception to trigger retry", listener, UnavailableShardsException.class);
             assertIndexShardUninitialized();
             for (int i = 0; i < replicaStates.length; i++) {
                 replicaStates[i] = ShardRoutingState.STARTED;
             }
             clusterService.setState(state(index, true, ShardRoutingState.STARTED, replicaStates));
+            listener = new PlainActionFuture<>();
+            primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
+            primaryPhase.run();
             assertTrue("once the consistency level met, operation should continue", request.processedOnPrimary.get());
             assertIndexShardCounter(2);
         }
@@ -336,25 +463,21 @@ public class TransportReplicationActionTests extends ESTestCase {
     protected void runReplicateTest(IndexShardRoutingTable shardRoutingTable, int assignedReplicas, int totalShards) throws InterruptedException, ExecutionException {
-        final ShardRouting primaryShard = shardRoutingTable.primaryShard();
         final ShardIterator shardIt = shardRoutingTable.shardsIt();
         final ShardId shardId = shardIt.shardId();
-        final Request request = new Request();
+        final Request request = new Request(shardId);
         final long primaryTerm = randomInt(200);
         request.primaryTerm(primaryTerm);
-        PlainActionFuture<Response> listener = new PlainActionFuture<>();
+        final PlainActionFuture<Response> listener = new PlainActionFuture<>();
         logger.debug("expecting [{}] assigned replicas, [{}] total shards. using state: \n{}", assignedReplicas, totalShards, clusterService.state().prettyPrint());

-        final TransportReplicationAction<Request, Request, Response>.InternalRequest internalRequest = action.new InternalRequest(request);
-        internalRequest.concreteIndex(shardId.index().name());
-        Releasable reference = getOrCreateIndexShardOperationsCounter();
+        Releasable reference = createIndexShardReference(0);
         assertIndexShardCounter(2);
         // TODO: set a default timeout
         TransportReplicationAction<Request, Request, Response>.ReplicationPhase replicationPhase =
-                action.new ReplicationPhase(shardIt, request,
-                        new Response(), new ClusterStateObserver(clusterService, logger),
-                        primaryShard, internalRequest, listener, reference, null);
+                action.new ReplicationPhase(request,
+                        new Response(),
+                        request.shardId(), createTransportChannel(listener), reference, null);

         assertThat(replicationPhase.totalShards(), equalTo(totalShards));
         assertThat(replicationPhase.pending(), equalTo(assignedReplicas));
@@ -416,7 +539,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         assertThat(request.primaryTerm(), equalTo(primaryTerm));
     }
-    public void testSeqNoIsSetOnPrimary() {
+    public void testSeqNoIsSetOnPrimary() throws Exception {
         final String index = "test";
         final ShardId shardId = new ShardId(index, 0);
         // we use one replica to check the primary term was set on the operation and sent to the replica
@@ -425,7 +548,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
         Request request = new Request(shardId);
         PlainActionFuture<Response> listener = new PlainActionFuture<>();
-        TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
+        TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
         primaryPhase.doRun();
         CapturingTransport.CapturedRequest[] requestsToReplicas = transport.capturedRequests();
         assertThat(requestsToReplicas, arrayWithSize(1));
@@ -450,7 +573,7 @@ public class TransportReplicationActionTests extends ESTestCase {
          * However, this failure would only become apparent once listener.get is called. Seems a little implicit.
          * */
         action = new ActionWithDelay(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool);
-        final TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
+        final TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
         Thread t = new Thread() {
             @Override
             public void run() {
@@ -481,7 +604,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
         Request request = new Request(shardId).timeout("100ms");
         PlainActionFuture<Response> listener = new PlainActionFuture<>();
-        TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
+        TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
         primaryPhase.run();
         assertIndexShardCounter(2);
         assertThat(transport.capturedRequests().length, equalTo(1));
@@ -490,7 +613,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         assertIndexShardCounter(1);
         transport.clear();
         request = new Request(shardId).timeout("100ms");
-        primaryPhase = action.new PrimaryPhase(request, listener);
+        primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
         primaryPhase.run();
         assertIndexShardCounter(2);
         CapturingTransport.CapturedRequest[] replicationRequests = transport.capturedRequests();
@@ -515,7 +638,7 @@ public class TransportReplicationActionTests extends ESTestCase {
             @Override
             public void run() {
                 try {
-                    replicaOperationTransportHandler.messageReceived(new Request(), createTransportChannel());
+                    replicaOperationTransportHandler.messageReceived(new Request(), createTransportChannel(new PlainActionFuture<>()));
                 } catch (Exception e) {
                 }
             }
@@ -532,7 +655,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         action = new ActionWithExceptions(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool);
         final Action.ReplicaOperationTransportHandler replicaOperationTransportHandlerForException = action.new ReplicaOperationTransportHandler();
         try {
-            replicaOperationTransportHandlerForException.messageReceived(new Request(shardId), createTransportChannel());
+            replicaOperationTransportHandlerForException.messageReceived(new Request(shardId), createTransportChannel(new PlainActionFuture<>()));
             fail();
         } catch (Throwable t2) {
         }
@@ -548,7 +671,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint());
         Request request = new Request(shardId).timeout("100ms");
         PlainActionFuture<Response> listener = new PlainActionFuture<>();
-        TransportReplicationAction<Request, Request, Response>.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
+        TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, createTransportChannel(listener));
         primaryPhase.run();
         // no replica request should have been sent yet
         assertThat(transport.capturedRequests().length, equalTo(0));
@@ -565,9 +688,14 @@ public class TransportReplicationActionTests extends ESTestCase {
     /*
     * Returns testIndexShardOperationsCounter or initializes it if it was already created in this test run.
     * */
-    private synchronized Releasable getOrCreateIndexShardOperationsCounter() {
+    private synchronized TransportReplicationAction.IndexShardReference createIndexShardReference(long primaryTerm) {
         count.incrementAndGet();
-        return new Releasable() {
+        return new TransportReplicationAction.IndexShardReference() {
+            @Override
+            public long opPrimaryTerm() {
+                return primaryTerm;
+            }
+
             @Override
             public void close() {
                 count.decrementAndGet();
@@ -576,7 +704,6 @@ public class TransportReplicationActionTests extends ESTestCase {
     }

     public static class Request extends ReplicationRequest<Request> {
-        int shardId;
         public AtomicBoolean processedOnPrimary = new AtomicBoolean();
         public AtomicInteger processedOnReplicas = new AtomicInteger();
@@ -585,21 +712,19 @@ public class TransportReplicationActionTests extends ESTestCase {
         Request(ShardId shardId) {
             this();
-            this.shardId = shardId.id();
-            this.index(shardId.index().name());
+            this.shardId = shardId;
+            this.index = shardId.getIndex();
             // keep things simple
         }

         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
-            out.writeVInt(shardId);
         }

         @Override
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
-            shardId = in.readVInt();
         }
     }
@@ -622,22 +747,17 @@ public class TransportReplicationActionTests extends ESTestCase {
         }

         @Override
-        protected Tuple<Response, Request> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
-            boolean executedBefore = shardRequest.request.processedOnPrimary.getAndSet(true);
+        protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable {
+            boolean executedBefore = shardRequest.processedOnPrimary.getAndSet(true);
             assert executedBefore == false : "request has already been executed on the primary";
-            return new Tuple<>(new Response(), shardRequest.request);
+            return new Tuple<>(new Response(), shardRequest);
         }

         @Override
-        protected void shardOperationOnReplica(ShardId shardId, Request request) {
+        protected void shardOperationOnReplica(Request request) {
             request.processedOnReplicas.incrementAndGet();
         }

-        @Override
-        protected ShardIterator shards(ClusterState clusterState, InternalRequest request) {
-            return clusterState.getRoutingTable().index(request.concreteIndex()).shard(request.request().shardId).shardsIt();
-        }
-
         @Override
         protected boolean checkWriteConsistency() {
             return false;
@@ -648,9 +768,16 @@ public class TransportReplicationActionTests extends ESTestCase {
             return false;
         }

         @Override
-        protected Releasable getIndexShardOperationsCounter(ShardId shardId, long opPrimaryTerm) {
-            return getOrCreateIndexShardOperationsCounter();
+        protected IndexShardReference getIndexShardOperationsCounterOnReplica(ShardId shardId, long opPrimaryTerm) {
+            return createIndexShardReference(opPrimaryTerm);
+        }
+
+        @Override
+        protected IndexShardReference getIndexShardOperationsCounterOnPrimary(ShardId shardId) {
+            final IndexMetaData indexMetaData = clusterService.state().metaData().index(shardId.getIndex());
+            return createIndexShardReference(indexMetaData.primaryTerm(shardId.id()));
         }
     }
@@ -676,8 +803,8 @@ public class TransportReplicationActionTests extends ESTestCase {
         }

         @Override
-        protected Tuple<Response, Request> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
-            return throwException(shardRequest.shardId);
+        protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable {
+            return throwException(shardRequest.shardId());
         }

         private Tuple<Response, Request> throwException(ShardId shardId) {
@@ -698,8 +825,8 @@ public class TransportReplicationActionTests extends ESTestCase {
         }

         @Override
-        protected void shardOperationOnReplica(ShardId shardId, Request shardRequest) {
-            throwException(shardRequest.internalShardId);
+        protected void shardOperationOnReplica(Request shardRequest) {
+            throwException(shardRequest.shardId());
         }
     }
@@ -714,9 +841,9 @@ public class TransportReplicationActionTests extends ESTestCase {
         }

         @Override
-        protected Tuple<Response, Request> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
+        protected Tuple<Response, Request> shardOperationOnPrimary(MetaData metaData, Request shardRequest) throws Throwable {
             awaitLatch();
-            return new Tuple<>(new Response(), shardRequest.request);
+            return new Tuple<>(new Response(), shardRequest);
         }

         private void awaitLatch() throws InterruptedException {
@@ -725,7 +852,7 @@ public class TransportReplicationActionTests extends ESTestCase {
         }

         @Override
-        protected void shardOperationOnReplica(ShardId shardId, Request shardRequest) {
+        protected void shardOperationOnReplica(Request shardRequest) {
             try {
                 awaitLatch();
             } catch (InterruptedException e) {
@@ -737,7 +864,7 @@ public class TransportReplicationActionTests extends ESTestCase {
     /*
     * Transport channel that is needed for replica operation testing.
     * */
-    public TransportChannel createTransportChannel() {
+    public TransportChannel createTransportChannel(final PlainActionFuture<Response> listener) {
         return new TransportChannel() {

             @Override
@@ -752,14 +879,17 @@ public class TransportReplicationActionTests extends ESTestCase {

             @Override
             public void sendResponse(TransportResponse response) throws IOException {
+                listener.onResponse(((Response) response));
             }

             @Override
             public void sendResponse(TransportResponse response, TransportResponseOptions options) throws IOException {
+                listener.onResponse(((Response) response));
             }

             @Override
             public void sendResponse(Throwable error) throws IOException {
+                listener.onFailure(error);
             }
         };
     }
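Note on two helpers the tests above depend on but the diff does not show. First, assertListenerThrows is defined elsewhere in this test class; a minimal sketch of what it presumably does (the name and call shape come from the call sites above, the body is an assumption):

    public static <E extends Throwable> void assertListenerThrows(String msg, PlainActionFuture<?> listener, Class<E> clazz) throws InterruptedException {
        try {
            listener.get();
            fail(msg);
        } catch (ExecutionException e) {
            // the future wraps the failure; the cause should carry the expected type
            assertThat(msg, e.getCause(), instanceOf(clazz));
        }
    }

Second, the new createTransportChannel(listener) overload exists because PrimaryPhase now answers over a TransportChannel instead of invoking a listener directly; the test channel simply bridges each sendResponse call back into a PlainActionFuture the test can block on.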

View File

@@ -759,7 +759,7 @@ public class IndexAliasesIT extends ESIntegTestCase {
             admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("index1", null)).get();
             fail("Expected ActionRequestValidationException");
         } catch (ActionRequestValidationException e) {
-            assertThat(e.getMessage(), containsString("requires an [alias] to be set"));
+            assertThat(e.getMessage(), containsString("[alias] may not be empty string"));
         }
     }
@@ -768,7 +768,7 @@ public class IndexAliasesIT extends ESIntegTestCase {
             admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction("index1", "")).get();
             fail("Expected ActionRequestValidationException");
         } catch (ActionRequestValidationException e) {
-            assertThat(e.getMessage(), containsString("requires an [alias] to be set"));
+            assertThat(e.getMessage(), containsString("[alias] may not be empty string"));
         }
     }
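The two assertions track a reworded validation message: a null or empty alias is now rejected with "[alias] may not be empty string". A hypothetical sketch of a check that would produce it (illustration only, not the actual alias validation code):

    static void validateAliasName(String alias) {
        if (alias == null || alias.isEmpty()) {
            // fragment matched by containsString(...) in the tests above
            throw new IllegalArgumentException("[alias] may not be empty string");
        }
    }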

View File

@@ -26,13 +26,11 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.Node;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.VersionUtils;

 import java.io.BufferedReader;
 import java.io.InputStreamReader;
-import java.nio.file.Path;
 import java.util.Arrays;

 public class RoutingBackwardCompatibilityTests extends ESTestCase {

View File

@@ -24,14 +24,16 @@ import org.elasticsearch.test.ESTestCase;

 import java.net.InetAddress;

+import static org.hamcrest.Matchers.is;
+
 /**
  * Tests for network service... try to keep them safe depending upon configuration
  * please don't actually bind to anything, just test the addresses.
  */
 public class NetworkServiceTests extends ESTestCase {
     /**
      * ensure exception if we bind to multicast ipv4 address
      */
     public void testBindMulticastV4() throws Exception {
         NetworkService service = new NetworkService(Settings.EMPTY);
@@ -42,9 +44,8 @@ public class NetworkServiceTests extends ESTestCase {
             assertTrue(e.getMessage().contains("invalid: multicast"));
         }
     }
-
     /**
      * ensure exception if we bind to multicast ipv6 address
      */
     public void testBindMulticastV6() throws Exception {
         NetworkService service = new NetworkService(Settings.EMPTY);
@@ -55,9 +56,9 @@ public class NetworkServiceTests extends ESTestCase {
             assertTrue(e.getMessage().contains("invalid: multicast"));
         }
     }

     /**
      * ensure exception if we publish to multicast ipv4 address
      */
     public void testPublishMulticastV4() throws Exception {
         NetworkService service = new NetworkService(Settings.EMPTY);
@@ -68,9 +69,9 @@ public class NetworkServiceTests extends ESTestCase {
             assertTrue(e.getMessage().contains("invalid: multicast"));
         }
     }

     /**
      * ensure exception if we publish to multicast ipv6 address
      */
     public void testPublishMulticastV6() throws Exception {
         NetworkService service = new NetworkService(Settings.EMPTY);
@@ -82,24 +83,24 @@ public class NetworkServiceTests extends ESTestCase {
         }
     }

     /**
      * ensure specifying wildcard ipv4 address will bind to all interfaces
      */
     public void testBindAnyLocalV4() throws Exception {
         NetworkService service = new NetworkService(Settings.EMPTY);
         assertEquals(InetAddress.getByName("0.0.0.0"), service.resolveBindHostAddresses(new String[] { "0.0.0.0" })[0]);
     }

     /**
      * ensure specifying wildcard ipv6 address will bind to all interfaces
      */
     public void testBindAnyLocalV6() throws Exception {
         NetworkService service = new NetworkService(Settings.EMPTY);
         assertEquals(InetAddress.getByName("::"), service.resolveBindHostAddresses(new String[] { "::" })[0]);
     }

     /**
      * ensure specifying wildcard ipv4 address selects reasonable publish address
      */
     public void testPublishAnyLocalV4() throws Exception {
         NetworkService service = new NetworkService(Settings.EMPTY);
@@ -107,12 +108,34 @@ public class NetworkServiceTests extends ESTestCase {
         assertFalse(address.isAnyLocalAddress());
     }

     /**
      * ensure specifying wildcard ipv6 address selects reasonable publish address
      */
     public void testPublishAnyLocalV6() throws Exception {
         NetworkService service = new NetworkService(Settings.EMPTY);
         InetAddress address = service.resolvePublishHostAddresses(new String[] { "::" });
         assertFalse(address.isAnyLocalAddress());
     }
+
+    /**
+     * ensure we can bind to multiple addresses
+     */
+    public void testBindMultipleAddresses() throws Exception {
+        NetworkService service = new NetworkService(Settings.EMPTY);
+        InetAddress[] addresses = service.resolveBindHostAddresses(new String[]{"127.0.0.1", "127.0.0.2"});
+        assertThat(addresses.length, is(2));
+    }
+
+    /**
+     * ensure we can't bind to multiple addresses when using wildcard
+     */
+    public void testBindMultipleAddressesWithWildcard() throws Exception {
+        NetworkService service = new NetworkService(Settings.EMPTY);
+        try {
+            service.resolveBindHostAddresses(new String[]{"0.0.0.0", "127.0.0.1"});
+            fail("should have hit exception");
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.getMessage().contains("is wildcard, but multiple addresses specified"));
+        }
+    }
 }
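The two added tests pin down multi-address binding: several concrete addresses are fine, but mixing a wildcard address with anything else is rejected. A sketch of the guard being exercised (assumed shape; the real check lives in NetworkService):

    import java.net.InetAddress;

    final class WildcardGuard {
        static void ensureNoWildcardMix(InetAddress[] addresses) {
            if (addresses.length > 1) {
                for (InetAddress address : addresses) {
                    if (address.isAnyLocalAddress()) {
                        // must contain the fragment the test greps for
                        throw new IllegalArgumentException("bind address: {" + address.getHostAddress()
                                + "} is wildcard, but multiple addresses specified: this makes no sense");
                    }
                }
            }
        }
    }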

View File

@@ -53,7 +53,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase {
             fail("can't index, does not match consistency");
         } catch (UnavailableShardsException e) {
             assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
-            assertThat(e.getMessage(), equalTo("[test][0] Not enough active copies to meet write consistency of [QUORUM] (have 1, needed 2). Timeout: [100ms], request: index {[test][type1][1], source[{ type1 : { \"id\" : \"1\", \"name\" : \"test\" } }]}"));
+            assertThat(e.getMessage(), equalTo("[test][0] Not enough active copies to meet write consistency of [QUORUM] (have 1, needed 2). Timeout: [100ms], request: [index {[test][type1][1], source[{ type1 : { \"id\" : \"1\", \"name\" : \"test\" } }]}]"));
             // but really, all is well
         }
@@ -76,7 +76,7 @@ public class WriteConsistencyLevelIT extends ESIntegTestCase {
             fail("can't index, does not match consistency");
         } catch (UnavailableShardsException e) {
             assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
-            assertThat(e.getMessage(), equalTo("[test][0] Not enough active copies to meet write consistency of [ALL] (have 2, needed 3). Timeout: [100ms], request: index {[test][type1][1], source[{ type1 : { \"id\" : \"1\", \"name\" : \"test\" } }]}"));
+            assertThat(e.getMessage(), equalTo("[test][0] Not enough active copies to meet write consistency of [ALL] (have 2, needed 3). Timeout: [100ms], request: [index {[test][type1][1], source[{ type1 : { \"id\" : \"1\", \"name\" : \"test\" } }]}]"));
             // but really, all is well
         }

View File

@@ -67,7 +67,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
     }

     public void testString() {
-        createIndex("test", Settings.EMPTY, "field", "value", "type=string");
+        createIndex("test", Settings.EMPTY, "test", "field", "type=string");
         for (int value = 0; value <= 10; value++) {
             client().prepareIndex("test", "test").setSource("field", String.format(Locale.ENGLISH, "%03d", value)).get();
         }
@@ -85,7 +85,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {

     public void testDouble() {
         String fieldName = "field";
-        createIndex("test", Settings.EMPTY, fieldName, "value", "type=double");
+        createIndex("test", Settings.EMPTY, "test", fieldName, "type=double");
         for (double value = -1; value <= 9; value++) {
             client().prepareIndex("test", "test").setSource(fieldName, value).get();
         }
@@ -102,7 +102,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {

     public void testFloat() {
         String fieldName = "field";
-        createIndex("test", Settings.EMPTY, fieldName, "value", "type=float");
+        createIndex("test", Settings.EMPTY, "test", fieldName, "type=float");
         for (float value = -1; value <= 9; value++) {
             client().prepareIndex("test", "test").setSource(fieldName, value).get();
         }
@@ -112,14 +112,14 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
         assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11l));
         assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11l));
         assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
-        assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(-1.0));
-        assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(9.0));
+        assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(-1f));
+        assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(9f));
         assertThat(result.getAllFieldStats().get(fieldName).getMinValueAsString(), equalTo(Float.toString(-1)));
         assertThat(result.getAllFieldStats().get(fieldName).getMaxValueAsString(), equalTo(Float.toString(9)));
     }

     private void testNumberRange(String fieldName, String fieldType, long min, long max) {
-        createIndex("test", Settings.EMPTY, fieldName, "value", "type=" + fieldType);
+        createIndex("test", Settings.EMPTY, "test", fieldName, "type=" + fieldType);
         for (long value = min; value <= max; value++) {
             client().prepareIndex("test", "test").setSource(fieldName, value).get();
         }
@@ -180,11 +180,11 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
     }

     public void testInvalidField() {
-        createIndex("test1", Settings.EMPTY, "field1", "value", "type=string");
+        createIndex("test1", Settings.EMPTY, "test", "field1", "type=string");
         client().prepareIndex("test1", "test").setSource("field1", "a").get();
         client().prepareIndex("test1", "test").setSource("field1", "b").get();

-        createIndex("test2", Settings.EMPTY, "field2", "value", "type=string");
+        createIndex("test2", Settings.EMPTY, "test", "field2", "type=string");
         client().prepareIndex("test2", "test").setSource("field2", "a").get();
         client().prepareIndex("test2", "test").setSource("field2", "b").get();
         client().admin().indices().prepareRefresh().get();
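Two separate fixes are folded into this file. The createIndex calls now apparently pass the document type ("test") as the third argument, where the old calls slipped the field name into that slot. And the float assertions switch from double literals (-1.0, 9.0) to float literals (-1f, 9f), which matters because hamcrest's equalTo bottoms out in Object.equals, and that is type-sensitive for boxed numbers:

    class BoxedEqualityDemo {
        public static void main(String[] args) {
            // a boxed Float is never equal to a boxed Double, even for the same numeric value
            System.out.println(Float.valueOf(9f).equals(Double.valueOf(9.0))); // false
            System.out.println(Float.valueOf(9f).equals(Float.valueOf(9f)));   // true
        }
    }

so asserting equalTo(9.0) against stats that now report a Float would always fail.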

View File

@@ -59,7 +59,6 @@ import org.elasticsearch.indices.query.IndicesQueriesRegistry;
 import org.elasticsearch.script.ScriptContextRegistry;
 import org.elasticsearch.script.ScriptEngineService;
 import org.elasticsearch.script.ScriptService;
-import org.elasticsearch.script.mustache.MustacheScriptEngineService;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.IndexSettingsModule;
 import org.elasticsearch.test.engine.MockEngineFactory;
@@ -102,7 +101,6 @@ public class IndexModuleTests extends ESTestCase {
         BigArrays bigArrays = new BigArrays(recycler, circuitBreakerService);
         IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndicesFieldDataCacheListener(circuitBreakerService), threadPool);
         Set<ScriptEngineService> scriptEngines = new HashSet<>();
-        scriptEngines.add(new MustacheScriptEngineService(settings));
         scriptEngines.addAll(Arrays.asList(scriptEngineServices));
         ScriptService scriptService = new ScriptService(settings, environment, scriptEngines, new ResourceWatcherService(settings, threadPool), new ScriptContextRegistry(Collections.emptyList()));
         IndicesQueriesRegistry indicesQueriesRegistry = new IndicesQueriesRegistry(settings, Collections.emptySet(), new NamedWriteableRegistry());

View File

@@ -21,6 +21,7 @@ package org.elasticsearch.index.mapper;

 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -28,15 +29,21 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.core.DoubleFieldMapper;
+import org.elasticsearch.index.mapper.core.FloatFieldMapper;
 import org.elasticsearch.index.mapper.core.IntegerFieldMapper;
 import org.elasticsearch.index.mapper.core.StringFieldMapper;
 import org.elasticsearch.test.ESSingleNodeTestCase;

 import java.io.IOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;

 import static java.util.Collections.emptyMap;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.nullValue;

 public class DynamicMappingTests extends ESSingleNodeTestCase {
@@ -407,4 +414,26 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
             // expected
         }
     }
+
+    public void testDefaultFloatingPointMappings() throws IOException {
+        DocumentMapper mapper = createIndex("test").mapperService().documentMapperWithAutoCreate("type").getDocumentMapper();
+        doTestDefaultFloatingPointMappings(mapper, XContentFactory.jsonBuilder());
+        doTestDefaultFloatingPointMappings(mapper, XContentFactory.yamlBuilder());
+        doTestDefaultFloatingPointMappings(mapper, XContentFactory.smileBuilder());
+        doTestDefaultFloatingPointMappings(mapper, XContentFactory.cborBuilder());
+    }
+
+    private void doTestDefaultFloatingPointMappings(DocumentMapper mapper, XContentBuilder builder) throws IOException {
+        BytesReference source = builder.startObject()
+                .field("foo", 3.2f) // float
+                .field("bar", 3.2d) // double
+                .field("baz", (double) 3.2f) // double that can be accurately represented as a float
+                .endObject().bytes();
+        ParsedDocument parsedDocument = mapper.parse("index", "type", "id", source);
+        Mapping update = parsedDocument.dynamicMappingsUpdate();
+        assertNotNull(update);
+        assertThat(update.root().getMapper("foo"), instanceOf(FloatFieldMapper.class));
+        assertThat(update.root().getMapper("bar"), instanceOf(FloatFieldMapper.class));
+        assertThat(update.root().getMapper("baz"), instanceOf(FloatFieldMapper.class));
+    }
 }
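The added test pins down a change to dynamic mapping defaults: every floating-point JSON value, including a plain double, is now expected to map to FloatFieldMapper. Expressed as a sketch (a hypothetical restatement, not the parser source):

    static String dynamicTypeFor(Object value) {
        if (value instanceof Float || value instanceof Double) {
            return "float"; // floating point defaults to float; map explicitly if double precision is needed
        } else if (value instanceof Integer || value instanceof Long) {
            return "long";
        }
        return "string";
    }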

View File

@@ -281,7 +281,7 @@ public abstract class FieldTypeTestCase extends ESTestCase {
     public void testCheckTypeName() {
         final MappedFieldType fieldType = createNamedDefaultFieldType();
         List<String> conflicts = new ArrayList<>();
-        fieldType.checkTypeName(fieldType, conflicts);
+        fieldType.checkCompatibility(fieldType, conflicts, random().nextBoolean()); // no exception
         assertTrue(conflicts.toString(), conflicts.isEmpty());

         MappedFieldType bogus = new MappedFieldType() {
@@ -291,7 +291,7 @@ public abstract class FieldTypeTestCase extends ESTestCase {
             public String typeName() { return fieldType.typeName();}
         };
         try {
-            fieldType.checkTypeName(bogus, conflicts);
+            fieldType.checkCompatibility(bogus, conflicts, random().nextBoolean());
             fail("expected bad types exception");
         } catch (IllegalStateException e) {
             assertTrue(e.getMessage().contains("Type names equal"));
@@ -304,10 +304,13 @@ public abstract class FieldTypeTestCase extends ESTestCase {
             @Override
             public String typeName() { return "othertype";}
         };
-        fieldType.checkTypeName(other, conflicts);
-        assertFalse(conflicts.isEmpty());
-        assertTrue(conflicts.get(0).contains("cannot be changed from type"));
-        assertEquals(1, conflicts.size());
+        try {
+            fieldType.checkCompatibility(other, conflicts, random().nextBoolean());
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.getMessage(), e.getMessage().contains("cannot be changed from type"));
+        }
+        assertTrue(conflicts.toString(), conflicts.isEmpty());
     }

     public void testCheckCompatibility() {

View File

@@ -19,6 +19,7 @@
 package org.elasticsearch.index.mapper;

+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
 import org.elasticsearch.common.compress.CompressedXContent;
@@ -117,8 +118,9 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
         if (t instanceof ExecutionException) {
             t = ((ExecutionException) t).getCause();
         }
-        if (t instanceof IllegalArgumentException) {
-            assertEquals("It is forbidden to index into the default mapping [_default_]", t.getMessage());
+        final Throwable throwable = ExceptionsHelper.unwrapCause(t);
+        if (throwable instanceof IllegalArgumentException) {
+            assertEquals("It is forbidden to index into the default mapping [_default_]", throwable.getMessage());
         } else {
             throw t;
         }
@@ -133,8 +135,9 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
         if (t instanceof ExecutionException) {
             t = ((ExecutionException) t).getCause();
         }
-        if (t instanceof IllegalArgumentException) {
-            assertEquals("It is forbidden to index into the default mapping [_default_]", t.getMessage());
+        final Throwable throwable = ExceptionsHelper.unwrapCause(t);
+        if (throwable instanceof IllegalArgumentException) {
+            assertEquals("It is forbidden to index into the default mapping [_default_]", throwable.getMessage());
         } else {
             throw t;
         }
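Unwrapping matters here because the `IllegalArgumentException` can reach the test wrapped in another exception (for example a transport-level wrapper), so `instanceof` on the outer throwable no longer matches. A self-contained illustration of the failure mode, using a simplified stand-in for `ExceptionsHelper.unwrapCause` (the real helper only unwraps Elasticsearch wrapper exceptions, which this sketch glosses over):

```java
import java.util.concurrent.ExecutionException;

public class UnwrapCauseSketch {
    // Simplified stand-in: walk getCause() to the innermost throwable.
    static Throwable unwrapCause(Throwable t) {
        while (t.getCause() != null && t.getCause() != t) {
            t = t.getCause();
        }
        return t;
    }

    public static void main(String[] args) {
        Throwable t = new ExecutionException(
                new RuntimeException(new IllegalArgumentException("forbidden")));
        if (t instanceof ExecutionException) {
            t = t.getCause(); // still a RuntimeException wrapper
        }
        System.out.println(t instanceof IllegalArgumentException);              // false
        System.out.println(unwrapCause(t) instanceof IllegalArgumentException); // true
    }
}
```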

View File

@@ -30,6 +30,7 @@ import org.elasticsearch.test.ESIntegTestCase;

 import java.io.IOException;

+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
@@ -68,6 +69,25 @@ public class CopyToMapperIntegrationIT extends ESIntegTestCase {
     }

+    public void testDynamicObjectCopyTo() throws Exception {
+        String mapping = jsonBuilder().startObject().startObject("doc").startObject("properties")
+            .startObject("foo")
+                .field("type", "string")
+                .field("copy_to", "root.top.child")
+            .endObject()
+            .endObject().endObject().endObject().string();
+        assertAcked(
+            client().admin().indices().prepareCreate("test-idx")
+                .addMapping("doc", mapping)
+        );
+        client().prepareIndex("test-idx", "doc", "1")
+            .setSource("foo", "bar")
+            .get();
+        client().admin().indices().prepareRefresh("test-idx").execute().actionGet();
+        SearchResponse response = client().prepareSearch("test-idx")
+            .setQuery(QueryBuilders.termQuery("root.top.child", "bar")).get();
+        assertThat(response.getHits().totalHits(), equalTo(1L));
+    }

     private XContentBuilder createDynamicTemplateMapping() throws IOException {
         return XContentFactory.jsonBuilder().startObject().startObject("doc")

View File

@@ -167,27 +167,126 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
     }

-    public void testCopyToFieldsNonExistingInnerObjectParsing() throws Exception {
-        String mapping = jsonBuilder().startObject().startObject("type1").startObject("properties")
+    public void testCopyToDynamicInnerObjectParsing() throws Exception {
+        String mapping = jsonBuilder().startObject().startObject("type1")
+            .startObject("properties")
             .startObject("copy_test")
                 .field("type", "string")
                 .field("copy_to", "very.inner.field")
             .endObject()
-            .endObject().endObject().endObject().string();
+            .endObject()
+            .endObject().endObject().string();

         DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);

         BytesReference json = jsonBuilder().startObject()
             .field("copy_test", "foo")
+            .field("new_field", "bar")
             .endObject().bytes();

+        ParseContext.Document doc = docMapper.parse("test", "type1", "1", json).rootDoc();
+        assertThat(doc.getFields("copy_test").length, equalTo(1));
+        assertThat(doc.getFields("copy_test")[0].stringValue(), equalTo("foo"));
+        assertThat(doc.getFields("very.inner.field").length, equalTo(1));
+        assertThat(doc.getFields("very.inner.field")[0].stringValue(), equalTo("foo"));
+        assertThat(doc.getFields("new_field").length, equalTo(1));
+        assertThat(doc.getFields("new_field")[0].stringValue(), equalTo("bar"));
+    }
+
+    public void testCopyToDynamicInnerInnerObjectParsing() throws Exception {
+        String mapping = jsonBuilder().startObject().startObject("type1")
+            .startObject("properties")
+            .startObject("copy_test")
+                .field("type", "string")
+                .field("copy_to", "very.far.inner.field")
+            .endObject()
+            .startObject("very")
+                .field("type", "object")
+                .startObject("properties")
+                    .startObject("far")
+                        .field("type", "object")
+                    .endObject()
+                .endObject()
+            .endObject()
+            .endObject()
+            .endObject().endObject().string();
+
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+        BytesReference json = jsonBuilder().startObject()
+            .field("copy_test", "foo")
+            .field("new_field", "bar")
+            .endObject().bytes();
+
+        ParseContext.Document doc = docMapper.parse("test", "type1", "1", json).rootDoc();
+        assertThat(doc.getFields("copy_test").length, equalTo(1));
+        assertThat(doc.getFields("copy_test")[0].stringValue(), equalTo("foo"));
+        assertThat(doc.getFields("very.far.inner.field").length, equalTo(1));
+        assertThat(doc.getFields("very.far.inner.field")[0].stringValue(), equalTo("foo"));
+        assertThat(doc.getFields("new_field").length, equalTo(1));
+        assertThat(doc.getFields("new_field")[0].stringValue(), equalTo("bar"));
+    }
+
+    public void testCopyToStrictDynamicInnerObjectParsing() throws Exception {
+        String mapping = jsonBuilder().startObject().startObject("type1")
+            .field("dynamic", "strict")
+            .startObject("properties")
+            .startObject("copy_test")
+                .field("type", "string")
+                .field("copy_to", "very.inner.field")
+            .endObject()
+            .endObject()
+            .endObject().endObject().string();
+
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+        BytesReference json = jsonBuilder().startObject()
+            .field("copy_test", "foo")
+            .endObject().bytes();

         try {
             docMapper.parse("test", "type1", "1", json).rootDoc();
             fail();
         } catch (MapperParsingException ex) {
-            assertThat(ex.getMessage(), startsWith("attempt to copy value to non-existing object"));
+            assertThat(ex.getMessage(), startsWith("mapping set to strict, dynamic introduction of [very] within [type1] is not allowed"));
+        }
+    }
+
+    public void testCopyToInnerStrictDynamicInnerObjectParsing() throws Exception {
+        String mapping = jsonBuilder().startObject().startObject("type1")
+            .startObject("properties")
+            .startObject("copy_test")
+                .field("type", "string")
+                .field("copy_to", "very.far.field")
+            .endObject()
+            .startObject("very")
+                .field("type", "object")
+                .startObject("properties")
+                    .startObject("far")
+                        .field("type", "object")
+                        .field("dynamic", "strict")
+                    .endObject()
+                .endObject()
+            .endObject()
+            .endObject()
+            .endObject().endObject().string();
+
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+        BytesReference json = jsonBuilder().startObject()
+            .field("copy_test", "foo")
+            .endObject().bytes();
+
+        try {
+            docMapper.parse("test", "type1", "1", json).rootDoc();
+            fail();
+        } catch (MapperParsingException ex) {
+            assertThat(ex.getMessage(), startsWith("mapping set to strict, dynamic introduction of [field] within [very.far] is not allowed"));
         }
     }
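Taken together, the replacement tests pin down the new rule: a `copy_to` target path may not exist yet, the missing objects are then introduced through dynamic mapping, and the `dynamic` setting of the nearest existing enclosing object decides whether that succeeds. A toy model of the resolution rule (illustrative only, not the real `DocumentParser` code):

```java
import java.util.HashMap;
import java.util.Map;

// Toy model: walk the copy_to path; a missing child is created dynamically
// unless the nearest existing parent is mapped with dynamic: strict.
public class CopyToPathSketch {
    static class Obj {
        final String path;
        final boolean strict;                      // models dynamic: strict
        final Map<String, Obj> children = new HashMap<>();
        Obj(String path, boolean strict) { this.path = path; this.strict = strict; }
    }

    static Obj resolve(Obj root, String... segments) {
        Obj current = root;
        for (String segment : segments) {
            Obj next = current.children.get(segment);
            if (next == null) {
                if (current.strict) {
                    throw new IllegalArgumentException("mapping set to strict, dynamic introduction of ["
                            + segment + "] within [" + current.path + "] is not allowed");
                }
                next = new Obj(current.path + "." + segment, false);
                current.children.put(segment, next);
            }
            current = next;
        }
        return current;
    }

    public static void main(String[] args) {
        Obj root = new Obj("type1", false);
        root.children.put("very", new Obj("very", false));
        root.children.get("very").children.put("far", new Obj("very.far", true));
        resolve(root, "very", "inner", "field");   // fine: "inner" is created dynamically
        try {
            resolve(root, "very", "far", "field"); // fails: "very.far" is strict
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```

The nested-object case in the next hunk is the one exception to "create it dynamically": dynamic templates can make the introduced object `nested`, which `copy_to` refuses outright.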
@@ -337,6 +436,41 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
         }
     }

+    public void testCopyToDynamicNestedObjectParsing() throws Exception {
+        String mapping = jsonBuilder().startObject().startObject("type1")
+            .startArray("dynamic_templates")
+                .startObject()
+                    .startObject("objects")
+                        .field("match_mapping_type", "object")
+                        .startObject("mapping")
+                            .field("type", "nested")
+                        .endObject()
+                    .endObject()
+                .endObject()
+            .endArray()
+            .startObject("properties")
+                .startObject("copy_test")
+                    .field("type", "string")
+                    .field("copy_to", "very.inner.field")
+                .endObject()
+            .endObject()
+            .endObject().endObject().string();
+
+        DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping);
+
+        BytesReference json = jsonBuilder().startObject()
+            .field("copy_test", "foo")
+            .field("new_field", "bar")
+            .endObject().bytes();
+
+        try {
+            docMapper.parse("test", "type1", "1", json).rootDoc();
+            fail();
+        } catch (MapperParsingException ex) {
+            assertThat(ex.getMessage(), startsWith("It is forbidden to create dynamic nested objects ([very]) through `copy_to`"));
+        }
+    }

     private void assertFieldValue(Document doc, String field, Number... expected) {
         IndexableField[] values = doc.getFields(field);
         if (values == null) {

View File

@@ -0,0 +1,105 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper.core;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.MapperTestUtils;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.core.IsEqual.equalTo;

public class MultiFieldCopyToMapperTests extends ESTestCase {

    public void testExceptionForCopyToInMultiFields() throws IOException {
        XContentBuilder mapping = createMappingWithCopyToInMultiField();
        Tuple<List<Version>, List<Version>> versionsWithAndWithoutExpectedExceptions = versionsWithAndWithoutExpectedExceptions();

        // first check that for newer versions we throw an exception if copy_to is found within a multi field
        Version indexVersion = randomFrom(versionsWithAndWithoutExpectedExceptions.v1());
        MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, indexVersion).build());
        try {
            mapperService.parse("type", new CompressedXContent(mapping.string()), true);
            fail("Parsing should throw an exception because the mapping contains a copy_to in a multi field");
        } catch (MapperParsingException e) {
            assertThat(e.getMessage(), equalTo("copy_to in multi fields is not allowed. Found the copy_to in field [c] which is within a multi field."));
        }

        // now test that with an older version the parsing just works
        indexVersion = randomFrom(versionsWithAndWithoutExpectedExceptions.v2());
        mapperService = MapperTestUtils.newMapperService(createTempDir(), Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, indexVersion).build());
        DocumentMapper documentMapper = mapperService.parse("type", new CompressedXContent(mapping.string()), true);
        assertFalse(documentMapper.mapping().toString().contains("copy_to"));
    }

    private static XContentBuilder createMappingWithCopyToInMultiField() throws IOException {
        XContentBuilder mapping = jsonBuilder();
        mapping.startObject()
            .startObject("type")
                .startObject("properties")
                    .startObject("a")
                        .field("type", "string")
                    .endObject()
                    .startObject("b")
                        .field("type", "string")
                        .startObject("fields")
                            .startObject("c")
                                .field("type", "string")
                                .field("copy_to", "a")
                            .endObject()
                        .endObject()
                    .endObject()
                .endObject()
            .endObject()
            .endObject();
        return mapping;
    }

    // returns a tuple where
    // v1 is a list of versions for which we expect an exception when a copy_to in multi fields is found and
    // v2 is a list of older versions for which no exception is thrown and only a warning is logged
    private static Tuple<List<Version>, List<Version>> versionsWithAndWithoutExpectedExceptions() {
        List<Version> versionsWithException = new ArrayList<>();
        List<Version> versionsWithoutException = new ArrayList<>();
        for (Version version : VersionUtils.allVersions()) {
            if (version.after(Version.V_2_1_0) ||
                (version.after(Version.V_2_0_1) && version.before(Version.V_2_1_0))) {
                versionsWithException.add(version);
            } else {
                versionsWithoutException.add(version);
            }
        }
        return new Tuple<>(versionsWithException, versionsWithoutException);
    }
}
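The two-branch version predicate above is worth decoding: the error is expected on every release after 2.1.0 and on the 2.0.x releases after 2.0.1, while 2.1.0 itself (presumably released before the backport landed) and everything up to 2.0.1 stay lenient. A self-contained toy decoding of that predicate (versions as integers for brevity; the real code uses `org.elasticsearch.Version`):

```java
// Toy model of which release lines reject copy_to inside multi fields.
public class CopyToVersionGateSketch {
    // Mirrors: version.after(V_2_1_0) || (version.after(V_2_0_1) && version.before(V_2_1_0))
    static boolean rejects(int major, int minor, int patch) {
        int v = major * 10000 + minor * 100 + patch;
        int v201 = 2 * 10000 + 1;         // 2.0.1
        int v210 = 2 * 10000 + 100;       // 2.1.0
        return v > v210 || (v > v201 && v < v210);
    }

    public static void main(String[] args) {
        System.out.println(rejects(2, 0, 1)); // false: before the backport
        System.out.println(rejects(2, 0, 2)); // true:  2.0.x backport line
        System.out.println(rejects(2, 1, 0)); // false: released without the fix
        System.out.println(rejects(2, 1, 1)); // true
        System.out.println(rejects(2, 2, 0)); // true
    }
}
```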

View File

@@ -24,6 +24,8 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexableField;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.IndexService;
@@ -41,9 +43,11 @@ import org.elasticsearch.index.mapper.string.SimpleStringMappingTests;
 import org.elasticsearch.test.ESSingleNodeTestCase;

 import java.io.IOException;
+import java.util.Arrays;

 import static org.elasticsearch.common.settings.Settings.settingsBuilder;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
@@ -510,4 +514,62 @@ public class SimpleNumericTests extends ESSingleNodeTestCase {
         assertThat(ts, instanceOf(NumericTokenStream.class));
         assertEquals(expected, ((NumericTokenStream) ts).getPrecisionStep());
     }

+    public void testTermVectorsBackCompat() throws Exception {
+        for (String type : Arrays.asList("byte", "short", "integer", "long", "float", "double")) {
+            doTestTermVectorsBackCompat(type);
+        }
+    }
+
+    private void doTestTermVectorsBackCompat(String type) throws Exception {
+        DocumentMapperParser parser = createIndex("index-" + type).mapperService().documentMapperParser();
+        String mappingWithTV = XContentFactory.jsonBuilder().startObject().startObject("type")
+            .startObject("properties")
+                .startObject("foo")
+                    .field("type", type)
+                    .field("term_vector", "yes")
+                .endObject()
+            .endObject().endObject().endObject().string();
+        try {
+            parser.parse(mappingWithTV);
+            fail();
+        } catch (MapperParsingException e) {
+            assertThat(e.getMessage(), containsString("Mapping definition for [foo] has unsupported parameters: [term_vector : yes]"));
+        }
+        Settings oldIndexSettings = Settings.builder()
+            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_1_0)
+            .build();
+        parser = createIndex("index2-" + type, oldIndexSettings).mapperService().documentMapperParser();
+        parser.parse(mappingWithTV); // no exception
+    }
+
+    public void testAnalyzerBackCompat() throws Exception {
+        for (String type : Arrays.asList("byte", "short", "integer", "long", "float", "double")) {
+            doTestAnalyzerBackCompat(type);
+        }
+    }
+
+    private void doTestAnalyzerBackCompat(String type) throws Exception {
+        DocumentMapperParser parser = createIndex("index-" + type).mapperService().documentMapperParser();
+        String mappingWithAnalyzer = XContentFactory.jsonBuilder().startObject().startObject("type")
+            .startObject("properties")
+                .startObject("foo")
+                    .field("type", type)
+                    .field("analyzer", "keyword")
+                .endObject()
+            .endObject().endObject().endObject().string();
+        try {
+            parser.parse(mappingWithAnalyzer);
+            fail();
+        } catch (MapperParsingException e) {
+            assertThat(e.getMessage(), containsString("Mapping definition for [foo] has unsupported parameters: [analyzer : keyword]"));
+        }
+        Settings oldIndexSettings = Settings.builder()
+            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_1_0)
+            .build();
+        parser = createIndex("index2-" + type, oldIndexSettings).mapperService().documentMapperParser();
+        parser.parse(mappingWithAnalyzer); // no exception
+    }
 }
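Both new tests exercise the same leniency gate: a numeric field rejects leftover string-field parameters (`term_vector`, `analyzer`) on newly created indexes but tolerates them on indexes created on 2.1.0 or earlier. A toy sketch of that gate (illustrative only; the real check lives in the numeric type parsers, and the tests only establish that 2.1.0-created indexes are lenient while current ones are strict):

```java
import java.util.Collections;
import java.util.Map;

// Toy model of the version-gated strictness the back-compat tests rely on.
public class UnsupportedParamsSketch {
    static void checkUnsupported(String field, Map<String, Object> leftover, boolean indexHasStrictParsing) {
        if (leftover.isEmpty() == false && indexHasStrictParsing) {
            throw new IllegalArgumentException(
                    "Mapping definition for [" + field + "] has unsupported parameters: " + leftover);
        }
        // lenient path: leftover parameters from old mappings are silently dropped
    }

    public static void main(String[] args) {
        Map<String, Object> leftover = Collections.<String, Object>singletonMap("term_vector", "yes");
        checkUnsupported("foo", leftover, false);      // old index: tolerated
        try {
            checkUnsupported("foo", leftover, true);   // current index: rejected
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}
```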

Some files were not shown because too many files have changed in this diff.