Merge branch 'master' into feature/client_aggs_parsing

Christoph Büscher 2017-04-26 23:09:45 +02:00
commit 1f221154a6
201 changed files with 2898 additions and 1758 deletions

Vagrantfile vendored

@@ -104,6 +104,12 @@ SOURCE_PROMPT
source /etc/profile.d/elasticsearch_prompt.sh
SOURCE_PROMPT
SHELL
# Creates a file to mark the machine as created by vagrant. Tests check
# for this file and refuse to run if it is not present so that they can't
# be run unexpectedly.
config.vm.provision "markerfile", type: "shell", inline: <<-SHELL
touch /etc/is_vagrant_vm
SHELL
end
config.config_procs.push ['2', set_prompt]
end
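
(An aside on the marker file: it gives test code a cheap, portable guard against running VM-only suites on a developer machine. A minimal Java sketch of such a guard — the class and method names here are hypothetical, not the project's actual test infrastructure:)

    import java.nio.file.Files;
    import java.nio.file.Paths;

    final class VagrantVmGuard {
        /** True only inside a VM provisioned by the "markerfile" block above. */
        static boolean insideVagrantVm() {
            return Files.exists(Paths.get("/etc/is_vagrant_vm"));
        }
    }
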
@@ -263,7 +269,7 @@ def provision(config,
echo "==> Installing Gradle"
curl -sS -o /tmp/gradle.zip -L https://services.gradle.org/distributions/gradle-3.3-bin.zip
unzip /tmp/gradle.zip -d /opt
rm -rf /tmp/gradle.zip
ln -s /opt/gradle-3.3/bin/gradle /usr/bin/gradle
# make nfs mounted gradle home dir writeable
chown vagrant:vagrant /home/vagrant/.gradle


@@ -156,4 +156,11 @@ if (project != rootProject) {
testClass = 'org.elasticsearch.test.NamingConventionsCheckBadClasses$UnitTestCase'
integTestClass = 'org.elasticsearch.test.NamingConventionsCheckBadClasses$IntegTestCase'
}
task namingConventionsMain(type: org.elasticsearch.gradle.precommit.NamingConventionsTask) {
checkForTestsInMain = true
testClass = namingConventions.testClass
integTestClass = namingConventions.integTestClass
}
precommit.dependsOn namingConventionsMain
}


@@ -38,17 +38,7 @@ public class NamingConventionsTask extends LoggedExec {
* inputs (ie the jars/class files).
*/
@OutputFile
File successMarker = new File(project.buildDir, 'markers/namingConventions')
/**
* The classpath to run the naming conventions checks against. Must contain the files in the test
* output directory and everything required to load those classes.
*
* We don't declare the actual test files as a dependency or input because if they change then
* this will change.
*/
@InputFiles
FileCollection classpath = project.sourceSets.test.runtimeClasspath
File successMarker = new File(project.buildDir, "markers/${this.name}")
/**
* Should we skip the integ tests in disguise tests? Defaults to true because only core names its
@@ -69,18 +59,35 @@ public class NamingConventionsTask extends LoggedExec {
@Input
String integTestClass = 'org.elasticsearch.test.ESIntegTestCase'
/**
* Should the test also check the main classpath for test classes instead of
* doing the usual checks to the test classpath.
*/
@Input
boolean checkForTestsInMain = false;
public NamingConventionsTask() {
// Extra classpath contains the actual test
project.configurations.create('namingConventions')
Dependency buildToolsDep = project.dependencies.add('namingConventions',
"org.elasticsearch.gradle:build-tools:${VersionProperties.elasticsearch}")
buildToolsDep.transitive = false // We don't need gradle in the classpath. It conflicts.
if (false == project.configurations.names.contains('namingConventions')) {
project.configurations.create('namingConventions')
Dependency buildToolsDep = project.dependencies.add('namingConventions',
"org.elasticsearch.gradle:build-tools:${VersionProperties.elasticsearch}")
buildToolsDep.transitive = false // We don't need gradle in the classpath. It conflicts.
}
FileCollection extraClasspath = project.configurations.namingConventions
dependsOn(extraClasspath)
description = "Runs NamingConventionsCheck on ${classpath}"
FileCollection classpath = project.sourceSets.test.runtimeClasspath
inputs.files(classpath)
description = "Tests that test classes aren't misnamed or misplaced"
executable = new File(project.javaHome, 'bin/java')
onlyIf { project.sourceSets.test.output.classesDir.exists() }
if (false == checkForTestsInMain) {
/* This task is created by default for all subprojects with this
* setting and there is no point in running it if the files don't
* exist. */
onlyIf { project.sourceSets.test.output.classesDir.exists() }
}
/*
* We build the arguments in a funny afterEvaluate/doFirst closure so that we can wait for the classpath to be
* ready for us. Strangely, neither one on its own is good enough.
@@ -104,7 +111,14 @@ public class NamingConventionsTask extends LoggedExec {
if (':build-tools'.equals(project.path)) {
args('--self-test')
}
args('--', project.sourceSets.test.output.classesDir.absolutePath)
if (checkForTestsInMain) {
args('--main')
args('--')
args(project.sourceSets.main.output.classesDir.absolutePath)
} else {
args('--')
args(project.sourceSets.test.output.classesDir.absolutePath)
}
}
}
doLast { successMarker.setText("", 'UTF-8') }


@@ -28,6 +28,7 @@ import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
/**
@@ -49,6 +50,7 @@ public class NamingConventionsCheck {
Path rootPath = null;
boolean skipIntegTestsInDisguise = false;
boolean selfTest = false;
boolean checkMainClasses = false;
for (int i = 0; i < args.length; i++) {
String arg = args[i];
switch (arg) {
@@ -64,6 +66,9 @@ public class NamingConventionsCheck {
case "--self-test":
selfTest = true;
break;
case "--main":
checkMainClasses = true;
break;
case "--":
rootPath = Paths.get(args[++i]);
break;
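
(Putting the flags together: judging by this parsing loop and the "--test-class is required" message further down, a main-sources run is assembled roughly as in the following sketch; the exact spelling of --test-class and the classes directory are inferred, and exception handling is elided:)

    // Hypothetical invocation, mirroring what the Gradle task passes to the checker:
    String[] args = {
        "--test-class", "org.elasticsearch.test.ESTestCase",
        "--main",                      // scan the main output for misplaced tests
        "--", "build/classes/main"     // root path of the compiled classes to walk
    };
    NamingConventionsCheck.main(args);
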
@@ -73,28 +78,43 @@ public class NamingConventionsCheck {
}
NamingConventionsCheck check = new NamingConventionsCheck(testClass, integTestClass);
check.check(rootPath, skipIntegTestsInDisguise);
if (checkMainClasses) {
check.checkMain(rootPath);
} else {
check.checkTests(rootPath, skipIntegTestsInDisguise);
}
if (selfTest) {
assertViolation("WrongName", check.missingSuffix);
assertViolation("WrongNameTheSecond", check.missingSuffix);
assertViolation("DummyAbstractTests", check.notRunnable);
assertViolation("DummyInterfaceTests", check.notRunnable);
assertViolation("InnerTests", check.innerClasses);
assertViolation("NotImplementingTests", check.notImplementing);
assertViolation("PlainUnit", check.pureUnitTest);
if (checkMainClasses) {
assertViolation(NamingConventionsCheckInMainTests.class.getName(), check.testsInMain);
assertViolation(NamingConventionsCheckInMainIT.class.getName(), check.testsInMain);
} else {
assertViolation("WrongName", check.missingSuffix);
assertViolation("WrongNameTheSecond", check.missingSuffix);
assertViolation("DummyAbstractTests", check.notRunnable);
assertViolation("DummyInterfaceTests", check.notRunnable);
assertViolation("InnerTests", check.innerClasses);
assertViolation("NotImplementingTests", check.notImplementing);
assertViolation("PlainUnit", check.pureUnitTest);
}
}
// Now we should have no violations
assertNoViolations("Not all subclasses of " + check.testClass.getSimpleName()
+ " match the naming convention. Concrete classes must end with [Tests]", check.missingSuffix);
assertNoViolations(
"Not all subclasses of " + check.testClass.getSimpleName()
+ " match the naming convention. Concrete classes must end with [Tests]",
check.missingSuffix);
assertNoViolations("Classes ending with [Tests] are abstract or interfaces", check.notRunnable);
assertNoViolations("Found inner classes that are tests, which are excluded from the test runner", check.innerClasses);
assertNoViolations("Pure Unit-Test found must subclass [" + check.testClass.getSimpleName() + "]", check.pureUnitTest);
assertNoViolations("Classes ending with [Tests] must subclass [" + check.testClass.getSimpleName() + "]", check.notImplementing);
assertNoViolations(
"Classes ending with [Tests] or [IT] or extending [" + check.testClass.getSimpleName() + "] must be in src/test/java",
check.testsInMain);
if (skipIntegTestsInDisguise == false) {
assertNoViolations("Subclasses of " + check.integTestClass.getSimpleName() +
" should end with IT as they are integration tests", check.integTestsInDisguise);
assertNoViolations(
"Subclasses of " + check.integTestClass.getSimpleName() + " should end with IT as they are integration tests",
check.integTestsInDisguise);
}
}
@@ -104,86 +124,78 @@ public class NamingConventionsCheck {
private final Set<Class<?>> integTestsInDisguise = new HashSet<>();
private final Set<Class<?>> notRunnable = new HashSet<>();
private final Set<Class<?>> innerClasses = new HashSet<>();
private final Set<Class<?>> testsInMain = new HashSet<>();
private final Class<?> testClass;
private final Class<?> integTestClass;
public NamingConventionsCheck(Class<?> testClass, Class<?> integTestClass) {
this.testClass = testClass;
this.testClass = Objects.requireNonNull(testClass, "--test-class is required");
this.integTestClass = integTestClass;
}
public void check(Path rootPath, boolean skipTestsInDisguised) throws IOException {
Files.walkFileTree(rootPath, new FileVisitor<Path>() {
/**
* The package name of the directory we are currently visiting. Kept as a string rather than something fancy because we load
* just about every class and doing so requires building a string out of it anyway. At least this way we don't need to build the
* first part of the string over and over and over again.
*/
private String packageName;
public void checkTests(Path rootPath, boolean skipTestsInDisguised) throws IOException {
Files.walkFileTree(rootPath, new TestClassVisitor() {
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
// First we visit the root directory
if (packageName == null) {
// And its package is the empty string regardless of the directory name
packageName = "";
} else {
packageName += dir.getFileName() + ".";
protected void visitTestClass(Class<?> clazz) {
if (skipTestsInDisguised == false && integTestClass.isAssignableFrom(clazz)) {
integTestsInDisguise.add(clazz);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
// Go up one package by jumping back to the second to last '.'
packageName = packageName.substring(0, 1 + packageName.lastIndexOf('.', packageName.length() - 2));
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
String filename = file.getFileName().toString();
if (filename.endsWith(".class")) {
String className = filename.substring(0, filename.length() - ".class".length());
Class<?> clazz = loadClassWithoutInitializing(packageName + className);
if (clazz.getName().endsWith("Tests")) {
if (skipTestsInDisguised == false && integTestClass.isAssignableFrom(clazz)) {
integTestsInDisguise.add(clazz);
}
if (Modifier.isAbstract(clazz.getModifiers()) || Modifier.isInterface(clazz.getModifiers())) {
notRunnable.add(clazz);
} else if (isTestCase(clazz) == false) {
notImplementing.add(clazz);
} else if (Modifier.isStatic(clazz.getModifiers())) {
innerClasses.add(clazz);
}
} else if (clazz.getName().endsWith("IT")) {
if (isTestCase(clazz) == false) {
notImplementing.add(clazz);
}
} else if (Modifier.isAbstract(clazz.getModifiers()) == false && Modifier.isInterface(clazz.getModifiers()) == false) {
if (isTestCase(clazz)) {
missingSuffix.add(clazz);
} else if (junit.framework.Test.class.isAssignableFrom(clazz)) {
pureUnitTest.add(clazz);
}
}
if (Modifier.isAbstract(clazz.getModifiers()) || Modifier.isInterface(clazz.getModifiers())) {
notRunnable.add(clazz);
} else if (isTestCase(clazz) == false) {
notImplementing.add(clazz);
} else if (Modifier.isStatic(clazz.getModifiers())) {
innerClasses.add(clazz);
}
return FileVisitResult.CONTINUE;
}
private boolean isTestCase(Class<?> clazz) {
return testClass.isAssignableFrom(clazz);
}
@Override
public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
throw exc;
protected void visitIntegrationTestClass(Class<?> clazz) {
if (isTestCase(clazz) == false) {
notImplementing.add(clazz);
}
}
@Override
protected void visitOtherClass(Class<?> clazz) {
if (Modifier.isAbstract(clazz.getModifiers()) || Modifier.isInterface(clazz.getModifiers())) {
return;
}
if (isTestCase(clazz)) {
missingSuffix.add(clazz);
} else if (junit.framework.Test.class.isAssignableFrom(clazz)) {
pureUnitTest.add(clazz);
}
}
});
}
public void checkMain(Path rootPath) throws IOException {
Files.walkFileTree(rootPath, new TestClassVisitor() {
@Override
protected void visitTestClass(Class<?> clazz) {
testsInMain.add(clazz);
}
@Override
protected void visitIntegrationTestClass(Class<?> clazz) {
testsInMain.add(clazz);
}
@Override
protected void visitOtherClass(Class<?> clazz) {
if (Modifier.isAbstract(clazz.getModifiers()) || Modifier.isInterface(clazz.getModifiers())) {
return;
}
if (isTestCase(clazz)) {
testsInMain.add(clazz);
}
}
});
}
/**
* Fail the process if there are any violations in the set. Named to look like a junit assertion even though it isn't because it is
* similar enough.
@@ -203,7 +215,7 @@ public class NamingConventionsCheck {
* similar enough.
*/
private static void assertViolation(String className, Set<Class<?>> set) {
className = "org.elasticsearch.test.NamingConventionsCheckBadClasses$" + className;
className = className.startsWith("org") ? className : "org.elasticsearch.test.NamingConventionsCheckBadClasses$" + className;
if (false == set.remove(loadClassWithoutInitializing(className))) {
System.err.println("Error in NamingConventionsCheck! Expected [" + className + "] to be a violation but wasn't.");
System.exit(1);
@@ -229,4 +241,74 @@ public class NamingConventionsCheck {
throw new RuntimeException(e);
}
}
abstract class TestClassVisitor implements FileVisitor<Path> {
/**
* The package name of the directory we are currently visiting. Kept as a string rather than something fancy because we load
* just about every class and doing so requires building a string out of it anyway. At least this way we don't need to build the
* first part of the string over and over and over again.
*/
private String packageName;
/**
* Visit classes named like a test.
*/
protected abstract void visitTestClass(Class<?> clazz);
/**
* Visit classes named like an integration test.
*/
protected abstract void visitIntegrationTestClass(Class<?> clazz);
/**
* Visit classes not named like a test at all.
*/
protected abstract void visitOtherClass(Class<?> clazz);
@Override
public final FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
// First we visit the root directory
if (packageName == null) {
// And its package is the empty string regardless of the directory name
packageName = "";
} else {
packageName += dir.getFileName() + ".";
}
return FileVisitResult.CONTINUE;
}
@Override
public final FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
// Go up one package by jumping back to the second to last '.'
packageName = packageName.substring(0, 1 + packageName.lastIndexOf('.', packageName.length() - 2));
return FileVisitResult.CONTINUE;
}
@Override
public final FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
String filename = file.getFileName().toString();
if (filename.endsWith(".class")) {
String className = filename.substring(0, filename.length() - ".class".length());
Class<?> clazz = loadClassWithoutInitializing(packageName + className);
if (clazz.getName().endsWith("Tests")) {
visitTestClass(clazz);
} else if (clazz.getName().endsWith("IT")) {
visitIntegrationTestClass(clazz);
} else {
visitOtherClass(clazz);
}
}
return FileVisitResult.CONTINUE;
}
/**
* Is this class a test case?
*/
protected boolean isTestCase(Class<?> clazz) {
return testClass.isAssignableFrom(clazz);
}
@Override
public final FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
throw exc;
}
}
}


@@ -0,0 +1,26 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test;
/**
* This class should fail the naming conventions self test.
*/
public class NamingConventionsCheckInMainIT {
}


@@ -0,0 +1,26 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.test;
/**
* This class should fail the naming conventions self test.
*/
public class NamingConventionsCheckInMainTests {
}


@@ -39,7 +39,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.RandomObjects;
@@ -287,7 +286,7 @@ public class RequestTests extends ESTestCase {
expectedParams.put("doc_as_upsert", "true");
}
} else {
updateRequest.script(new Script("_value + 1"));
updateRequest.script(mockScript("_value + 1"));
updateRequest.scriptedUpsert(randomBoolean());
}
if (randomBoolean()) {
@@ -520,7 +519,7 @@ public class RequestTests extends ESTestCase {
{
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(new DeleteRequest("index", "type", "0"));
bulkRequest.add(new UpdateRequest("index", "type", "1").script(new Script("test")));
bulkRequest.add(new UpdateRequest("index", "type", "1").script(mockScript("test")));
bulkRequest.add(new DeleteRequest("index", "type", "2"));
Request request = Request.bulk(bulkRequest);


@@ -196,7 +196,7 @@ public class Version implements Comparable<Version> {
if (snapshot = version.endsWith("-SNAPSHOT")) {
version = version.substring(0, version.length() - 9);
}
String[] parts = version.split("\\.|\\-");
String[] parts = version.split("[.-]");
if (parts.length < 3 || parts.length > 4) {
throw new IllegalArgumentException(
"the version needs to contain major, minor, and revision, and optionally the build: " + version);


@@ -28,7 +28,10 @@ import java.io.IOException;
/**
* Used to keep track of original indices within internal (e.g. shard level) requests
*/
public class OriginalIndices implements IndicesRequest {
public final class OriginalIndices implements IndicesRequest {
//constant to use when original indices are not applicable and will not be serialized across the wire
public static final OriginalIndices NONE = new OriginalIndices(null, null);
private final String[] indices;
private final IndicesOptions indicesOptions;
@@ -39,7 +42,6 @@ public class OriginalIndices implements IndicesRequest {
public OriginalIndices(String[] indices, IndicesOptions indicesOptions) {
this.indices = indices;
assert indicesOptions != null;
this.indicesOptions = indicesOptions;
}
@@ -57,8 +59,8 @@
return new OriginalIndices(in.readStringArray(), IndicesOptions.readIndicesOptions(in));
}
public static void writeOriginalIndices(OriginalIndices originalIndices, StreamOutput out) throws IOException {
assert originalIndices != NONE;
out.writeStringArrayNullable(originalIndices.indices);
originalIndices.indicesOptions.writeIndicesOptions(out);
}
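
(NONE is a sentinel meaning "original indices deliberately absent", and the new assertion guarantees it never crosses the wire. A sketch of the contract, with out standing in for some StreamOutput and assertions enabled:)

    OriginalIndices indices = new OriginalIndices(new String[] {"logs-*"}, IndicesOptions.strictExpandOpen());
    OriginalIndices.writeOriginalIndices(indices, out);                 // fine
    // OriginalIndices.writeOriginalIndices(OriginalIndices.NONE, out); // would trip the new assert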


@@ -88,7 +88,7 @@ public class TransportClusterSearchShardsAction extends
}
Set<String> nodeIds = new HashSet<>();
GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices,
GroupShardsIterator<ShardIterator> groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices,
routingMap, request.preference());
ShardRouting shard;
ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()];


@@ -29,7 +29,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.util.concurrent.AtomicArray;
@@ -75,8 +74,9 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
Function<String, Transport.Connection> nodeIdToConnection,
Map<String, AliasFilter> aliasFilter, Map<String, Float> concreteIndexBoosts,
Executor executor, SearchRequest request,
ActionListener<SearchResponse> listener, GroupShardsIterator shardsIts, TransportSearchAction.SearchTimeProvider timeProvider,
long clusterStateVersion, SearchTask task, SearchPhaseResults<Result> resultConsumer) {
ActionListener<SearchResponse> listener, GroupShardsIterator<SearchShardIterator> shardsIts,
TransportSearchAction.SearchTimeProvider timeProvider, long clusterStateVersion,
SearchTask task, SearchPhaseResults<Result> resultConsumer) {
super(name, request, shardsIts, logger);
this.timeProvider = timeProvider;
this.logger = logger;
@@ -209,8 +209,9 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
private void raisePhaseFailure(SearchPhaseExecutionException exception) {
results.getSuccessfulResults().forEach((entry) -> {
try {
Transport.Connection connection = nodeIdToConnection.apply(entry.getSearchShardTarget().getNodeId());
sendReleaseSearchContext(entry.getRequestId(), connection);
SearchShardTarget searchShardTarget = entry.getSearchShardTarget();
Transport.Connection connection = nodeIdToConnection.apply(searchShardTarget.getNodeId());
sendReleaseSearchContext(entry.getRequestId(), connection, searchShardTarget.getOriginalIndices());
} catch (Exception inner) {
inner.addSuppressed(exception);
logger.trace("failed to release context", inner);
@@ -296,11 +297,11 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
listener.onFailure(e);
}
public final ShardSearchTransportRequest buildShardSearchRequest(ShardIterator shardIt, ShardRouting shard) {
public final ShardSearchTransportRequest buildShardSearchRequest(SearchShardIterator shardIt, ShardRouting shard) {
AliasFilter filter = aliasFilter.get(shard.index().getUUID());
assert filter != null;
float indexBoost = concreteIndexBoosts.getOrDefault(shard.index().getUUID(), DEFAULT_INDEX_BOOST);
return new ShardSearchTransportRequest(request, shardIt.shardId(), getNumShards(),
return new ShardSearchTransportRequest(shardIt.getOriginalIndices(), request, shardIt.shardId(), getNumShards(),
filter, indexBoost, timeProvider.getAbsoluteStartMillis());
}


@@ -73,7 +73,8 @@ final class DfsQueryPhase extends SearchPhase {
for (final DfsSearchResult dfsResult : resultList) {
final SearchShardTarget searchShardTarget = dfsResult.getSearchShardTarget();
Transport.Connection connection = context.getConnection(searchShardTarget.getNodeId());
QuerySearchRequest querySearchRequest = new QuerySearchRequest(context.getRequest(), dfsResult.getRequestId(), dfs);
QuerySearchRequest querySearchRequest = new QuerySearchRequest(searchShardTarget.getOriginalIndices(),
dfsResult.getRequestId(), dfs);
final int shardIndex = dfsResult.getShardIndex();
searchTransportService.sendExecuteQuery(connection, querySearchRequest, context.getTask(),
new SearchActionListener<QuerySearchResult>(searchShardTarget, shardIndex) {
@@ -95,7 +96,7 @@ final class DfsQueryPhase extends SearchPhase {
// the query might not have been executed at all (for example because thread pool rejected
// execution) and the search context that was created in dfs phase might not be released.
// release it again to be in the safe side
context.sendReleaseSearchContext(querySearchRequest.id(), connection);
context.sendReleaseSearchContext(querySearchRequest.id(), connection, searchShardTarget.getOriginalIndices());
}
}
});


@@ -24,6 +24,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
@@ -73,7 +74,6 @@ final class FetchSearchPhase extends SearchPhase {
this.context = context;
this.logger = context.getLogger();
this.resultConsumer = resultConsumer;
}
@Override
@@ -112,7 +112,7 @@ final class FetchSearchPhase extends SearchPhase {
final IntArrayList[] docIdsToLoad = searchPhaseController.fillDocIdsToLoad(numShards, reducedQueryPhase.scoreDocs);
if (reducedQueryPhase.scoreDocs.length == 0) { // no docs to fetch -- sidestep everything and return
phaseResults.stream()
.map(e -> e.queryResult())
.map(SearchPhaseResult::queryResult)
.forEach(this::releaseIrrelevantSearchContext); // we have to release contexts here to free up resources
finishPhase.run();
} else {
@@ -135,10 +135,11 @@
// in any case we count down this result since we don't talk to this shard anymore
counter.countDown();
} else {
Transport.Connection connection = context.getConnection(queryResult.getSearchShardTarget().getNodeId());
SearchShardTarget searchShardTarget = queryResult.getSearchShardTarget();
Transport.Connection connection = context.getConnection(searchShardTarget.getNodeId());
ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult().getRequestId(), i, entry,
lastEmittedDocPerShard);
executeFetch(i, queryResult.getSearchShardTarget(), counter, fetchSearchRequest, queryResult.queryResult(),
lastEmittedDocPerShard, searchShardTarget.getOriginalIndices());
executeFetch(i, searchShardTarget, counter, fetchSearchRequest, queryResult.queryResult(),
connection);
}
}
@@ -147,9 +148,9 @@
}
protected ShardFetchSearchRequest createFetchRequest(long queryId, int index, IntArrayList entry,
ScoreDoc[] lastEmittedDocPerShard) {
ScoreDoc[] lastEmittedDocPerShard, OriginalIndices originalIndices) {
final ScoreDoc lastEmittedDoc = (lastEmittedDocPerShard != null) ? lastEmittedDocPerShard[index] : null;
return new ShardFetchSearchRequest(context.getRequest(), queryId, entry, lastEmittedDoc);
return new ShardFetchSearchRequest(originalIndices, queryId, entry, lastEmittedDoc);
}
private void executeFetch(final int shardIndex, final SearchShardTarget shardTarget,
@@ -189,8 +190,9 @@
// and if it has at least one hit that didn't make it to the global topDocs
if (context.getRequest().scroll() == null && queryResult.hasSearchContext()) {
try {
Transport.Connection connection = context.getConnection(queryResult.getSearchShardTarget().getNodeId());
context.sendReleaseSearchContext(queryResult.getRequestId(), connection);
SearchShardTarget searchShardTarget = queryResult.getSearchShardTarget();
Transport.Connection connection = context.getConnection(searchShardTarget.getNodeId());
context.sendReleaseSearchContext(queryResult.getRequestId(), connection, searchShardTarget.getOriginalIndices());
} catch (Exception e) {
context.getLogger().trace("failed to release context", e);
}


@@ -46,12 +46,12 @@ import java.util.stream.Stream;
*/
abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends SearchPhase {
private final SearchRequest request;
private final GroupShardsIterator shardsIts;
private final GroupShardsIterator<SearchShardIterator> shardsIts;
private final Logger logger;
private final int expectedTotalOps;
private final AtomicInteger totalOps = new AtomicInteger();
InitialSearchPhase(String name, SearchRequest request, GroupShardsIterator shardsIts, Logger logger) {
InitialSearchPhase(String name, SearchRequest request, GroupShardsIterator<SearchShardIterator> shardsIts, Logger logger) {
super(name);
this.request = request;
this.shardsIts = shardsIts;
@@ -64,10 +64,10 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
}
private void onShardFailure(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId,
final ShardIterator shardIt, Exception e) {
final SearchShardIterator shardIt, Exception e) {
// we always add the shard failure for a specific shard instance
// we do make sure to clean it on a successful response from a shard
SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId());
SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId(), shardIt.getOriginalIndices());
onShardFailure(shardIndex, shardTarget, e);
if (totalOps.incrementAndGet() == expectedTotalOps) {
@@ -124,7 +124,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
@Override
public final void run() throws IOException {
int shardIndex = -1;
for (final ShardIterator shardIt : shardsIts) {
for (final SearchShardIterator shardIt : shardsIts) {
shardIndex++;
final ShardRouting shard = shardIt.nextOrNull();
if (shard != null) {
@@ -136,7 +136,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
}
}
private void performPhaseOnShard(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) {
private void performPhaseOnShard(final int shardIndex, final SearchShardIterator shardIt, final ShardRouting shard) {
if (shard == null) {
// TODO upgrade this to an assert...
// no more active shards... (we should not really get here, but just for safety)
@@ -144,7 +144,7 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
} else {
try {
executePhaseOnShard(shardIt, shard, new SearchActionListener<FirstResult>(new SearchShardTarget(shard.currentNodeId(),
shardIt.shardId()), shardIndex) {
shardIt.shardId(), shardIt.getOriginalIndices()), shardIndex) {
@Override
public void innerOnResponse(FirstResult result) {
onShardResult(result, shardIt);
@@ -213,7 +213,8 @@ abstract class InitialSearchPhase<FirstResult extends SearchPhaseResult> extends
* @param shard the shard routing to send the request for
* @param listener the listener to notify on response
*/
protected abstract void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, SearchActionListener<FirstResult> listener);
protected abstract void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting shard,
SearchActionListener<FirstResult> listener);
/**
* This class acts as a basic result collection that can be extended to do on-the-fly reduction or result processing


@@ -162,7 +162,7 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
/**
* Fetches all shards for the search request from this remote connection. This is used to later run the search on the remote end.
*/
public void fetchSearchShards(SearchRequest searchRequest, final List<String> indices,
public void fetchSearchShards(SearchRequest searchRequest, final String[] indices,
ActionListener<ClusterSearchShardsResponse> listener) {
if (connectedNodes.isEmpty()) {
// just in case if we are not connected for some reason we try to connect and if we fail we have to notify the listener
@@ -176,10 +176,10 @@ final class RemoteClusterConnection extends AbstractComponent implements Transpo
}
}
private void fetchShardsInternal(SearchRequest searchRequest, List<String> indices,
private void fetchShardsInternal(SearchRequest searchRequest, String[] indices,
final ActionListener<ClusterSearchShardsResponse> listener) {
final DiscoveryNode node = nodeSupplier.get();
ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices.toArray(new String[indices.size()]))
ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices)
.indicesOptions(searchRequest.indicesOptions()).local(true).preference(searchRequest.preference())
.routing(searchRequest.routing());
transportService.sendRequest(node, ClusterSearchShardsAction.NAME, searchShardsRequest,


@@ -22,14 +22,13 @@ import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
import org.elasticsearch.action.support.GroupedActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.metadata.ClusterNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.PlainShardIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
@@ -243,18 +242,18 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
return remoteClusters.containsKey(clusterName);
}
void collectSearchShards(SearchRequest searchRequest, Map<String, List<String>> remoteIndicesByCluster,
void collectSearchShards(SearchRequest searchRequest, Map<String, OriginalIndices> remoteIndicesByCluster,
ActionListener<Map<String, ClusterSearchShardsResponse>> listener) {
final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size());
final Map<String, ClusterSearchShardsResponse> searchShardsResponses = new ConcurrentHashMap<>();
final AtomicReference<TransportException> transportException = new AtomicReference<>();
for (Map.Entry<String, List<String>> entry : remoteIndicesByCluster.entrySet()) {
for (Map.Entry<String, OriginalIndices> entry : remoteIndicesByCluster.entrySet()) {
final String clusterName = entry.getKey();
RemoteClusterConnection remoteClusterConnection = remoteClusters.get(clusterName);
if (remoteClusterConnection == null) {
throw new IllegalArgumentException("no such remote cluster: " + clusterName);
}
final List<String> indices = entry.getValue();
final String[] indices = entry.getValue().indices();
remoteClusterConnection.fetchSearchShards(searchRequest, indices,
new ActionListener<ClusterSearchShardsResponse>() {
@Override
@@ -288,16 +287,16 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
}
}
Function<String, Transport.Connection> processRemoteShards(Map<String, ClusterSearchShardsResponse> searchShardsResponses,
List<ShardIterator> remoteShardIterators,
Map<String, AliasFilter> aliasFilterMap) {
Map<String, OriginalIndices> remoteIndicesByCluster,
List<SearchShardIterator> remoteShardIterators,
Map<String, AliasFilter> aliasFilterMap) {
Map<String, Supplier<Transport.Connection>> nodeToCluster = new HashMap<>();
for (Map.Entry<String, ClusterSearchShardsResponse> entry : searchShardsResponses.entrySet()) {
String clusterName = entry.getKey();
String clusterAlias = entry.getKey();
ClusterSearchShardsResponse searchShardsResponse = entry.getValue();
for (DiscoveryNode remoteNode : searchShardsResponse.getNodes()) {
nodeToCluster.put(remoteNode.getId(), () -> getConnection(remoteNode, clusterName));
nodeToCluster.put(remoteNode.getId(), () -> getConnection(remoteNode, clusterAlias));
}
Map<String, AliasFilter> indicesAndFilters = searchShardsResponse.getIndicesAndFilters();
for (ClusterSearchShardsGroup clusterSearchShardsGroup : searchShardsResponse.getGroups()) {
@@ -305,9 +304,11 @@ public final class RemoteClusterService extends AbstractComponent implements Clo
//this ends up in the hits returned with the search response
ShardId shardId = clusterSearchShardsGroup.getShardId();
Index remoteIndex = shardId.getIndex();
Index index = new Index(clusterName + REMOTE_CLUSTER_INDEX_SEPARATOR + remoteIndex.getName(), remoteIndex.getUUID());
ShardIterator shardIterator = new PlainShardIterator(new ShardId(index, shardId.getId()),
Arrays.asList(clusterSearchShardsGroup.getShards()));
Index index = new Index(clusterAlias + REMOTE_CLUSTER_INDEX_SEPARATOR + remoteIndex.getName(), remoteIndex.getUUID());
OriginalIndices originalIndices = remoteIndicesByCluster.get(clusterAlias);
assert originalIndices != null;
SearchShardIterator shardIterator = new SearchShardIterator(new ShardId(index, shardId.getId()),
Arrays.asList(clusterSearchShardsGroup.getShards()), originalIndices);
remoteShardIterators.add(shardIterator);
AliasFilter aliasFilter;
if (indicesAndFilters == null) {


@@ -49,5 +49,4 @@ abstract class SearchActionListener<T extends SearchPhaseResult> implements Acti
}
protected abstract void innerOnResponse(T response);
}


@@ -22,7 +22,6 @@ package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.internal.AliasFilter;
@@ -46,7 +45,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction
final Executor executor,
final SearchRequest request,
final ActionListener<SearchResponse> listener,
final GroupShardsIterator shardsIts,
final GroupShardsIterator<SearchShardIterator> shardsIts,
final TransportSearchAction.SearchTimeProvider timeProvider,
final long clusterStateVersion,
final SearchTask task) {
@@ -70,7 +69,7 @@
@Override
protected void executePhaseOnShard(
final ShardIterator shardIt,
final SearchShardIterator shardIt,
final ShardRouting shard,
final SearchActionListener<DfsSearchResult> listener) {
getSearchTransport().sendExecuteDfs(getConnection(shard.currentNodeId()),


@@ -20,7 +20,7 @@ package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.search.SearchShardTarget;
@@ -97,16 +97,16 @@ interface SearchPhaseContext extends ActionListener<SearchResponse>, Executor {
* @see org.elasticsearch.search.fetch.FetchSearchResult#getRequestId()
*
*/
default void sendReleaseSearchContext(long contextId, Transport.Connection connection) {
default void sendReleaseSearchContext(long contextId, Transport.Connection connection, OriginalIndices originalIndices) {
if (connection != null) {
getSearchTransport().sendFreeContext(connection, contextId, getRequest());
getSearchTransport().sendFreeContext(connection, contextId, originalIndices);
}
}
/**
* Builds a request for the initial search phase.
*/
ShardSearchTransportRequest buildShardSearchRequest(ShardIterator shardIt, ShardRouting shard);
ShardSearchTransportRequest buildShardSearchRequest(SearchShardIterator shardIt, ShardRouting shard);
/**
* Processes the phase transition from on phase to another. This method handles all errors that happen during the initial run execution


@@ -22,7 +22,6 @@ package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.internal.AliasFilter;
@@ -32,8 +31,7 @@ import java.util.Map;
import java.util.concurrent.Executor;
import java.util.function.Function;
final class SearchQueryThenFetchAsyncAction
extends AbstractSearchAsyncAction<SearchPhaseResult> {
final class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<SearchPhaseResult> {
private final SearchPhaseController searchPhaseController;
@@ -47,7 +45,7 @@
final Executor executor,
final SearchRequest request,
final ActionListener<SearchResponse> listener,
final GroupShardsIterator shardsIts,
final GroupShardsIterator<SearchShardIterator> shardsIts,
final TransportSearchAction.SearchTimeProvider timeProvider,
long clusterStateVersion,
SearchTask task) {
@@ -70,7 +68,7 @@
}
protected void executePhaseOnShard(
final ShardIterator shardIt,
final SearchShardIterator shardIt,
final ShardRouting shard,
final SearchActionListener<SearchPhaseResult> listener) {
getSearchTransport().sendExecuteQuery(


@@ -0,0 +1,55 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.search;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.cluster.routing.PlainShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.index.shard.ShardId;
import java.util.List;
/**
* Extension of {@link PlainShardIterator} used in the search api, which also holds the {@link OriginalIndices}
* of the search request. Useful especially with cross cluster search, as each cluster has its own set of original indices.
*/
public final class SearchShardIterator extends PlainShardIterator {
private final OriginalIndices originalIndices;
/**
* Creates a {@link PlainShardIterator} instance that iterates over the subset of the given shards
* that belong to the given <code>shardId</code>.
*
* @param shardId shard id of the group
* @param shards shards to iterate
*/
public SearchShardIterator(ShardId shardId, List<ShardRouting> shards, OriginalIndices originalIndices) {
super(shardId, shards);
this.originalIndices = originalIndices;
}
/**
* Returns the original indices associated with this shard iterator, specifically with the cluster that this shard belongs to.
*/
public OriginalIndices getOriginalIndices() {
return originalIndices;
}
}
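
(In a cross-cluster search each remote shard group is wrapped with the OriginalIndices of its own cluster, along the lines of this sketch — shardRoutings and searchRequest are assumed to be in scope:)

    ShardId shardId = new ShardId(new Index("remote1:logs", "index-uuid"), 0);
    OriginalIndices remote = new OriginalIndices(new String[] {"logs"}, searchRequest.indicesOptions());
    SearchShardIterator it = new SearchShardIterator(shardId, shardRoutings, remote);
    assert it.getOriginalIndices() == remote;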


@@ -92,8 +92,8 @@ public class SearchTransportService extends AbstractLifecycleComponent {
}
}
public void sendFreeContext(Transport.Connection connection, final long contextId, SearchRequest request) {
transportService.sendRequest(connection, FREE_CONTEXT_ACTION_NAME, new SearchFreeContextRequest(request, contextId),
public void sendFreeContext(Transport.Connection connection, final long contextId, OriginalIndices originalIndices) {
transportService.sendRequest(connection, FREE_CONTEXT_ACTION_NAME, new SearchFreeContextRequest(originalIndices, contextId),
TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(new ActionListener<SearchFreeContextResponse>() {
@Override
public void onResponse(SearchFreeContextResponse response) {
@@ -219,9 +219,9 @@ public class SearchTransportService extends AbstractLifecycleComponent {
SearchFreeContextRequest() {
}
SearchFreeContextRequest(SearchRequest request, long id) {
SearchFreeContextRequest(OriginalIndices originalIndices, long id) {
super(id);
this.originalIndices = new OriginalIndices(request);
this.originalIndices = originalIndices;
}
@Override


@@ -21,6 +21,7 @@ package org.elasticsearch.action.search;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
@@ -212,7 +213,8 @@ public class ShardSearchFailure implements ShardOperationFailedException {
}
}
return new ShardSearchFailure(exception,
new SearchShardTarget(nodeId, new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), shardId)));
new SearchShardTarget(nodeId,
new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), shardId), OriginalIndices.NONE));
}
@Override


@@ -20,6 +20,7 @@
package org.elasticsearch.action.search;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterState;
@@ -162,10 +163,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
long getRelativeCurrentNanos() {
return relativeCurrentNanosProvider.getAsLong();
}
}
@Override
protected void doExecute(Task task, SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
final long absoluteStartMillis = System.currentTimeMillis();
@@ -173,17 +172,27 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
final SearchTimeProvider timeProvider =
new SearchTimeProvider(absoluteStartMillis, relativeStartNanos, System::nanoTime);
final String[] localIndices;
final Map<String, List<String>> remoteClusterIndices;
final OriginalIndices localIndices;
final Map<String, OriginalIndices> remoteClusterIndices;
final ClusterState clusterState = clusterService.state();
if (remoteClusterService.isCrossClusterSearchEnabled()) {
remoteClusterIndices = remoteClusterService.groupClusterIndices( searchRequest.indices(), // empty string is not allowed
final Map<String, List<String>> groupedIndices = remoteClusterService.groupClusterIndices(searchRequest.indices(),
// empty string is not allowed
idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
List<String> remove = remoteClusterIndices.remove(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY);
localIndices = remove == null ? Strings.EMPTY_ARRAY : remove.toArray(new String[remove.size()]);
List<String> remove = groupedIndices.remove(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY);
String[] indices = remove == null ? Strings.EMPTY_ARRAY : remove.toArray(new String[remove.size()]);
localIndices = new OriginalIndices(indices, searchRequest.indicesOptions());
Map<String, OriginalIndices> originalIndicesMap = new HashMap<>();
for (Map.Entry<String, List<String>> entry : groupedIndices.entrySet()) {
String clusterAlias = entry.getKey();
List<String> originalIndices = entry.getValue();
originalIndicesMap.put(clusterAlias,
new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), searchRequest.indicesOptions()));
}
remoteClusterIndices = Collections.unmodifiableMap(originalIndicesMap);
} else {
remoteClusterIndices = Collections.emptyMap();
localIndices = searchRequest.indices();
localIndices = new OriginalIndices(searchRequest);
}
if (remoteClusterIndices.isEmpty()) {
@@ -192,18 +201,18 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
} else {
remoteClusterService.collectSearchShards(searchRequest, remoteClusterIndices,
ActionListener.wrap((searchShardsResponses) -> {
List<ShardIterator> remoteShardIterators = new ArrayList<>();
List<SearchShardIterator> remoteShardIterators = new ArrayList<>();
Map<String, AliasFilter> remoteAliasFilters = new HashMap<>();
Function<String, Transport.Connection> connectionFunction = remoteClusterService.processRemoteShards(
searchShardsResponses, remoteShardIterators, remoteAliasFilters);
searchShardsResponses, remoteClusterIndices, remoteShardIterators, remoteAliasFilters);
executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, remoteShardIterators,
connectionFunction, clusterState, remoteAliasFilters, listener);
}, listener::onFailure));
}
}
private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, SearchRequest searchRequest, String[] localIndices,
List<ShardIterator> remoteShardIterators, Function<String, Transport.Connection> remoteConnections,
private void executeSearch(SearchTask task, SearchTimeProvider timeProvider, SearchRequest searchRequest, OriginalIndices localIndices,
List<SearchShardIterator> remoteShardIterators, Function<String, Transport.Connection> remoteConnections,
ClusterState clusterState, Map<String, AliasFilter> remoteAliasMap,
ActionListener<SearchResponse> listener) {
@@ -212,11 +221,11 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
// date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
// of just for the _search api
final Index[] indices;
if (localIndices.length == 0 && remoteShardIterators.size() > 0) {
if (localIndices.indices().length == 0 && remoteShardIterators.size() > 0) {
indices = Index.EMPTY_ARRAY; // don't search on _all if only remote indices were specified
} else {
indices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(),
timeProvider.getAbsoluteStartMillis(), localIndices);
timeProvider.getAbsoluteStartMillis(), localIndices.indices());
}
Map<String, AliasFilter> aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(),
@@ -225,9 +234,9 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
for (int i = 0; i < indices.length; i++) {
concreteIndices[i] = indices[i].getName();
}
GroupShardsIterator localShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap,
GroupShardsIterator<ShardIterator> localShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap,
searchRequest.preference());
GroupShardsIterator shardIterators = mergeShardsIterators(localShardsIterator, remoteShardIterators);
GroupShardsIterator<SearchShardIterator> shardIterators = mergeShardsIterators(localShardsIterator, localIndices, remoteShardIterators);
failIfOverShardCountLimit(clusterService, shardIterators.size());
@@ -268,19 +277,17 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener).start();
}
private static GroupShardsIterator mergeShardsIterators(GroupShardsIterator localShardsIterator,
List<ShardIterator> remoteShardIterators) {
if (remoteShardIterators.isEmpty()) {
return localShardsIterator;
}
List<ShardIterator> shards = new ArrayList<>();
for (ShardIterator shardIterator : remoteShardIterators) {
static GroupShardsIterator<SearchShardIterator> mergeShardsIterators(GroupShardsIterator<ShardIterator> localShardsIterator,
OriginalIndices localIndices,
List<SearchShardIterator> remoteShardIterators) {
List<SearchShardIterator> shards = new ArrayList<>();
for (SearchShardIterator shardIterator : remoteShardIterators) {
shards.add(shardIterator);
}
for (ShardIterator shardIterator : localShardsIterator) {
shards.add(shardIterator);
shards.add(new SearchShardIterator(shardIterator.shardId(), shardIterator.getShardRoutings(), localIndices));
}
return new GroupShardsIterator(shards);
return new GroupShardsIterator<>(shards);
}
@Override
@@ -288,7 +295,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
throw new UnsupportedOperationException("the task parameter is required");
}
private AbstractSearchAsyncAction searchAsyncAction(SearchTask task, SearchRequest searchRequest, GroupShardsIterator shardIterators,
private AbstractSearchAsyncAction searchAsyncAction(SearchTask task, SearchRequest searchRequest,
GroupShardsIterator<SearchShardIterator> shardIterators,
SearchTimeProvider timeProvider, Function<String, Transport.Connection> connectionLookup,
long clusterStateVersion, Map<String, AliasFilter> aliasFilter,
Map<String, Float> concreteIndexBoosts,


@@ -94,7 +94,7 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
* Determines the shards this operation will be executed on. The operation is executed once per shard iterator, typically
* on the first shard in it. If the operation fails, it will be retried on the next shard in the iterator.
*/
protected abstract GroupShardsIterator shards(ClusterState clusterState, Request request, String[] concreteIndices);
protected abstract GroupShardsIterator<ShardIterator> shards(ClusterState clusterState, Request request, String[] concreteIndices);
protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request);
@@ -107,7 +107,7 @@
private final ActionListener<Response> listener;
private final ClusterState clusterState;
private final DiscoveryNodes nodes;
private final GroupShardsIterator shardsIts;
private final GroupShardsIterator<ShardIterator> shardsIts;
private final int expectedOps;
private final AtomicInteger counterOps = new AtomicInteger();
private final AtomicReferenceArray shardsResponses;


@@ -270,7 +270,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
ShardsIterator shardIt = shards(clusterState, request, concreteIndices);
nodeIds = new HashMap<>();
for (ShardRouting shard : shardIt.asUnordered()) {
for (ShardRouting shard : shardIt) {
// send a request to the shard only if it is assigned to a node that is in the local node's cluster state
// a scenario in which a shard can be assigned but to a node that is not in the local node's cluster state
// is when the shard is assigned to the master node, the local node has detected the master as failed


@@ -58,7 +58,7 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
protected ShardIterator shards(ClusterState state, InternalRequest request) {
if (request.request().doc() != null && request.request().routing() == null) {
// artificial document without routing specified, ignore its "id" and use either random shard or according to preference
GroupShardsIterator groupShardsIter = clusterService.operationRouting().searchShards(state,
GroupShardsIterator<ShardIterator> groupShardsIter = clusterService.operationRouting().searchShards(state,
new String[] { request.concreteIndex() }, null, request.request().preference());
return groupShardsIter.iterator().next();
}


@@ -41,8 +41,6 @@ import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@@ -62,7 +60,6 @@ import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.text.ParseException;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
@@ -440,12 +437,14 @@
return mappings.get(mappingType);
}
public static final Setting<String> INDEX_SHRINK_SOURCE_UUID = Setting.simpleString("index.shrink.source.uuid");
public static final Setting<String> INDEX_SHRINK_SOURCE_NAME = Setting.simpleString("index.shrink.source.name");
public static final String INDEX_SHRINK_SOURCE_UUID_KEY = "index.shrink.source.uuid";
public static final String INDEX_SHRINK_SOURCE_NAME_KEY = "index.shrink.source.name";
public static final Setting<String> INDEX_SHRINK_SOURCE_UUID = Setting.simpleString(INDEX_SHRINK_SOURCE_UUID_KEY);
public static final Setting<String> INDEX_SHRINK_SOURCE_NAME = Setting.simpleString(INDEX_SHRINK_SOURCE_NAME_KEY);
public Index getMergeSourceIndex() {
return INDEX_SHRINK_SOURCE_UUID.exists(settings) ? new Index(INDEX_SHRINK_SOURCE_NAME.get(settings), INDEX_SHRINK_SOURCE_UUID.get(settings)) : null;
return INDEX_SHRINK_SOURCE_UUID.exists(settings) ? new Index(INDEX_SHRINK_SOURCE_NAME.get(settings), INDEX_SHRINK_SOURCE_UUID.get(settings)) : null;
}
/**

View File

@ -598,7 +598,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
indexSettingsBuilder
// we use "i.r.a.initial_recovery" rather than "i.r.a.require|include" since we want the replica to allocate right away
// once we are allocated.
.put("index.routing.allocation.initial_recovery._id",
.put(IndexMetaData.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getKey() + "_id",
Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray()))
// we only try once and then give up with a shrink index
.put("index.allocation.max_retries", 1)

View File

@ -198,7 +198,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
/** extract node roles from the given settings */
public static Set<Role> getRolesFromSettings(Settings settings) {
Set<Role> roles = new HashSet<>();
Set<Role> roles = EnumSet.noneOf(Role.class);
if (Node.NODE_INGEST_SETTING.get(settings)) {
roles.add(Role.INGEST);
}
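The switch from HashSet to EnumSet is a cheap win: for enum keys, EnumSet is backed by a bit vector, so add/contains and iteration avoid hashing entirely. A self-contained sketch of the pattern, using a stand-in Role enum rather than the real DiscoveryNode.Role:

    import java.util.EnumSet;
    import java.util.Set;

    enum Role { MASTER, DATA, INGEST }

    class RolesSketch {
        // Mirrors getRolesFromSettings: start empty, add roles as flags dictate.
        static Set<Role> rolesFor(boolean master, boolean data, boolean ingest) {
            Set<Role> roles = EnumSet.noneOf(Role.class);
            if (master) roles.add(Role.MASTER);
            if (data) roles.add(Role.DATA);
            if (ingest) roles.add(Role.INGEST);
            return roles;
        }
    }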

View File

@ -30,14 +30,14 @@ import java.util.List;
* ShardsIterators are always returned in ascending order independently of their order at construction
* time. The incoming iterators are sorted to ensure consistent iteration behavior across Nodes / JVMs.
*/
public final class GroupShardsIterator implements Iterable<ShardIterator> {
public final class GroupShardsIterator<ShardIt extends ShardIterator> implements Iterable<ShardIt> {
private final List<ShardIterator> iterators;
private final List<ShardIt> iterators;
/**
* Constructs a new GroupShardsIterator from the given list.
*/
public GroupShardsIterator(List<ShardIterator> iterators) {
public GroupShardsIterator(List<ShardIt> iterators) {
CollectionUtil.timSort(iterators);
this.iterators = iterators;
}
@ -60,7 +60,7 @@ public final class GroupShardsIterator implements Iterable<ShardIterator> {
*/
public int totalSizeWith1ForEmpty() {
int size = 0;
for (ShardIterator shard : iterators) {
for (ShardIt shard : iterators) {
size += Math.max(1, shard.size());
}
return size;
@ -75,7 +75,7 @@ public final class GroupShardsIterator implements Iterable<ShardIterator> {
}
@Override
public Iterator<ShardIterator> iterator() {
public Iterator<ShardIt> iterator() {
return iterators.iterator();
}
}
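With the type parameter in place, callers iterate at the precise element type instead of the raw ShardIterator. A sketch of a consumer, assuming only the routing classes touched in this diff:

    import org.elasticsearch.cluster.routing.GroupShardsIterator;
    import org.elasticsearch.cluster.routing.ShardIterator;
    import org.elasticsearch.cluster.routing.ShardRouting;

    class FirstShards {
        // Visits the first shard of each group; with the generic parameter the
        // enhanced for loop needs no casts, even for subtypes of ShardIterator.
        static void visitFirstShards(GroupShardsIterator<ShardIterator> groups) {
            for (ShardIterator group : groups) {
                ShardRouting first = group.nextOrNull(); // null for an empty group
                if (first != null) {
                    System.out.println(first.shardId());
                }
            }
        }
    }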

View File

@ -23,7 +23,6 @@ import com.carrotsearch.hppc.IntSet;
import com.carrotsearch.hppc.cursors.IntCursor;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -139,8 +138,8 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
"allocation set " + inSyncAllocationIds);
}
if (shardRouting.primary() && shardRouting.initializing() && shardRouting.relocating() == false &&
RecoverySource.isInitialRecovery(shardRouting.recoverySource().getType()) == false &&
if (shardRouting.primary() && shardRouting.initializing() &&
shardRouting.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
inSyncAllocationIds.contains(shardRouting.allocationId().getId()) == false)
throw new IllegalStateException("a primary shard routing " + shardRouting + " is a primary that is recovering from " +
"a known allocation id but has no corresponding entry in the in-sync " +
@ -265,37 +264,6 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
return new PlainShardsIterator(shuffler.shuffle(allActiveShards));
}
/**
* A group shards iterator where each group ({@link ShardIterator})
* is an iterator across a shard replication group.
*/
public GroupShardsIterator groupByShardsIt() {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<>(shards.size());
for (IndexShardRoutingTable indexShard : this) {
set.add(indexShard.shardsIt());
}
return new GroupShardsIterator(set);
}
/**
* A group shards iterator where each group is a single {@link ShardRouting}; a group
* is created for each shard routing.
* <p>
* This basically means that components that use the {@link GroupShardsIterator} will iterate
* over *all* the shards (all the replicas) within the index.</p>
*/
public GroupShardsIterator groupByAllIt() {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<>();
for (IndexShardRoutingTable indexShard : this) {
for (ShardRouting shardRouting : indexShard) {
set.add(shardRouting.shardsIt());
}
}
return new GroupShardsIterator(set);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;

View File

@ -68,7 +68,7 @@ public class OperationRouting extends AbstractComponent {
return preferenceActiveShardIterator(indexShard, clusterState.nodes().getLocalNodeId(), clusterState.nodes(), preference);
}
public GroupShardsIterator searchShards(ClusterState clusterState, String[] concreteIndices, @Nullable Map<String, Set<String>> routing, @Nullable String preference) {
public GroupShardsIterator<ShardIterator> searchShards(ClusterState clusterState, String[] concreteIndices, @Nullable Map<String, Set<String>> routing, @Nullable String preference) {
final Set<IndexShardRoutingTable> shards = computeTargetedShards(clusterState, concreteIndices, routing);
final Set<ShardIterator> set = new HashSet<>(shards.size());
for (IndexShardRoutingTable shard : shards) {
@ -77,7 +77,7 @@ public class OperationRouting extends AbstractComponent {
set.add(iterator);
}
}
return new GroupShardsIterator(new ArrayList<>(set));
return new GroupShardsIterator<>(new ArrayList<>(set));
}
private static final Map<String, Set<String>> EMPTY_ROUTING = Collections.emptyMap();

View File

@ -43,7 +43,6 @@ public class PlainShardIterator extends PlainShardsIterator implements ShardIter
this.shardId = shardId;
}
@Override
public ShardId shardId() {
return this.shardId;

View File

@ -18,6 +18,8 @@
*/
package org.elasticsearch.cluster.routing;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
/**
@ -74,7 +76,12 @@ public class PlainShardsIterator implements ShardsIterator {
}
@Override
public Iterable<ShardRouting> asUnordered() {
return shards;
public List<ShardRouting> getShardRoutings() {
return Collections.unmodifiableList(shards);
}
@Override
public Iterator<ShardRouting> iterator() {
return shards.iterator();
}
}
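Since ShardsIterator now extends Iterable<ShardRouting> (see the interface change further down), the old `for (ShardRouting shard : it.asUnordered())` idiom collapses to a plain enhanced for loop. A sketch:

    import org.elasticsearch.cluster.routing.ShardRouting;
    import org.elasticsearch.cluster.routing.ShardsIterator;

    class AssignedCount {
        // Counts shards assigned to a node, iterating the iterator directly.
        static int countAssigned(ShardsIterator it) {
            int assigned = 0;
            for (ShardRouting shard : it) {
                if (shard.assignedToNode()) {
                    assigned++;
                }
            }
            return assigned;
        }
    }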

View File

@ -20,7 +20,6 @@
package org.elasticsearch.cluster.routing;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@ -29,7 +28,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.snapshots.Snapshot;
import java.io.IOException;
import java.util.EnumSet;
import java.util.Objects;
/**
@ -249,14 +247,4 @@ public abstract class RecoverySource implements Writeable, ToXContent {
return "peer recovery";
}
}
private static EnumSet<RecoverySource.Type> INITIAL_RECOVERY_TYPES = EnumSet.of(Type.EMPTY_STORE, Type.LOCAL_SHARDS, Type.SNAPSHOT);
/**
* returns true for recovery types that indicate that a primary is being allocated for the very first time.
* These recoveries can be controlled by {@link IndexMetaData#INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING}
*/
public static boolean isInitialRecovery(RecoverySource.Type type) {
return INITIAL_RECOVERY_TYPES.contains(type);
}
}

View File

@ -238,7 +238,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
return allSatisfyingPredicateShardsGrouped(indices, includeEmpty, includeRelocationTargets, ACTIVE_PREDICATE);
}
public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty) {
public GroupShardsIterator<ShardIterator> allAssignedShardsGrouped(String[] indices, boolean includeEmpty) {
return allAssignedShardsGrouped(indices, includeEmpty, false);
}
@ -249,14 +249,14 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
* @param includeRelocationTargets if true, an <b>extra</b> shard iterator will be added for relocating shards. The extra
* iterator contains a single ShardRouting pointing at the relocating target
*/
public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets) {
public GroupShardsIterator<ShardIterator> allAssignedShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets) {
return allSatisfyingPredicateShardsGrouped(indices, includeEmpty, includeRelocationTargets, ASSIGNED_PREDICATE);
}
private static Predicate<ShardRouting> ACTIVE_PREDICATE = shardRouting -> shardRouting.active();
private static Predicate<ShardRouting> ASSIGNED_PREDICATE = shardRouting -> shardRouting.assignedToNode();
private static Predicate<ShardRouting> ACTIVE_PREDICATE = ShardRouting::active;
private static Predicate<ShardRouting> ASSIGNED_PREDICATE = ShardRouting::assignedToNode;
private GroupShardsIterator allSatisfyingPredicateShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets, Predicate<ShardRouting> predicate) {
private GroupShardsIterator<ShardIterator> allSatisfyingPredicateShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets, Predicate<ShardRouting> predicate) {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<>();
for (String index : indices) {
@ -278,7 +278,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
}
}
}
return new GroupShardsIterator(set);
return new GroupShardsIterator<>(set);
}
public ShardsIterator allShards(String[] indices) {
@ -320,9 +320,8 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
* @param indices The indices to return all the shards (replicas)
* @return All the primary shards grouped into a single shard element group each
* @throws IndexNotFoundException If an index passed does not exist
* @see IndexRoutingTable#groupByAllIt()
*/
public GroupShardsIterator activePrimaryShardsGrouped(String[] indices, boolean includeEmpty) {
public GroupShardsIterator<ShardIterator> activePrimaryShardsGrouped(String[] indices, boolean includeEmpty) {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<>();
for (String index : indices) {
@ -339,7 +338,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
}
}
}
return new GroupShardsIterator(set);
return new GroupShardsIterator<>(set);
}
@Override

View File

@ -18,10 +18,12 @@
*/
package org.elasticsearch.cluster.routing;
import java.util.List;
/**
* Allows iterating over unrelated shards.
*/
public interface ShardsIterator {
public interface ShardsIterator extends Iterable<ShardRouting> {
/**
* Resets the iterator to its initial state.
@ -60,6 +62,9 @@ public interface ShardsIterator {
@Override
boolean equals(Object other);
Iterable<ShardRouting> asUnordered();
/**
* Returns the {@link ShardRouting}s that this shards iterator holds.
*/
List<ShardRouting> getShardRoutings();
}

View File

@ -31,7 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@ -49,7 +49,7 @@ public class AllocateUnassignedDecision extends AbstractAllocationDecision {
*/
private static final Map<AllocationStatus, AllocateUnassignedDecision> CACHED_DECISIONS;
static {
Map<AllocationStatus, AllocateUnassignedDecision> cachedDecisions = new HashMap<>();
Map<AllocationStatus, AllocateUnassignedDecision> cachedDecisions = new EnumMap<>(AllocationStatus.class);
cachedDecisions.put(AllocationStatus.FETCHING_SHARD_DATA,
new AllocateUnassignedDecision(AllocationStatus.FETCHING_SHARD_DATA, null, null, null, false, 0L, 0L));
cachedDecisions.put(AllocationStatus.NO_VALID_SHARD_COPY,

View File

@ -30,6 +30,8 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import java.util.EnumSet;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.IP_VALIDATOR;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
@ -75,6 +77,17 @@ public class FilterAllocationDecider extends AllocationDecider {
public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING =
Setting.groupSetting(CLUSTER_ROUTING_EXCLUDE_GROUP_PREFIX + ".", IP_VALIDATOR, Property.Dynamic, Property.NodeScope);
/**
* The set of {@link RecoverySource.Type} values for which the
* {@link IndexMetaData#INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING} should apply.
* Note that we do not include the {@link RecoverySource.Type#SNAPSHOT} type here
* because if the snapshot is restored to a different cluster that does not contain
* the initial recovery node id, or to the same cluster where the initial recovery node
* id has been decommissioned, then the primary shards will never be allocated.
*/
static EnumSet<RecoverySource.Type> INITIAL_RECOVERY_TYPES =
EnumSet.of(RecoverySource.Type.EMPTY_STORE, RecoverySource.Type.LOCAL_SHARDS);
private volatile DiscoveryNodeFilters clusterRequireFilters;
private volatile DiscoveryNodeFilters clusterIncludeFilters;
private volatile DiscoveryNodeFilters clusterExcludeFilters;
@ -98,7 +111,7 @@ public class FilterAllocationDecider extends AllocationDecider {
IndexMetaData indexMd = allocation.metaData().getIndexSafe(shardRouting.index());
DiscoveryNodeFilters initialRecoveryFilters = indexMd.getInitialRecoveryFilters();
if (initialRecoveryFilters != null &&
RecoverySource.isInitialRecovery(shardRouting.recoverySource().getType()) &&
INITIAL_RECOVERY_TYPES.contains(shardRouting.recoverySource().getType()) &&
initialRecoveryFilters.match(node.node()) == false) {
String explanation = (shardRouting.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) ?
"initial allocation of the shrunken index is only allowed on nodes [%s] that hold a copy of every shard in the index" :

View File

@ -202,7 +202,9 @@ public abstract class StreamInput extends InputStream {
return i;
}
b = readByte();
assert (b & 0x80) == 0;
if ((b & 0x80) != 0) {
throw new IOException("Invalid vInt ((" + Integer.toHexString(b) + " & 0x7f) << 28) | " + Integer.toHexString(i));
}
return i | ((b & 0x7F) << 28);
}
@ -367,7 +369,7 @@ public abstract class StreamInput extends InputStream {
buffer[i] = ((char) ((c & 0x0F) << 12 | (readByte() & 0x3F) << 6 | (readByte() & 0x3F) << 0));
break;
default:
new AssertionError("unexpected character: " + c + " hex: " + Integer.toHexString(c));
throw new IOException("Invalid string; unexpected character: " + c + " hex: " + Integer.toHexString(c));
}
}
return spare.toString();
@ -808,7 +810,7 @@ public abstract class StreamInput extends InputStream {
case 17:
return (T) readStackTrace(new IOException(readOptionalString(), readException()), this);
default:
assert false : "no such exception for id: " + key;
throw new IOException("no such exception for id: " + key);
}
}
return null;
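For context on the readVInt change above: a vInt carries seven payload bits per byte, with the high bit acting as a continuation flag, so the fifth byte may legally contribute only four bits; a set high bit there means the stream is corrupt, which now surfaces as an IOException instead of an assert that is a no-op when assertions are disabled. A minimal JDK-only sketch of the decoding loop (the real method is unrolled):

    import java.io.IOException;

    class VIntSketch {
        interface ByteSource { byte next() throws IOException; }

        // Decodes a variable-length int: 7 bits per byte, high bit = "more bytes".
        static int readVInt(ByteSource in) throws IOException {
            byte b = in.next();
            int i = b & 0x7F;
            for (int shift = 7; (b & 0x80) != 0; shift += 7) {
                if (shift > 28) {
                    throw new IOException("invalid vInt: more than 5 bytes");
                }
                b = in.next();
                i |= (b & 0x7F) << shift;
            }
            return i;
        }
    }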

View File

@ -311,7 +311,7 @@ public class DeprecationLogger {
* @return the escaped string
*/
public static String escape(String s) {
return s.replaceAll("(\\\\|\")", "\\\\$1");
return s.replaceAll("([\"\\\\])", "\\\\$1");
}
}
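The character class `(["\\])` matches the same two characters as the old alternation `(\\|")`, a double quote or a backslash, and each match is prefixed with a backslash. A quick runnable check:

    class EscapeSketch {
        static String escape(String s) {
            return s.replaceAll("([\"\\\\])", "\\\\$1");
        }

        public static void main(String[] args) {
            System.out.println(escape("say \"hi\"")); // prints: say \"hi\"
            System.out.println(escape("a\\b"));       // prints: a\\b
        }
    }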

View File

@ -191,9 +191,11 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
case IndexMetaData.SETTING_VERSION_UPGRADED:
case IndexMetaData.SETTING_INDEX_PROVIDED_NAME:
case MergePolicyConfig.INDEX_MERGE_ENABLED:
case IndexMetaData.INDEX_SHRINK_SOURCE_UUID_KEY:
case IndexMetaData.INDEX_SHRINK_SOURCE_NAME_KEY:
return true;
default:
return false;
return IndexMetaData.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getRawKey().match(key);
}
}
}

View File

@ -21,12 +21,9 @@ package org.elasticsearch.common.settings;
import java.io.InputStream;
import java.security.GeneralSecurityException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Objects;
import java.util.EnumSet;
import java.util.Set;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.util.ArrayUtils;
@ -36,9 +33,7 @@ import org.elasticsearch.common.util.ArrayUtils;
* This class allows access to settings from the Elasticsearch keystore.
*/
public abstract class SecureSetting<T> extends Setting<T> {
private static final Set<Property> ALLOWED_PROPERTIES = new HashSet<>(
Arrays.asList(Property.Deprecated, Property.Shared)
);
private static final Set<Property> ALLOWED_PROPERTIES = EnumSet.of(Property.Deprecated, Property.Shared);
private static final Property[] FIXED_PROPERTIES = {
Property.NodeScope

View File

@ -31,6 +31,7 @@ import org.joda.time.format.PeriodFormatter;
import java.io.IOException;
import java.util.Collections;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
@ -48,7 +49,7 @@ public class TimeValue implements Writeable, Comparable<TimeValue> {
private static Map<Byte, TimeUnit> BYTE_TIME_UNIT_MAP;
static {
final Map<TimeUnit, Byte> timeUnitByteMap = new HashMap<>();
final Map<TimeUnit, Byte> timeUnitByteMap = new EnumMap<>(TimeUnit.class);
timeUnitByteMap.put(TimeUnit.NANOSECONDS, (byte)0);
timeUnitByteMap.put(TimeUnit.MICROSECONDS, (byte)1);
timeUnitByteMap.put(TimeUnit.MILLISECONDS, (byte)2);
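This is the same EnumMap substitution applied elsewhere in this commit (AllocateUnassignedDecision, IngestDocument, SearchContext, ScriptSettings): with enum keys the map is an ordinal-indexed array, so lookups skip hashing and iteration follows declaration order. A JDK-only sketch; note that the real code pins explicit byte ids per unit rather than deriving them, since the ids are part of the wire format and must stay stable even if the enum is ever reordered:

    import java.util.EnumMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    class TimeUnitBytesSketch {
        // Illustrative only: assigns ids in declaration order.
        static Map<TimeUnit, Byte> build() {
            Map<TimeUnit, Byte> m = new EnumMap<>(TimeUnit.class);
            byte id = 0;
            for (TimeUnit unit : TimeUnit.values()) {
                m.put(unit, id++);
            }
            return m;
        }
    }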

View File

@ -25,5 +25,5 @@ public interface CharFilterFactory {
String name();
Reader create(Reader tokenStream);
Reader create(Reader reader);
}

View File

@ -121,8 +121,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements I
}
final IndexReader.CacheKey coreCacheReader = cacheHelper.getKey();
final ShardId shardId = ShardUtils.extractShardId(context.reader());
if (shardId != null // can't require it because of the percolator
&& indexSettings.getIndex().equals(shardId.getIndex()) == false) {
if (indexSettings.getIndex().equals(shardId.getIndex()) == false) {
// insanity
throw new IllegalStateException("Trying to load bit set for index " + shardId.getIndex()
+ " with cache of index " + indexSettings.getIndex());

View File

@ -30,6 +30,7 @@ import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.template.CompiledTemplate;
import java.util.function.LongSupplier;
@ -105,8 +106,7 @@ public class QueryRewriteContext {
}
public BytesReference getTemplateBytes(Script template) {
CompiledScript compiledTemplate = scriptService.compile(template, ScriptContext.Standard.SEARCH);
ExecutableScript executable = scriptService.executable(compiledTemplate, template.getParams());
return (BytesReference) executable.run();
CompiledTemplate compiledTemplate = scriptService.compileTemplate(template, ScriptContext.Standard.SEARCH);
return compiledTemplate.run(template.getParams());
}
}

View File

@ -20,6 +20,9 @@
package org.elasticsearch.index.query.functionscore;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import static java.util.Collections.emptyMap;
/**
* Static method aliases for constructors of known {@link ScoreFunctionBuilder}s.
@ -69,7 +72,7 @@ public class ScoreFunctionBuilders {
}
public static ScriptScoreFunctionBuilder scriptFunction(String script) {
return (new ScriptScoreFunctionBuilder(new Script(script)));
return (new ScriptScoreFunctionBuilder(new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, script, emptyMap())));
}
public static RandomScoreFunctionBuilder randomFunction(int seed) {
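The rewritten one-argument overload spells out what the old Script(String) constructor did implicitly: an INLINE script in the default script language with empty parameters. Hypothetical usage:

    import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders;
    import org.elasticsearch.index.query.functionscore.ScriptScoreFunctionBuilder;

    class ScriptFunctionSketch {
        // Shorthand for an inline script in the default language, no params.
        static ScriptScoreFunctionBuilder doubleScore() {
            return ScoreFunctionBuilders.scriptFunction("_score * 2");
        }
    }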

View File

@ -69,7 +69,6 @@ import org.elasticsearch.index.analysis.GermanStemTokenFilterFactory;
import org.elasticsearch.index.analysis.GreekAnalyzerProvider;
import org.elasticsearch.index.analysis.HindiAnalyzerProvider;
import org.elasticsearch.index.analysis.HindiNormalizationFilterFactory;
import org.elasticsearch.index.analysis.HtmlStripCharFilterFactory;
import org.elasticsearch.index.analysis.HungarianAnalyzerProvider;
import org.elasticsearch.index.analysis.HunspellTokenFilterFactory;
import org.elasticsearch.index.analysis.IndicNormalizationFilterFactory;
@ -89,7 +88,6 @@ import org.elasticsearch.index.analysis.LimitTokenCountFilterFactory;
import org.elasticsearch.index.analysis.LithuanianAnalyzerProvider;
import org.elasticsearch.index.analysis.LowerCaseTokenFilterFactory;
import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
import org.elasticsearch.index.analysis.MappingCharFilterFactory;
import org.elasticsearch.index.analysis.MinHashTokenFilterFactory;
import org.elasticsearch.index.analysis.NGramTokenFilterFactory;
import org.elasticsearch.index.analysis.NGramTokenizerFactory;
@ -97,7 +95,6 @@ import org.elasticsearch.index.analysis.NorwegianAnalyzerProvider;
import org.elasticsearch.index.analysis.PathHierarchyTokenizerFactory;
import org.elasticsearch.index.analysis.PatternAnalyzerProvider;
import org.elasticsearch.index.analysis.PatternCaptureGroupTokenFilterFactory;
import org.elasticsearch.index.analysis.PatternReplaceCharFilterFactory;
import org.elasticsearch.index.analysis.PatternReplaceTokenFilterFactory;
import org.elasticsearch.index.analysis.PatternTokenizerFactory;
import org.elasticsearch.index.analysis.PersianAnalyzerProvider;
@ -146,6 +143,8 @@ import org.elasticsearch.plugins.AnalysisPlugin;
import java.io.IOException;
import java.util.List;
import static org.elasticsearch.plugins.AnalysisPlugin.requriesAnalysisSettings;
/**
* Sets up {@link AnalysisRegistry}.
*/
@ -184,9 +183,6 @@ public final class AnalysisModule {
private NamedRegistry<AnalysisProvider<CharFilterFactory>> setupCharFilters(List<AnalysisPlugin> plugins) {
NamedRegistry<AnalysisProvider<CharFilterFactory>> charFilters = new NamedRegistry<>("char_filter");
charFilters.register("html_strip", HtmlStripCharFilterFactory::new);
charFilters.register("pattern_replace", requriesAnalysisSettings(PatternReplaceCharFilterFactory::new));
charFilters.register("mapping", requriesAnalysisSettings(MappingCharFilterFactory::new));
charFilters.extractAndRegister(plugins, AnalysisPlugin::getCharFilters);
return charFilters;
}
@ -340,19 +336,6 @@ public final class AnalysisModule {
return normalizers;
}
private static <T> AnalysisModule.AnalysisProvider<T> requriesAnalysisSettings(AnalysisModule.AnalysisProvider<T> provider) {
return new AnalysisModule.AnalysisProvider<T>() {
@Override
public T get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException {
return provider.get(indexSettings, environment, name, settings);
}
@Override
public boolean requiresAnalysisSettings() {
return true;
}
};
}
/**
* The basic factory interface for analysis components.

View File

@ -31,6 +31,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.Date;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -554,7 +555,7 @@ public final class IngestDocument {
* Metadata fields that used to be accessible as ordinary top level fields will be removed as part of this call.
*/
public Map<MetaData, String> extractMetadata() {
Map<MetaData, String> metadataMap = new HashMap<>();
Map<MetaData, String> metadataMap = new EnumMap<>(MetaData.class);
for (MetaData metaData : MetaData.values()) {
metadataMap.put(metaData, cast(metaData.getFieldName(), sourceAndMetadata.remove(metaData.getFieldName()), String.class));
}

View File

@ -19,16 +19,16 @@
package org.elasticsearch.ingest;
import java.util.Collections;
import java.util.Map;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptType;
import java.util.Collections;
import java.util.Map;
import org.elasticsearch.template.CompiledTemplate;
public class InternalTemplateService implements TemplateService {
@ -44,16 +44,11 @@ public class InternalTemplateService implements TemplateService {
int mustacheEnd = template.indexOf("}}");
if (mustacheStart != -1 && mustacheEnd != -1 && mustacheStart < mustacheEnd) {
Script script = new Script(ScriptType.INLINE, "mustache", template, Collections.emptyMap());
CompiledScript compiledScript = scriptService.compile(script, ScriptContext.Standard.INGEST);
CompiledTemplate compiledTemplate = scriptService.compileTemplate(script, ScriptContext.Standard.INGEST);
return new Template() {
@Override
public String execute(Map<String, Object> model) {
ExecutableScript executableScript = scriptService.executable(compiledScript, model);
Object result = executableScript.run();
if (result instanceof BytesReference) {
return ((BytesReference) result).utf8ToString();
}
return String.valueOf(result);
return compiledTemplate.run(model).utf8ToString();
}
@Override

View File

@ -23,12 +23,16 @@ import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharFilter;
import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.Tokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalyzerProvider;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import java.io.IOException;
import java.util.Map;
import static java.util.Collections.emptyMap;
@ -52,28 +56,32 @@ import static java.util.Collections.emptyMap;
*/
public interface AnalysisPlugin {
/**
* Override to add additional {@link CharFilter}s.
* Override to add additional {@link CharFilter}s. See {@link #requriesAnalysisSettings(AnalysisProvider)}
* for how to get the configuration from the index.
*/
default Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() {
return emptyMap();
}
/**
* Override to add additional {@link TokenFilter}s.
* Override to add additional {@link TokenFilter}s. See {@link #requriesAnalysisSettings(AnalysisProvider)}
* for how to get the configuration from the index.
*/
default Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
return emptyMap();
}
/**
* Override to add additional {@link Tokenizer}s.
* Override to add additional {@link Tokenizer}s. See {@link #requriesAnalysisSettings(AnalysisProvider)}
* for how to get the configuration from the index.
*/
default Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() {
return emptyMap();
}
/**
* Override to add additional {@link Analyzer}s.
* Override to add additional {@link Analyzer}s. See {@link #requriesAnalysisSettings(AnalysisProvider)}
* for how to get the configuration from the index.
*/
default Map<String, AnalysisProvider<AnalyzerProvider<? extends Analyzer>>> getAnalyzers() {
return emptyMap();
@ -85,4 +93,21 @@ public interface AnalysisPlugin {
default Map<String, org.apache.lucene.analysis.hunspell.Dictionary> getHunspellDictionaries() {
return emptyMap();
}
/**
* Mark an {@link AnalysisProvider} as requiring the index's settings.
*/
static <T> AnalysisProvider<T> requriesAnalysisSettings(AnalysisProvider<T> provider) {
return new AnalysisProvider<T>() {
@Override
public T get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException {
return provider.get(indexSettings, environment, name, settings);
}
@Override
public boolean requiresAnalysisSettings() {
return true;
}
};
}
}
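With the helper moved onto the interface, a plugin can mark its own providers as settings-requiring without reaching into AnalysisModule. A sketch, where MyCharFilterFactory is a hypothetical factory whose constructor matches AnalysisProvider's (IndexSettings, Environment, String, Settings) signature:

    import java.util.Collections;
    import java.util.Map;
    import org.elasticsearch.index.analysis.CharFilterFactory;
    import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
    import org.elasticsearch.plugins.AnalysisPlugin;
    import org.elasticsearch.plugins.Plugin;

    public class MyAnalysisPlugin extends Plugin implements AnalysisPlugin {
        @Override
        public Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() {
            // The wrapper flags the provider as needing the index's settings.
            return Collections.singletonMap("my_char_filter",
                    AnalysisPlugin.requriesAnalysisSettings(MyCharFilterFactory::new));
        }
    }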

View File

@ -78,10 +78,17 @@ public class RestClearScrollAction extends BaseRestHandler {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if ("scroll_id".equals(currentFieldName) && token == XContentParser.Token.START_ARRAY) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
} else if ("scroll_id".equals(currentFieldName)){
if (token == XContentParser.Token.START_ARRAY) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token.isValue() == false) {
throw new IllegalArgumentException("scroll_id array element should only contain scroll_id");
}
clearScrollRequest.addScrollId(parser.text());
}
} else {
if (token.isValue() == false) {
throw new IllegalArgumentException("scroll_id array element should only contain scroll_id");
throw new IllegalArgumentException("scroll_id element should only contain scroll_id");
}
clearScrollRequest.addScrollId(parser.text());
}
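With this restructuring the endpoint accepts a bare string as well as the array form, i.e. (with illustrative ids) both {"scroll_id": ["id1", "id2"]} and {"scroll_id": "id1"} now parse, and a non-value token in either position fails with the matching error message.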

View File

@ -38,6 +38,7 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.breaker.CircuitBreakingException;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.cache.RemovalListener;
@ -56,6 +57,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.env.Environment;
import org.elasticsearch.search.lookup.SearchLookup;
import org.elasticsearch.template.CompiledTemplate;
import org.elasticsearch.watcher.FileChangesListener;
import org.elasticsearch.watcher.FileWatcher;
import org.elasticsearch.watcher.ResourceWatcherService;
@ -320,6 +322,12 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
}
}
/** Compiles a template. Note this will be moved to a separate TemplateService in the future. */
public CompiledTemplate compileTemplate(Script script, ScriptContext scriptContext) {
CompiledScript compiledScript = compile(script, scriptContext);
return params -> (BytesReference)executable(compiledScript, params).run();
}
/**
* Check whether there have been too many compilations within the last minute, throwing a circuit breaking exception if so.
* This is a variant of the token bucket algorithm: https://en.wikipedia.org/wiki/Token_bucket
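A minimal JDK-only token-bucket sketch of the check that javadoc describes (the real implementation differs in configuration, clock source, and the exception thrown): each compilation spends one token, and tokens refill at a fixed rate up to a cap, so short bursts pass while a sustained flood of compilations is rejected.

    class CompilationRateLimiterSketch {
        private final double maxTokens;     // bucket capacity
        private final double refillPerNano; // refill rate, tokens per nanosecond
        private double tokens;
        private long lastNanos;

        CompilationRateLimiterSketch(double compilationsPerMinute) {
            this.maxTokens = compilationsPerMinute;
            this.refillPerNano = compilationsPerMinute / 60e9;
            this.tokens = compilationsPerMinute;
            this.lastNanos = System.nanoTime();
        }

        // Throws if the bucket is empty; otherwise spends one token.
        synchronized void checkCompilationLimit() {
            long now = System.nanoTime();
            tokens = Math.min(maxTokens, tokens + (now - lastNanos) * refillPerNano);
            lastNanos = now;
            if (tokens < 1) {
                throw new IllegalStateException("too many script compilations within one minute");
            }
            tokens -= 1;
        }
    }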

View File

@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -35,7 +36,7 @@ public class ScriptSettings {
private static final Map<ScriptType, Setting<Boolean>> SCRIPT_TYPE_SETTING_MAP;
static {
Map<ScriptType, Setting<Boolean>> scriptTypeSettingMap = new HashMap<>();
Map<ScriptType, Setting<Boolean>> scriptTypeSettingMap = new EnumMap<>(ScriptType.class);
for (ScriptType scriptType : ScriptType.values()) {
scriptTypeSettingMap.put(scriptType, Setting.boolSetting(
ScriptModes.sourceKey(scriptType),

View File

@ -21,6 +21,7 @@ package org.elasticsearch.search;
import org.apache.lucene.search.Explanation;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
@ -544,7 +545,7 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable<S
ShardId shardId = get(Fields._SHARD, values, null);
String nodeId = get(Fields._NODE, values, null);
if (shardId != null && nodeId != null) {
searchHit.shard(new SearchShardTarget(nodeId, shardId));
searchHit.shard(new SearchShardTarget(nodeId, shardId, OriginalIndices.NONE));
}
searchHit.fields(fields);
return searchHit;

View File

@ -24,6 +24,7 @@ import org.apache.lucene.search.TopDocs;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.search.SearchTask;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
@ -40,7 +41,6 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.search.collapse.CollapseContext;
import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexEventListener;
@ -55,6 +55,7 @@ import org.elasticsearch.search.aggregations.AggregationInitializationException;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.collapse.CollapseContext;
import org.elasticsearch.search.dfs.DfsPhase;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchPhase;
@ -498,7 +499,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
throws IOException {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.getShard(request.shardId().getId());
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId());
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(),
indexShard.shardId(), OriginalIndices.NONE);
Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
final DefaultSearchContext searchContext = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget,

View File

@ -19,6 +19,7 @@
package org.elasticsearch.search;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -36,6 +37,9 @@ public class SearchShardTarget implements Writeable, Comparable<SearchShardTarge
private final Text nodeId;
private final ShardId shardId;
//original indices are only needed in the coordinating node throughout the search request execution.
//no need to serialize them as part of SearchShardTarget.
private final transient OriginalIndices originalIndices;
public SearchShardTarget(StreamInput in) throws IOException {
if (in.readBoolean()) {
@ -44,15 +48,18 @@ public class SearchShardTarget implements Writeable, Comparable<SearchShardTarge
nodeId = null;
}
shardId = ShardId.readShardId(in);
this.originalIndices = null;
}
public SearchShardTarget(String nodeId, ShardId shardId) {
public SearchShardTarget(String nodeId, ShardId shardId, OriginalIndices originalIndices) {
this.nodeId = nodeId == null ? null : new Text(nodeId);
this.shardId = shardId;
this.originalIndices = originalIndices;
}
//this constructor is only used in tests
public SearchShardTarget(String nodeId, Index index, int shardId) {
this(nodeId, new ShardId(index, shardId));
this(nodeId, new ShardId(index, shardId), OriginalIndices.NONE);
}
@Nullable
@ -72,6 +79,10 @@ public class SearchShardTarget implements Writeable, Comparable<SearchShardTarge
return shardId;
}
public OriginalIndices getOriginalIndices() {
return originalIndices;
}
@Override
public int compareTo(SearchShardTarget o) {
int i = shardId.getIndexName().compareTo(o.getIndex());
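Note that the transient modifier is documentation here rather than mechanism: SearchShardTarget is serialized through its explicit Writeable methods, not java.io.Serializable, and the stream constructor above deliberately assigns null, so getOriginalIndices() only carries a value on the coordinating node that built the target.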

View File

@ -37,10 +37,8 @@ import org.elasticsearch.search.aggregations.AggregatorFactory;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
import java.util.HashSet;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function;
public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder<ScriptedMetricAggregationBuilder> {
@ -239,11 +237,6 @@ public class ScriptedMetricAggregationBuilder extends AbstractAggregationBuilder
Map<String, Object> params = null;
XContentParser.Token token;
String currentFieldName = null;
Set<String> scriptParameters = new HashSet<>();
scriptParameters.add(INIT_SCRIPT_FIELD.getPreferredName());
scriptParameters.add(MAP_SCRIPT_FIELD.getPreferredName());
scriptParameters.add(COMBINE_SCRIPT_FIELD.getPreferredName());
scriptParameters.add(REDUCE_SCRIPT_FIELD.getPreferredName());
XContentParser parser = context.parser();
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {

View File

@ -31,21 +31,35 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
public class InternalPercentilesBucket extends InternalNumericMetricsAggregation.MultiValue implements PercentilesBucket {
private double[] percentiles;
private double[] percents;
private final transient Map<Double, Double> percentileLookups = new HashMap<>();
public InternalPercentilesBucket(String name, double[] percents, double[] percentiles,
DocValueFormat formatter, List<PipelineAggregator> pipelineAggregators,
Map<String, Object> metaData) {
super(name, pipelineAggregators, metaData);
if ((percentiles.length == percents.length) == false) {
throw new IllegalArgumentException("The number of provided percents and percentiles didn't match. percents: "
+ Arrays.toString(percents) + ", percentiles: " + Arrays.toString(percentiles));
}
this.format = formatter;
this.percentiles = percentiles;
this.percents = percents;
computeLookup();
}
private void computeLookup() {
for (int i = 0; i < percents.length; i++) {
percentileLookups.put(percents[i], percentiles[i]);
}
}
/**
@ -56,6 +70,7 @@ public class InternalPercentilesBucket extends InternalNumericMetricsAggregation
format = in.readNamedWriteable(DocValueFormat.class);
percentiles = in.readDoubleArray();
percents = in.readDoubleArray();
computeLookup();
}
@Override
@ -72,12 +87,12 @@ public class InternalPercentilesBucket extends InternalNumericMetricsAggregation
@Override
public double percentile(double percent) throws IllegalArgumentException {
int index = Arrays.binarySearch(percents, percent);
if (index < 0) {
Double percentile = percentileLookups.get(percent);
if (percentile == null) {
throw new IllegalArgumentException("Percent requested [" + String.valueOf(percent) + "] was not" +
" one of the computed percentiles. Available keys are: " + Arrays.toString(percents));
}
return percentiles[index];
return percentile;
}
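The map lookup replaces a misuse of binary search: Arrays.binarySearch is only defined on sorted input, while percents keeps the order in which the percentiles were requested. A JDK-only demonstration of the failure mode:

    import java.util.Arrays;

    class BinarySearchPitfall {
        public static void main(String[] args) {
            double[] percents = {99.0, 50.0, 95.0}; // request order, not sorted
            // 99.0 is present at index 0, but binarySearch reports it missing.
            System.out.println(Arrays.binarySearch(percents, 99.0)); // prints: -4
        }
    }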
@Override
@ -116,6 +131,17 @@ public class InternalPercentilesBucket extends InternalNumericMetricsAggregation
return builder;
}
@Override
protected boolean doEquals(Object obj) {
InternalPercentilesBucket that = (InternalPercentilesBucket) obj;
return Arrays.equals(percents, that.percents) && Arrays.equals(percentiles, that.percentiles);
}
@Override
protected int doHashCode() {
return Objects.hash(Arrays.hashCode(percents), Arrays.hashCode(percentiles));
}
public static class Iter implements Iterator<Percentile> {
private final double[] percents;

View File

@ -23,7 +23,6 @@ import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -42,9 +41,9 @@ public class ShardFetchSearchRequest extends ShardFetchRequest implements Indice
}
public ShardFetchSearchRequest(SearchRequest request, long id, IntArrayList list, ScoreDoc lastEmittedDoc) {
public ShardFetchSearchRequest(OriginalIndices originalIndices, long id, IntArrayList list, ScoreDoc lastEmittedDoc) {
super(id, list, lastEmittedDoc);
this.originalIndices = new OriginalIndices(request);
this.originalIndices = originalIndices;
}
@Override

View File

@ -63,7 +63,7 @@ import org.elasticsearch.search.sort.SortAndFormats;
import org.elasticsearch.search.suggest.SuggestionSearchContext;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
@ -336,7 +336,7 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas
*/
public void addReleasable(Releasable releasable, Lifetime lifetime) {
if (clearables == null) {
clearables = new HashMap<>();
clearables = new EnumMap<>(Lifetime.class);
}
List<Releasable> releasables = clearables.get(lifetime);
if (releasables == null) {

View File

@ -53,11 +53,11 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
public ShardSearchTransportRequest(){
}
public ShardSearchTransportRequest(SearchRequest searchRequest, ShardId shardId, int numberOfShards,
public ShardSearchTransportRequest(OriginalIndices originalIndices, SearchRequest searchRequest, ShardId shardId, int numberOfShards,
AliasFilter aliasFilter, float indexBoost, long nowInMillis) {
this.shardSearchLocalRequest = new ShardSearchLocalRequest(searchRequest, shardId, numberOfShards, aliasFilter, indexBoost,
nowInMillis);
this.originalIndices = new OriginalIndices(searchRequest);
this.originalIndices = originalIndices;
}
@Override
@ -76,7 +76,6 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha
return originalIndices.indicesOptions();
}
@Override
public ShardId shardId() {
return shardSearchLocalRequest.shardId();

View File

@ -21,7 +21,6 @@ package org.elasticsearch.search.query;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchTask;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Strings;
@ -47,10 +46,10 @@ public class QuerySearchRequest extends TransportRequest implements IndicesReque
public QuerySearchRequest() {
}
public QuerySearchRequest(SearchRequest request, long id, AggregatedDfs dfs) {
public QuerySearchRequest(OriginalIndices originalIndices, long id, AggregatedDfs dfs) {
this.id = id;
this.dfs = dfs;
this.originalIndices = new OriginalIndices(request);
this.originalIndices = originalIndices;
}
public long id() {

View File

@ -26,6 +26,7 @@ import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterChangedEvent;
@ -913,13 +914,11 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
try {
final Repository repository = repositoriesService.repository(snapshot.getRepository());
logger.trace("[{}] finalizing snapshot in repository, state: [{}], failure[{}]", snapshot, entry.state(), failure);
ArrayList<ShardSearchFailure> failures = new ArrayList<>();
ArrayList<SnapshotShardFailure> shardFailures = new ArrayList<>();
for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shardStatus : entry.shards()) {
ShardId shardId = shardStatus.key;
ShardSnapshotStatus status = shardStatus.value;
if (status.state().failed()) {
failures.add(new ShardSearchFailure(status.reason(), new SearchShardTarget(status.nodeId(), shardId)));
shardFailures.add(new SnapshotShardFailure(status.nodeId(), shardId, status.reason()));
}
}

View File

@ -0,0 +1,35 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.template;
import java.util.Map;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ScriptType;
/**
* A template that may be executed.
*/
public interface CompiledTemplate {
/** Run a template and return the resulting string, encoded in utf8 bytes. */
BytesReference run(Map<String, Object> params);
}
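Because the interface has a single abstract method it is a functional interface, which is exactly what lets ScriptService.compileTemplate (earlier in this commit) return it as a lambda. A small sketch:

    import java.util.Collections;
    import org.elasticsearch.common.bytes.BytesArray;
    import org.elasticsearch.template.CompiledTemplate;

    class TemplateSketch {
        // A toy template that ignores its parameters and renders fixed text.
        static CompiledTemplate constant(String text) {
            return params -> new BytesArray(text);
        }

        public static void main(String[] args) {
            System.out.println(constant("hello").run(Collections.emptyMap()).utf8ToString());
        }
    }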

View File

@ -21,6 +21,7 @@ package org.elasticsearch;
import org.apache.lucene.util.Constants;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.ShardSearchFailure;
@ -758,9 +759,9 @@ public class ElasticsearchExceptionTests extends ESTestCase {
failureCause = new NoShardAvailableActionException(new ShardId("_index_g", "_uuid_g", 6), "node_g", failureCause);
ShardSearchFailure[] shardFailures = new ShardSearchFailure[]{
new ShardSearchFailure(new ParsingException(0, 0, "Parsing g", null),
new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 61))),
new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 61), OriginalIndices.NONE)),
new ShardSearchFailure(new RepositoryException("repository_g", "Repo"),
new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 62))),
new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 62), OriginalIndices.NONE)),
new ShardSearchFailure(new SearchContextMissingException(0L), null)
};
failure = new SearchPhaseExecutionException("phase_g", "G", failureCause, shardFailures);

View File

@ -19,11 +19,8 @@
package org.elasticsearch.action.search;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.test.ESTestCase;
@ -56,48 +53,6 @@ public class AbstractSearchAsyncActionTookTests extends ESTestCase {
System::nanoTime);
}
final ShardIterator it = new ShardIterator() {
@Override
public ShardId shardId() {
return null;
}
@Override
public void reset() {
}
@Override
public int compareTo(ShardIterator o) {
return 0;
}
@Override
public int size() {
return 0;
}
@Override
public int sizeActive() {
return 0;
}
@Override
public ShardRouting nextOrNull() {
return null;
}
@Override
public int remaining() {
return 0;
}
@Override
public Iterable<ShardRouting> asUnordered() {
return null;
}
};
return new AbstractSearchAsyncAction<SearchPhaseResult>(
"test",
null,
@ -108,7 +63,7 @@ public class AbstractSearchAsyncActionTookTests extends ESTestCase {
null,
null,
null,
new GroupShardsIterator(Collections.singletonList(it)),
new GroupShardsIterator<>(Collections.singletonList(new SearchShardIterator(null, Collections.emptyList(), null))),
timeProvider,
0,
null,
@ -123,7 +78,7 @@ public class AbstractSearchAsyncActionTookTests extends ESTestCase {
@Override
protected void executePhaseOnShard(
final ShardIterator shardIt,
final SearchShardIterator shardIt,
final ShardRouting shard,
final SearchActionListener<SearchPhaseResult> listener) {
@ -157,5 +112,4 @@ public class AbstractSearchAsyncActionTookTests extends ESTestCase {
assertThat(actual, greaterThanOrEqualTo(TimeUnit.NANOSECONDS.toMillis(expected.get())));
}
}
}

View File

@ -19,7 +19,7 @@
package org.elasticsearch.action.search;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.Loggers;
@ -29,14 +29,11 @@ import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.transport.Transport;
import org.junit.Assert;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
@ -114,7 +111,7 @@ public final class MockSearchPhaseContext implements SearchPhaseContext {
}
@Override
public ShardSearchTransportRequest buildShardSearchRequest(ShardIterator shardIt, ShardRouting shard) {
public ShardSearchTransportRequest buildShardSearchRequest(SearchShardIterator shardIt, ShardRouting shard) {
Assert.fail("should not be called");
return null;
}
@ -145,7 +142,7 @@ public final class MockSearchPhaseContext implements SearchPhaseContext {
}
@Override
public void sendReleaseSearchContext(long contextId, Transport.Connection connection) {
public void sendReleaseSearchContext(long contextId, Transport.Connection connection, OriginalIndices originalIndices) {
releasedSearchContexts.add(contextId);
}
}

View File

@ -382,7 +382,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
failReference.set(x);
responseLatch.countDown();
});
connection.fetchSearchShards(request, Arrays.asList("test-index"), shardsListener);
connection.fetchSearchShards(request, new String[]{"test-index"}, shardsListener);
responseLatch.await();
assertNull(failReference.get());
assertNotNull(reference.get());

View File

@ -20,10 +20,11 @@ package org.elasticsearch.action.search;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
@ -204,7 +205,7 @@ public class RemoteClusterServiceTests extends ESTestCase {
public void testProcessRemoteShards() throws IOException {
try (RemoteClusterService service = new RemoteClusterService(Settings.EMPTY, null)) {
assertFalse(service.isCrossClusterSearchEnabled());
List<ShardIterator> iteratorList = new ArrayList<>();
List<SearchShardIterator> iteratorList = new ArrayList<>();
Map<String, ClusterSearchShardsResponse> searchShardsResponseMap = new HashMap<>();
DiscoveryNode[] nodes = new DiscoveryNode[] {
new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT),
@ -225,11 +226,26 @@ public class RemoteClusterServiceTests extends ESTestCase {
TestShardRouting.newShardRouting("bar", 0, "node1", false, ShardRoutingState.STARTED)})
};
searchShardsResponseMap.put("test_cluster_1", new ClusterSearchShardsResponse(groups, nodes, indicesAndAliases));
DiscoveryNode[] nodes2 = new DiscoveryNode[] {
new DiscoveryNode("node3", buildNewFakeTransportAddress(), Version.CURRENT)
};
ClusterSearchShardsGroup[] groups2 = new ClusterSearchShardsGroup[] {
new ClusterSearchShardsGroup(new ShardId("xyz", "xyz_id", 0),
new ShardRouting[] {TestShardRouting.newShardRouting("xyz", 0, "node3", true, ShardRoutingState.STARTED)})
};
searchShardsResponseMap.put("test_cluster_2", new ClusterSearchShardsResponse(groups2, nodes2, null));
Map<String, OriginalIndices> remoteIndicesByCluster = new HashMap<>();
remoteIndicesByCluster.put("test_cluster_1",
new OriginalIndices(new String[]{"fo*", "ba*"}, IndicesOptions.strictExpandOpenAndForbidClosed()));
remoteIndicesByCluster.put("test_cluster_2",
new OriginalIndices(new String[]{"x*"}, IndicesOptions.strictExpandOpenAndForbidClosed()));
Map<String, AliasFilter> remoteAliases = new HashMap<>();
service.processRemoteShards(searchShardsResponseMap, iteratorList, remoteAliases);
assertEquals(3, iteratorList.size());
for (ShardIterator iterator : iteratorList) {
service.processRemoteShards(searchShardsResponseMap, remoteIndicesByCluster, iteratorList, remoteAliases);
assertEquals(4, iteratorList.size());
for (SearchShardIterator iterator : iteratorList) {
if (iterator.shardId().getIndexName().endsWith("foo")) {
assertArrayEquals(new String[]{"fo*", "ba*"}, iterator.getOriginalIndices().indices());
assertTrue(iterator.shardId().getId() == 0 || iterator.shardId().getId() == 1);
assertEquals("test_cluster_1:foo", iterator.shardId().getIndexName());
ShardRouting shardRouting = iterator.nextOrNull();
@ -239,7 +255,8 @@ public class RemoteClusterServiceTests extends ESTestCase {
assertNotNull(shardRouting);
assertEquals(shardRouting.getIndexName(), "foo");
assertNull(iterator.nextOrNull());
} else {
} else if (iterator.shardId().getIndexName().endsWith("bar")) {
assertArrayEquals(new String[]{"fo*", "ba*"}, iterator.getOriginalIndices().indices());
assertEquals(0, iterator.shardId().getId());
assertEquals("test_cluster_1:bar", iterator.shardId().getIndexName());
ShardRouting shardRouting = iterator.nextOrNull();
@ -249,13 +266,23 @@ public class RemoteClusterServiceTests extends ESTestCase {
assertNotNull(shardRouting);
assertEquals(shardRouting.getIndexName(), "bar");
assertNull(iterator.nextOrNull());
} else if (iterator.shardId().getIndexName().endsWith("xyz")) {
assertArrayEquals(new String[]{"x*"}, iterator.getOriginalIndices().indices());
assertEquals(0, iterator.shardId().getId());
assertEquals("test_cluster_2:xyz", iterator.shardId().getIndexName());
ShardRouting shardRouting = iterator.nextOrNull();
assertNotNull(shardRouting);
assertEquals(shardRouting.getIndexName(), "xyz");
assertNull(iterator.nextOrNull());
}
}
assertEquals(2, remoteAliases.size());
assertEquals(3, remoteAliases.size());
assertTrue(remoteAliases.toString(), remoteAliases.containsKey("foo_id"));
assertTrue(remoteAliases.toString(), remoteAliases.containsKey("bar_id"));
assertTrue(remoteAliases.toString(), remoteAliases.containsKey("xyz_id"));
assertEquals(new TermsQueryBuilder("foo", "bar"), remoteAliases.get("foo_id").getQueryBuilder());
assertEquals(new MatchAllQueryBuilder(), remoteAliases.get("bar_id").getQueryBuilder());
assertNull(remoteAliases.get("xyz_id").getQueryBuilder());
}
}

View File

@ -20,11 +20,11 @@ package org.elasticsearch.action.search;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.PlainShardIterator;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Strings;
@ -76,12 +76,14 @@ public class SearchAsyncActionTests extends ESTestCase {
Map<DiscoveryNode, Set<Long>> nodeToContextMap = new HashMap<>();
AtomicInteger contextIdGenerator = new AtomicInteger(0);
GroupShardsIterator shardsIter = getShardsIter("idx", randomIntBetween(1, 10), randomBoolean(), primaryNode, replicaNode);
GroupShardsIterator<SearchShardIterator> shardsIter = getShardsIter("idx",
new OriginalIndices(new String[]{"idx"}, IndicesOptions.strictExpandOpenAndForbidClosed()),
randomIntBetween(1, 10), randomBoolean(), primaryNode, replicaNode);
AtomicInteger numFreedContext = new AtomicInteger();
SearchTransportService transportService = new SearchTransportService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY,
Collections.singleton(RemoteClusterService.REMOTE_CLUSTERS_SEEDS)), null) {
@Override
public void sendFreeContext(Transport.Connection connection, long contextId, SearchRequest request) {
public void sendFreeContext(Transport.Connection connection, long contextId, OriginalIndices originalIndices) {
numFreedContext.incrementAndGet();
assertTrue(nodeToContextMap.containsKey(connection.getNode()));
assertTrue(nodeToContextMap.get(connection.getNode()).remove(contextId));
@ -110,7 +112,7 @@ public class SearchAsyncActionTests extends ESTestCase {
TestSearchResponse response = new TestSearchResponse();
@Override
protected void executePhaseOnShard(ShardIterator shardIt, ShardRouting shard, SearchActionListener<TestSearchPhaseResult>
protected void executePhaseOnShard(SearchShardIterator shardIt, ShardRouting shard, SearchActionListener<TestSearchPhaseResult>
listener) {
assertTrue("shard: " + shard.shardId() + " has been queried twice", response.queried.add(shard.shardId()));
Transport.Connection connection = getConnection(shard.currentNodeId());
@ -133,7 +135,7 @@ public class SearchAsyncActionTests extends ESTestCase {
for (int i = 0; i < results.getNumShards(); i++) {
TestSearchPhaseResult result = results.results.get(i);
assertEquals(result.node.getId(), result.getSearchShardTarget().getNodeId());
sendReleaseSearchContext(result.getRequestId(), new MockConnection(result.node));
sendReleaseSearchContext(result.getRequestId(), new MockConnection(result.node), OriginalIndices.NONE);
}
responseListener.onResponse(response);
latch.countDown();
@ -154,9 +156,9 @@ public class SearchAsyncActionTests extends ESTestCase {
}
}
private GroupShardsIterator getShardsIter(String index, int numShards, boolean doReplicas, DiscoveryNode primaryNode,
DiscoveryNode replicaNode) {
ArrayList<ShardIterator> list = new ArrayList<>();
private static GroupShardsIterator<SearchShardIterator> getShardsIter(String index, OriginalIndices originalIndices, int numShards,
boolean doReplicas, DiscoveryNode primaryNode, DiscoveryNode replicaNode) {
ArrayList<SearchShardIterator> list = new ArrayList<>();
for (int i = 0; i < numShards; i++) {
ArrayList<ShardRouting> started = new ArrayList<>();
ArrayList<ShardRouting> initializing = new ArrayList<>();
@ -184,9 +186,9 @@ public class SearchAsyncActionTests extends ESTestCase {
}
Collections.shuffle(started, random());
started.addAll(initializing);
list.add(new PlainShardIterator(new ShardId(new Index(index, "_na_"), i), started));
list.add(new SearchShardIterator(new ShardId(new Index(index, "_na_"), i), started, originalIndices));
}
return new GroupShardsIterator(list);
return new GroupShardsIterator<>(list);
}
public static class TestSearchResponse extends SearchResponse {


@ -19,6 +19,7 @@
package org.elasticsearch.action.search;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentParser;
@ -42,7 +43,7 @@ public class ShardSearchFailureTests extends ESTestCase {
String indexUuid = randomAlphaOfLengthBetween(5, 10);
int shardId = randomInt();
return new ShardSearchFailure(ex,
new SearchShardTarget(nodeId, new ShardId(new Index(indexName, indexUuid), shardId)));
new SearchShardTarget(nodeId, new ShardId(new Index(indexName, indexUuid), shardId), null));
}
public void testFromXContent() throws IOException {
@ -73,7 +74,7 @@ public class ShardSearchFailureTests extends ESTestCase {
public void testToXContent() throws IOException {
ShardSearchFailure failure = new ShardSearchFailure(new ParsingException(0, 0, "some message", null),
new SearchShardTarget("nodeId", new ShardId(new Index("indexName", "indexUuid"), 123)));
new SearchShardTarget("nodeId", new ShardId(new Index("indexName", "indexUuid"), 123), OriginalIndices.NONE));
BytesReference xContent = toXContent(failure, XContentType.JSON, randomBoolean());
assertEquals(
"{\"shard\":123,"


@ -0,0 +1,122 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.search;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.PlainShardIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
public class TransportSearchActionTests extends ESTestCase {
public void testMergeShardsIterators() throws IOException {
List<ShardIterator> localShardIterators = new ArrayList<>();
{
ShardId shardId = new ShardId("local_index", "local_index_uuid", 0);
ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, "local_node", true, STARTED);
ShardIterator shardIterator = new PlainShardIterator(shardId, Collections.singletonList(shardRouting));
localShardIterators.add(shardIterator);
}
{
ShardId shardId2 = new ShardId("local_index_2", "local_index_2_uuid", 1);
ShardRouting shardRouting2 = TestShardRouting.newShardRouting(shardId2, "local_node", true, STARTED);
ShardIterator shardIterator2 = new PlainShardIterator(shardId2, Collections.singletonList(shardRouting2));
localShardIterators.add(shardIterator2);
}
GroupShardsIterator<ShardIterator> localShardsIterator = new GroupShardsIterator<>(localShardIterators);
OriginalIndices localIndices = new OriginalIndices(new String[]{"local_alias", "local_index_2"},
IndicesOptions.strictExpandOpenAndForbidClosed());
OriginalIndices remoteIndices = new OriginalIndices(new String[]{"remote_alias", "remote_index_2"},
IndicesOptions.strictExpandOpen());
List<SearchShardIterator> remoteShardIterators = new ArrayList<>();
{
ShardId remoteShardId = new ShardId("remote_index", "remote_index_uuid", 2);
ShardRouting remoteShardRouting = TestShardRouting.newShardRouting(remoteShardId, "remote_node", true, STARTED);
SearchShardIterator remoteShardIterator = new SearchShardIterator(remoteShardId,
Collections.singletonList(remoteShardRouting), remoteIndices);
remoteShardIterators.add(remoteShardIterator);
}
{
ShardId remoteShardId2 = new ShardId("remote_index_2", "remote_index_2_uuid", 3);
ShardRouting remoteShardRouting2 = TestShardRouting.newShardRouting(remoteShardId2, "remote_node", true, STARTED);
SearchShardIterator remoteShardIterator2 = new SearchShardIterator(remoteShardId2,
Collections.singletonList(remoteShardRouting2), remoteIndices);
remoteShardIterators.add(remoteShardIterator2);
}
OriginalIndices remoteIndices2 = new OriginalIndices(new String[]{"remote_index_3"}, IndicesOptions.strictExpand());
{
ShardId remoteShardId3 = new ShardId("remote_index_3", "remote_index_3_uuid", 4);
ShardRouting remoteShardRouting3 = TestShardRouting.newShardRouting(remoteShardId3, "remote_node", true, STARTED);
SearchShardIterator remoteShardIterator3 = new SearchShardIterator(remoteShardId3,
Collections.singletonList(remoteShardRouting3), remoteIndices2);
remoteShardIterators.add(remoteShardIterator3);
}
GroupShardsIterator<SearchShardIterator> searchShardIterators = TransportSearchAction.mergeShardsIterators(localShardsIterator,
localIndices, remoteShardIterators);
assertEquals(searchShardIterators.size(), 5);
int i = 0;
for (SearchShardIterator searchShardIterator : searchShardIterators) {
switch(i++) {
case 0:
assertEquals("local_index", searchShardIterator.shardId().getIndexName());
assertEquals(0, searchShardIterator.shardId().getId());
assertSame(localIndices, searchShardIterator.getOriginalIndices());
break;
case 1:
assertEquals("local_index_2", searchShardIterator.shardId().getIndexName());
assertEquals(1, searchShardIterator.shardId().getId());
assertSame(localIndices, searchShardIterator.getOriginalIndices());
break;
case 2:
assertEquals("remote_index", searchShardIterator.shardId().getIndexName());
assertEquals(2, searchShardIterator.shardId().getId());
assertSame(remoteIndices, searchShardIterator.getOriginalIndices());
break;
case 3:
assertEquals("remote_index_2", searchShardIterator.shardId().getIndexName());
assertEquals(3, searchShardIterator.shardId().getId());
assertSame(remoteIndices, searchShardIterator.getOriginalIndices());
break;
case 4:
assertEquals("remote_index_3", searchShardIterator.shardId().getIndexName());
assertEquals(4, searchShardIterator.shardId().getId());
assertSame(remoteIndices2, searchShardIterator.getOriginalIndices());
break;
}
}
}
}
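Reconstructed from the assertions above, a plausible sketch of what mergeShardsIterators does: attach the local OriginalIndices to every local iterator, concatenate the remote iterators, and let GroupShardsIterator order the result. This is a sketch, not the actual method body; the getShardRoutings() accessor and the ordering done by GroupShardsIterator's constructor are assumptions inferred from the ordered case 0..4 assertions:

static GroupShardsIterator<SearchShardIterator> mergeShardsIteratorsSketch(
        GroupShardsIterator<ShardIterator> localShardsIterator,
        OriginalIndices localIndices,
        List<SearchShardIterator> remoteShardIterators) {
    List<SearchShardIterator> shards = new ArrayList<>(remoteShardIterators);
    for (ShardIterator localIterator : localShardsIterator) {
        // wrap each local iterator so it carries OriginalIndices, as the remote ones already do
        shards.add(new SearchShardIterator(localIterator.shardId(),
            localIterator.getShardRoutings(), localIndices)); // getShardRoutings() assumed
    }
    // assumed: the constructor sorts the merged list, so the appends above can happen
    // in any order and iteration still sees shard ids 0 through 4 in sequence
    return new GroupShardsIterator<>(shards);
}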


@ -296,7 +296,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
ShardsIterator shardIt = clusterService.state().routingTable().allShards(new String[]{TEST_INDEX});
Set<String> set = new HashSet<>();
for (ShardRouting shard : shardIt.asUnordered()) {
for (ShardRouting shard : shardIt) {
set.add(shard.currentNodeId());
}
@ -332,7 +332,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
// the master should not be in the list of nodes that requests were sent to
ShardsIterator shardIt = clusterService.state().routingTable().allShards(new String[]{TEST_INDEX});
Set<String> set = new HashSet<>();
for (ShardRouting shard : shardIt.asUnordered()) {
for (ShardRouting shard : shardIt) {
if (!shard.currentNodeId().equals(masterNode.getId())) {
set.add(shard.currentNodeId());
}
@ -352,8 +352,8 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
public void testOperationExecution() throws Exception {
ShardsIterator shardIt = clusterService.state().routingTable().allShards(new String[]{TEST_INDEX});
Set<ShardRouting> shards = new HashSet<>();
String nodeId = shardIt.asUnordered().iterator().next().currentNodeId();
for (ShardRouting shard : shardIt.asUnordered()) {
String nodeId = shardIt.iterator().next().currentNodeId();
for (ShardRouting shard : shardIt) {
if (nodeId.equals(shard.currentNodeId())) {
shards.add(shard);
}
@ -417,7 +417,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
ShardsIterator shardIt = clusterService.state().getRoutingTable().allShards(new String[]{TEST_INDEX});
Map<String, List<ShardRouting>> map = new HashMap<>();
for (ShardRouting shard : shardIt.asUnordered()) {
for (ShardRouting shard : shardIt) {
if (!map.containsKey(shard.currentNodeId())) {
map.put(shard.currentNodeId(), new ArrayList<>());
}


@ -263,18 +263,17 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
}
public void testDynamicUpdateMinimumMasterNodes() throws Exception {
Settings settings = Settings.builder()
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms")
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), "1")
.build();
Settings settingsWithMinMaster1 = Settings.builder()
.put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "400ms")
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 1)
.build();
logger.info("--> start first node and wait for it to be a master");
internalCluster().startNode(settings);
ensureClusterSizeConsistency();
Settings settingsWithMinMaster2 = Settings.builder()
.put(settingsWithMinMaster1).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)
.build();
// wait until the second node joins the cluster
logger.info("--> start second node and wait for it to join");
internalCluster().startNode(settings);
logger.info("--> start two nodes and wait for them to form a cluster");
internalCluster().startNodes(settingsWithMinMaster1, settingsWithMinMaster2);
ensureClusterSizeConsistency();
logger.info("--> setting minimum master node to 2");
@ -292,7 +291,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
assertNoMasterBlockOnAllNodes();
logger.info("--> bringing another node up");
internalCluster().startNode(Settings.builder().put(settings).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2).build());
internalCluster().startNode(settingsWithMinMaster2);
ensureClusterSizeConsistency();
}
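For context, the step behind the "--> setting minimum master node to 2" log line is a dynamic cluster settings update. A sketch of how such an update is typically issued; whether the test uses transient or persistent settings is not visible in this hunk:

client().admin().cluster().prepareUpdateSettings()
    .setTransientSettings(Settings.builder()
        .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2))
    .get();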


@ -165,10 +165,9 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCa
private List<ShardStateAction.ShardEntry> createExistingShards(ClusterState currentState, String reason) {
List<ShardRouting> shards = new ArrayList<>();
GroupShardsIterator shardGroups =
currentState.routingTable().allAssignedShardsGrouped(new String[] { INDEX }, true);
GroupShardsIterator<ShardIterator> shardGroups = currentState.routingTable().allAssignedShardsGrouped(new String[] { INDEX }, true);
for (ShardIterator shardIt : shardGroups) {
for (ShardRouting shard : shardIt.asUnordered()) {
for (ShardRouting shard : shardIt) {
shards.add(shard);
}
}


@ -43,7 +43,7 @@ public class GroupShardsIteratorTests extends ESTestCase {
list.add(new PlainShardIterator(new ShardId(index, 0), Arrays.asList(newRouting(index, 0, true))));
list.add(new PlainShardIterator(new ShardId(index, 1), Arrays.asList(newRouting(index, 1, true))));
GroupShardsIterator iter = new GroupShardsIterator(list);
GroupShardsIterator iter = new GroupShardsIterator<>(list);
assertEquals(7, iter.totalSizeWith1ForEmpty());
assertEquals(5, iter.size());
assertEquals(6, iter.totalSize());
@ -67,7 +67,7 @@ public class GroupShardsIteratorTests extends ESTestCase {
Collections.shuffle(list, random());
ArrayList<ShardIterator> actualIterators = new ArrayList<>();
GroupShardsIterator iter = new GroupShardsIterator(list);
GroupShardsIterator<ShardIterator> iter = new GroupShardsIterator<>(list);
for (ShardIterator shardsIterator : iter) {
actualIterators.add(shardsIterator);
}


@ -352,8 +352,7 @@ public class RoutingTableTests extends ESAllocationTestCase {
Set<String> insyncAids = shardTable.activeShards().stream().map(
shr -> shr.allocationId().getId()).collect(Collectors.toSet());
final ShardRouting primaryShard = shardTable.primaryShard();
if (primaryShard.initializing() && primaryShard.relocating() == false &&
RecoverySource.isInitialRecovery(primaryShard.recoverySource().getType()) == false ) {
if (primaryShard.initializing() && shardRouting.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE) {
// simulate a primary was initialized based on aid
insyncAids.add(primaryShard.allocationId().getId());
}


@ -16,7 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation;
package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
@ -29,19 +29,14 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.test.gateway.TestGatewayAllocator;
import java.util.Arrays;
@ -144,8 +139,7 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase {
}
private ClusterState createInitialClusterState(AllocationService service, Settings settings) {
RecoverySource.Type recoveryType = randomFrom(RecoverySource.Type.EMPTY_STORE,
RecoverySource.Type.LOCAL_SHARDS, RecoverySource.Type.SNAPSHOT);
RecoverySource.Type recoveryType = randomFrom(FilterAllocationDecider.INITIAL_RECOVERY_TYPES);
MetaData.Builder metaData = MetaData.builder();
final Settings.Builder indexSettings = settings(Version.CURRENT).put(settings);
final IndexMetaData sourceIndex;
@ -164,9 +158,6 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase {
}
final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder("idx").settings(indexSettings)
.numberOfShards(1).numberOfReplicas(1);
if (recoveryType == RecoverySource.Type.SNAPSHOT) {
indexMetaDataBuilder.putInSyncAllocationIds(0, Collections.singleton("_snapshot_restore"));
}
final IndexMetaData indexMetaData = indexMetaDataBuilder.build();
metaData.put(indexMetaData, false);
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
@ -174,11 +165,6 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase {
case EMPTY_STORE:
routingTableBuilder.addAsNew(indexMetaData);
break;
case SNAPSHOT:
routingTableBuilder.addAsRestore(indexMetaData, new RecoverySource.SnapshotRecoverySource(
new Snapshot("repository", new SnapshotId("snapshot_name", "snapshot_uuid")),
Version.CURRENT, indexMetaData.getIndex().getName()));
break;
case LOCAL_SHARDS:
routingTableBuilder.addAsFromCloseToOpen(sourceIndex);
routingTableBuilder.addAsNew(indexMetaData);
@ -192,7 +178,7 @@ public class FilterAllocationDeciderTests extends ESAllocationTestCase {
.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build();
clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2")))
.build();
return service.reroute(clusterState, "reroute", false);
return service.reroute(clusterState, "reroute");
}
public void testInvalidIPFilter() {


@ -376,7 +376,7 @@ public class RoutingIteratorTests extends ESAllocationTestCase {
OperationRouting operationRouting = new OperationRouting(Settings.EMPTY, new ClusterSettings(Settings.EMPTY,
ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
GroupShardsIterator shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0");
GroupShardsIterator<ShardIterator> shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_shards:0");
assertThat(shardIterators.size(), equalTo(1));
assertThat(shardIterators.iterator().next().shardId().id(), equalTo(0));
@ -443,7 +443,7 @@ public class RoutingIteratorTests extends ESAllocationTestCase {
clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
// When replicas haven't initialized, it comes back with the primary first, then initializing replicas
GroupShardsIterator shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_replica_first");
GroupShardsIterator<ShardIterator> shardIterators = operationRouting.searchShards(clusterState, new String[]{"test"}, null, "_replica_first");
assertThat(shardIterators.size(), equalTo(2)); // two potential shards
ShardIterator iter = shardIterators.iterator().next();
assertThat(iter.size(), equalTo(3)); // three potential candidates for the shard


@ -21,9 +21,8 @@ package org.elasticsearch.common.lucene.search.function;
import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.script.AbstractDoubleSearchScript;
import org.elasticsearch.script.LeafSearchScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.GeneralScriptException;
import org.elasticsearch.script.LeafSearchScript;
import org.elasticsearch.script.SearchScript;
import org.elasticsearch.test.ESTestCase;
@ -35,7 +34,7 @@ public class ScriptScoreFunctionTests extends ESTestCase {
*/
public void testScriptScoresReturnsNaN() throws IOException {
// script that always returns NaN
ScoreFunction scoreFunction = new ScriptScoreFunction(new Script("Double.NaN"), new SearchScript() {
ScoreFunction scoreFunction = new ScriptScoreFunction(mockScript("Double.NaN"), new SearchScript() {
@Override
public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException {
return new AbstractDoubleSearchScript() {


@ -585,6 +585,12 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
ensureStableCluster(nodes.size(), TimeValue.timeValueMillis(disruptionScheme.expectedTimeToHeal().millis() +
DISRUPTION_HEALING_OVERHEAD.millis()), true, node);
}
// in case of a bridge partition, shard allocation can fail "index.allocation.max_retries" times if the master
// is the super-connected node and recovery source and target are on opposite sides of the bridge
if (disruptionScheme instanceof NetworkDisruption &&
((NetworkDisruption) disruptionScheme).getDisruptedLinks() instanceof Bridge) {
assertAcked(client().admin().cluster().prepareReroute().setRetryFailed(true));
}
ensureGreen("test");
logger.info("validating successful docs");


@ -40,8 +40,6 @@ import org.elasticsearch.test.IndexSettingsModule;
import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import static java.util.Collections.emptyMap;


@ -1,86 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.analysis;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import static org.elasticsearch.test.ESTestCase.createTestAnalysis;
public class CharFilterTests extends ESTokenStreamTestCase {
public void testMappingCharFilter() throws Exception {
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put("index.analysis.char_filter.my_mapping.type", "mapping")
.putArray("index.analysis.char_filter.my_mapping.mappings", "ph=>f", "qu=>q")
.put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
.putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "my_mapping")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
NamedAnalyzer analyzer1 = indexAnalyzers.get("custom_with_char_filter");
assertTokenStreamContents(analyzer1.tokenStream("test", "jeff quit phish"), new String[]{"jeff", "qit", "fish"});
// Repeat one more time to make sure that char filter is reinitialized correctly
assertTokenStreamContents(analyzer1.tokenStream("test", "jeff quit phish"), new String[]{"jeff", "qit", "fish"});
}
public void testHtmlStripCharFilter() throws Exception {
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
.putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "html_strip")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
NamedAnalyzer analyzer1 = indexAnalyzers.get("custom_with_char_filter");
assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
// Repeat one more time to make sure that char filter is reinitialized correctly
assertTokenStreamContents(analyzer1.tokenStream("test", "<b>hello</b>!"), new String[]{"hello"});
}
public void testPatternReplaceCharFilter() throws Exception {
Settings settings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put("index.analysis.char_filter.my_mapping.type", "pattern_replace")
.put("index.analysis.char_filter.my_mapping.pattern", "ab*")
.put("index.analysis.char_filter.my_mapping.replacement", "oo")
.put("index.analysis.char_filter.my_mapping.flags", "CASE_INSENSITIVE")
.put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
.putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "my_mapping")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("test", settings);
IndexAnalyzers indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
NamedAnalyzer analyzer1 = indexAnalyzers.get("custom_with_char_filter");
assertTokenStreamContents(analyzer1.tokenStream("test", "faBBbBB aBbbbBf"), new String[]{"foo", "oof"});
}
}


@ -22,13 +22,18 @@ package org.elasticsearch.index.analysis;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;
import java.io.IOException;
import java.io.Reader;
import java.util.Map;
import static java.util.Collections.singletonMap;
public class CustomNormalizerTests extends ESTokenStreamTestCase {
public void testBasics() throws IOException {
Settings settings = Settings.builder()
.putArray("index.analysis.normalizer.my_normalizer.filter", "lowercase", "asciifolding")
@ -66,12 +71,11 @@ public class CustomNormalizerTests extends ESTokenStreamTestCase {
public void testCharFilters() throws IOException {
Settings settings = Settings.builder()
.put("index.analysis.char_filter.my_mapping.type", "mapping")
.putArray("index.analysis.char_filter.my_mapping.mappings", "a => z")
.put("index.analysis.char_filter.my_mapping.type", "mock_char_filter")
.putArray("index.analysis.normalizer.my_normalizer.char_filter", "my_mapping")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
.build();
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new MockCharFilterPlugin());
assertNull(analysis.indexAnalyzers.get("my_normalizer"));
NamedAnalyzer normalizer = analysis.indexAnalyzers.getNormalizer("my_normalizer");
assertNotNull(normalizer);
@ -99,4 +103,44 @@ public class CustomNormalizerTests extends ESTokenStreamTestCase {
() -> AnalysisTestsHelper.createTestAnalysisFromSettings(settings));
assertEquals("Custom normalizer [my_normalizer] may not use char filter [html_strip]", e.getMessage());
}
private class MockCharFilterPlugin implements AnalysisPlugin {
@Override
public Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() {
return singletonMap("mock_char_filter", (indexSettings, env, name, settings) -> {
class Factory implements CharFilterFactory, MultiTermAwareComponent {
@Override
public String name() {
return name;
}
@Override
public Reader create(Reader reader) {
return new Reader() {
@Override
public int read(char[] cbuf, int off, int len) throws IOException {
int result = reader.read(cbuf, off, len);
// result is the number of chars read (or -1 at EOF), so scan [off, off + result)
for (int i = off; i < off + result; i++) {
if (cbuf[i] == 'a') {
cbuf[i] = 'z';
}
}
return result;
}
@Override
public void close() throws IOException {
reader.close();
}
};
}
@Override
public Object getMultiTermComponent() {
return this;
}
}
return new Factory();
});
}
}
}
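The mock plugin above lets the normalizer test run without the real mapping char filter: the factory decorates the incoming Reader and rewrites 'a' to 'z' on the fly, and getMultiTermComponent() returning this marks it as usable inside a normalizer. Condensed from the wiring in testCharFilters above, the registration and lookup look like this (run from inside the test class, since MockCharFilterPlugin is a private inner class):

Settings settings = Settings.builder()
    .put("index.analysis.char_filter.my_mapping.type", "mock_char_filter")
    .putArray("index.analysis.normalizer.my_normalizer.char_filter", "my_mapping")
    .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
    .build();
ESTestCase.TestAnalysis analysis =
    AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new MockCharFilterPlugin());
NamedAnalyzer normalizer = analysis.indexAnalyzers.getNormalizer("my_normalizer");
// feeding "abc" through the normalizer now yields "zbc"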


@ -35,7 +35,6 @@ import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardIterator;
@ -52,7 +51,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MergePolicyConfig;
@ -73,9 +71,6 @@ import org.elasticsearch.test.MockIndexEventListener;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.store.MockFSIndexStore;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.ConnectionProfile;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;
@ -292,7 +287,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
}
assertThat(response.getStatus(), is(ClusterHealthStatus.RED));
ClusterState state = client().admin().cluster().prepareState().get().getState();
GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
GroupShardsIterator<ShardIterator> shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
for (ShardIterator iterator : shardIterators) {
ShardRouting routing;
while ((routing = iterator.nextOrNull()) != null) {


@ -150,10 +150,10 @@ public class SuggestStatsIT extends ESIntegTestCase {
private Set<String> nodeIdsWithIndex(String... indices) {
ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
GroupShardsIterator allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
GroupShardsIterator<ShardIterator> allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
Set<String> nodes = new HashSet<>();
for (ShardIterator shardIterator : allAssignedShardsGrouped) {
for (ShardRouting routing : shardIterator.asUnordered()) {
for (ShardRouting routing : shardIterator) {
if (routing.active()) {
nodes.add(routing.currentNodeId());
}


@ -32,7 +32,6 @@ import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.inject.ModuleTestCase;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
@ -40,17 +39,17 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.Analysis;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisTestsHelper;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.MappingCharFilterFactory;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.PatternReplaceCharFilterFactory;
import org.elasticsearch.index.analysis.StandardTokenizerFactory;
import org.elasticsearch.index.analysis.StopTokenFilterFactory;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.filter1.MyFilterTokenFilterFactory;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import org.elasticsearch.test.VersionUtils;
import org.hamcrest.MatcherAssert;
@ -72,7 +71,7 @@ import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
public class AnalysisModuleTests extends ModuleTestCase {
public class AnalysisModuleTests extends ESTestCase {
public IndexAnalyzers getIndexAnalyzers(Settings settings) throws IOException {
return getIndexAnalyzers(getNewRegistry(settings), settings);
@ -90,6 +89,11 @@ public class AnalysisModuleTests extends ModuleTestCase {
public Map<String, AnalysisProvider<TokenFilterFactory>> getTokenFilters() {
return singletonMap("myfilter", MyFilterTokenFilterFactory::new);
}
@Override
public Map<String, AnalysisProvider<CharFilterFactory>> getCharFilters() {
return AnalysisPlugin.super.getCharFilters();
}
})).getAnalysisRegistry();
} catch (IOException e) {
throw new RuntimeException(e);
@ -184,29 +188,12 @@ public class AnalysisModuleTests extends ModuleTestCase {
StopTokenFilterFactory stop1 = (StopTokenFilterFactory) custom1.tokenFilters()[0];
assertThat(stop1.stopWords().size(), equalTo(1));
analyzer = indexAnalyzers.get("custom2").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
// verify position increment gap
analyzer = indexAnalyzers.get("custom6").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom6 = (CustomAnalyzer) analyzer;
assertThat(custom6.getPositionIncrementGap("any_string"), equalTo(256));
// verify characters mapping
analyzer = indexAnalyzers.get("custom5").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom5 = (CustomAnalyzer) analyzer;
assertThat(custom5.charFilters()[0], instanceOf(MappingCharFilterFactory.class));
// check custom pattern replace filter
analyzer = indexAnalyzers.get("custom3").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));
CustomAnalyzer custom3 = (CustomAnalyzer) analyzer;
PatternReplaceCharFilterFactory patternReplaceCharFilterFactory = (PatternReplaceCharFilterFactory) custom3.charFilters()[0];
assertThat(patternReplaceCharFilterFactory.getPattern().pattern(), equalTo("sample(.*)"));
assertThat(patternReplaceCharFilterFactory.getReplacement(), equalTo("replacedSample $1"));
// check custom class name (my)
analyzer = indexAnalyzers.get("custom4").analyzer();
assertThat(analyzer, instanceOf(CustomAnalyzer.class));


@ -111,36 +111,6 @@ public class AnalyzeActionIT extends ESIntegTestCase {
assertThat(analyzeResponse.getTokens().get(0).getPositionLength(), equalTo(1));
}
public void testAnalyzeWithCharFilters() throws Exception {
assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
.setSettings(Settings.builder().put(indexSettings())
.put("index.analysis.char_filter.custom_mapping.type", "mapping")
.putArray("index.analysis.char_filter.custom_mapping.mappings", "ph=>f", "qu=>q")
.put("index.analysis.analyzer.custom_with_char_filter.tokenizer", "standard")
.putArray("index.analysis.analyzer.custom_with_char_filter.char_filter", "custom_mapping")));
ensureGreen();
AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("<h2><b>THIS</b> IS A</h2> <a href=\"#\">TEST</a>").setTokenizer("standard").addCharFilter("html_strip").get();
assertThat(analyzeResponse.getTokens().size(), equalTo(4));
analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A <b>TEST</b>").setTokenizer("keyword").addTokenFilter("lowercase").addCharFilter("html_strip").get();
assertThat(analyzeResponse.getTokens().size(), equalTo(1));
assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("this is a test"));
analyzeResponse = client().admin().indices().prepareAnalyze(indexOrAlias(), "jeff quit phish").setTokenizer("keyword").addTokenFilter("lowercase").addCharFilter("custom_mapping").get();
assertThat(analyzeResponse.getTokens().size(), equalTo(1));
assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("jeff qit fish"));
analyzeResponse = client().admin().indices().prepareAnalyze(indexOrAlias(), "<a href=\"#\">jeff quit fish</a>").setTokenizer("standard").addCharFilter("html_strip").addCharFilter("custom_mapping").get();
assertThat(analyzeResponse.getTokens().size(), equalTo(3));
AnalyzeResponse.AnalyzeToken token = analyzeResponse.getTokens().get(0);
assertThat(token.getTerm(), equalTo("jeff"));
token = analyzeResponse.getTokens().get(1);
assertThat(token.getTerm(), equalTo("qit"));
token = analyzeResponse.getTokens().get(2);
assertThat(token.getTerm(), equalTo("fish"));
}
public void testAnalyzeWithNonDefaultPostionLength() throws Exception {
assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
.setSettings(Settings.builder().put(indexSettings())
@ -263,46 +233,6 @@ public class AnalyzeActionIT extends ESIntegTestCase {
assertThat(token.getPositionLength(), equalTo(1));
}
public void testDetailAnalyze() throws Exception {
assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
.setSettings(
Settings.builder()
.put("index.analysis.char_filter.my_mapping.type", "mapping")
.putArray("index.analysis.char_filter.my_mapping.mappings", "PH=>F")
.put("index.analysis.analyzer.test_analyzer.type", "custom")
.put("index.analysis.analyzer.test_analyzer.position_increment_gap", "100")
.put("index.analysis.analyzer.test_analyzer.tokenizer", "standard")
.putArray("index.analysis.analyzer.test_analyzer.char_filter", "my_mapping")
.putArray("index.analysis.analyzer.test_analyzer.filter", "snowball")));
ensureGreen();
for (int i = 0; i < 10; i++) {
AnalyzeResponse analyzeResponse = admin().indices().prepareAnalyze().setIndex(indexOrAlias()).setText("THIS IS A PHISH")
.setExplain(true).addCharFilter("my_mapping").setTokenizer("keyword").addTokenFilter("lowercase").get();
assertThat(analyzeResponse.detail().analyzer(), IsNull.nullValue());
//charfilters
assertThat(analyzeResponse.detail().charfilters().length, equalTo(1));
assertThat(analyzeResponse.detail().charfilters()[0].getName(), equalTo("my_mapping"));
assertThat(analyzeResponse.detail().charfilters()[0].getTexts().length, equalTo(1));
assertThat(analyzeResponse.detail().charfilters()[0].getTexts()[0], equalTo("THIS IS A FISH"));
//tokenizer
assertThat(analyzeResponse.detail().tokenizer().getName(), equalTo("keyword"));
assertThat(analyzeResponse.detail().tokenizer().getTokens().length, equalTo(1));
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getTerm(), equalTo("THIS IS A FISH"));
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getStartOffset(), equalTo(0));
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getEndOffset(), equalTo(15));
//tokenfilters
assertThat(analyzeResponse.detail().tokenfilters().length, equalTo(1));
assertThat(analyzeResponse.detail().tokenfilters()[0].getName(), equalTo("lowercase"));
assertThat(analyzeResponse.detail().tokenfilters()[0].getTokens().length, equalTo(1));
assertThat(analyzeResponse.detail().tokenfilters()[0].getTokens()[0].getTerm(), equalTo("this is a fish"));
assertThat(analyzeResponse.detail().tokenfilters()[0].getTokens()[0].getPosition(), equalTo(0));
assertThat(analyzeResponse.detail().tokenfilters()[0].getTokens()[0].getStartOffset(), equalTo(0));
assertThat(analyzeResponse.detail().tokenfilters()[0].getTokens()[0].getEndOffset(), equalTo(15));
}
}
public void testDetailAnalyzeWithNoIndex() throws Exception {
//analyzer only
AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze("THIS IS A TEST")
@ -414,90 +344,6 @@ public class AnalyzeActionIT extends ESIntegTestCase {
assertThat(token.getPositionLength(), equalTo(1));
}
public void testDetailAnalyzeWithMultiValuesWithCustomAnalyzer() throws Exception {
assertAcked(prepareCreate("test").addAlias(new Alias("alias"))
.setSettings(
Settings.builder()
.put("index.analysis.char_filter.my_mapping.type", "mapping")
.putArray("index.analysis.char_filter.my_mapping.mappings", "PH=>F")
.put("index.analysis.analyzer.test_analyzer.type", "custom")
.put("index.analysis.analyzer.test_analyzer.position_increment_gap", "100")
.put("index.analysis.analyzer.test_analyzer.tokenizer", "standard")
.putArray("index.analysis.analyzer.test_analyzer.char_filter", "my_mapping")
.putArray("index.analysis.analyzer.test_analyzer.filter", "snowball", "lowercase")));
ensureGreen();
client().admin().indices().preparePutMapping("test")
.setType("document").setSource("simple", "type=text,analyzer=simple,position_increment_gap=100").get();
//only analyzer =
String[] texts = new String[]{"this is a PHISH", "the troubled text"};
AnalyzeResponse analyzeResponse = client().admin().indices().prepareAnalyze().setIndex(indexOrAlias()).setText(texts)
.setExplain(true).setAnalyzer("test_analyzer").setText(texts).execute().get();
// charfilter
assertThat(analyzeResponse.detail().charfilters().length, equalTo(1));
assertThat(analyzeResponse.detail().charfilters()[0].getName(), equalTo("my_mapping"));
assertThat(analyzeResponse.detail().charfilters()[0].getTexts().length, equalTo(2));
assertThat(analyzeResponse.detail().charfilters()[0].getTexts()[0], equalTo("this is a FISH"));
assertThat(analyzeResponse.detail().charfilters()[0].getTexts()[1], equalTo("the troubled text"));
// tokenizer
assertThat(analyzeResponse.detail().tokenizer().getName(), equalTo("standard"));
assertThat(analyzeResponse.detail().tokenizer().getTokens().length, equalTo(7));
AnalyzeResponse.AnalyzeToken token = analyzeResponse.detail().tokenizer().getTokens()[3];
assertThat(token.getTerm(), equalTo("FISH"));
assertThat(token.getPosition(), equalTo(3));
assertThat(token.getStartOffset(), equalTo(10));
assertThat(token.getEndOffset(), equalTo(15));
assertThat(token.getPositionLength(), equalTo(1));
token = analyzeResponse.detail().tokenizer().getTokens()[5];
assertThat(token.getTerm(), equalTo("troubled"));
assertThat(token.getPosition(), equalTo(105));
assertThat(token.getStartOffset(), equalTo(20));
assertThat(token.getEndOffset(), equalTo(28));
assertThat(token.getPositionLength(), equalTo(1));
// tokenfilter(snowball)
assertThat(analyzeResponse.detail().tokenfilters().length, equalTo(2));
assertThat(analyzeResponse.detail().tokenfilters()[0].getName(), equalTo("snowball"));
assertThat(analyzeResponse.detail().tokenfilters()[0].getTokens().length, equalTo(7));
token = analyzeResponse.detail().tokenfilters()[0].getTokens()[3];
assertThat(token.getTerm(), equalTo("FISH"));
assertThat(token.getPosition(), equalTo(3));
assertThat(token.getStartOffset(), equalTo(10));
assertThat(token.getEndOffset(), equalTo(15));
assertThat(token.getPositionLength(), equalTo(1));
token = analyzeResponse.detail().tokenfilters()[0].getTokens()[5];
assertThat(token.getTerm(), equalTo("troubl"));
assertThat(token.getPosition(), equalTo(105));
assertThat(token.getStartOffset(), equalTo(20));
assertThat(token.getEndOffset(), equalTo(28));
assertThat(token.getPositionLength(), equalTo(1));
// tokenfilter(lowercase)
assertThat(analyzeResponse.detail().tokenfilters()[1].getName(), equalTo("lowercase"));
assertThat(analyzeResponse.detail().tokenfilters()[1].getTokens().length, equalTo(7));
token = analyzeResponse.detail().tokenfilters()[1].getTokens()[3];
assertThat(token.getTerm(), equalTo("fish"));
assertThat(token.getPosition(), equalTo(3));
assertThat(token.getStartOffset(), equalTo(10));
assertThat(token.getEndOffset(), equalTo(15));
assertThat(token.getPositionLength(), equalTo(1));
token = analyzeResponse.detail().tokenfilters()[0].getTokens()[5];
assertThat(token.getTerm(), equalTo("troubl"));
assertThat(token.getPosition(), equalTo(105));
assertThat(token.getStartOffset(), equalTo(20));
assertThat(token.getEndOffset(), equalTo(28));
assertThat(token.getPositionLength(), equalTo(1));
}
public void testNonExistTokenizer() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> client().admin().indices()
@ -575,35 +421,6 @@ public class AnalyzeActionIT extends ESIntegTestCase {
assertThat(analyzeResponse.detail().tokenfilters()[1].getTokens()[0].getPositionLength(), equalTo(1));
}
public void testCustomCharFilterInRequest() throws Exception {
Map<String, Object> charFilterSettings = new HashMap<>();
charFilterSettings.put("type", "mapping");
charFilterSettings.put("mappings", new String[]{"ph => f", "qu => q"});
AnalyzeResponse analyzeResponse = client().admin().indices()
.prepareAnalyze()
.setText("jeff quit phish")
.setTokenizer("keyword")
.addCharFilter(charFilterSettings)
.setExplain(true)
.get();
assertThat(analyzeResponse.detail().analyzer(), IsNull.nullValue());
//charfilters
assertThat(analyzeResponse.detail().charfilters().length, equalTo(1));
assertThat(analyzeResponse.detail().charfilters()[0].getName(), equalTo("_anonymous_charfilter_[0]"));
assertThat(analyzeResponse.detail().charfilters()[0].getTexts().length, equalTo(1));
assertThat(analyzeResponse.detail().charfilters()[0].getTexts()[0], equalTo("jeff qit fish"));
//tokenizer
assertThat(analyzeResponse.detail().tokenizer().getName(), equalTo("keyword"));
assertThat(analyzeResponse.detail().tokenizer().getTokens().length, equalTo(1));
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getTerm(), equalTo("jeff qit fish"));
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getStartOffset(), equalTo(0));
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getEndOffset(), equalTo(15));
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getPositionLength(), equalTo(1));
}
public void testCustomTokenizerInRequest() throws Exception {
Map<String, Object> tokenizerSettings = new HashMap<>();
tokenizerSettings.put("type", "nGram");


@ -20,6 +20,7 @@
package org.elasticsearch.search;
import org.apache.lucene.search.Explanation;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
@ -128,7 +129,8 @@ public class SearchHitTests extends ESTestCase {
}
if (randomBoolean()) {
hit.shard(new SearchShardTarget(randomAlphaOfLengthBetween(5, 10),
new ShardId(new Index(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLengthBetween(5, 10)), randomInt())));
new ShardId(new Index(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLengthBetween(5, 10)), randomInt()),
OriginalIndices.NONE));
}
return hit;
}
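SearchShardTarget now carries the OriginalIndices as a third constructor argument; tests that do not exercise cross-cluster search pass OriginalIndices.NONE (or null) as a sentinel, as the hunks above show. A minimal sketch with made-up names:

SearchShardTarget target = new SearchShardTarget("node_1",
    new ShardId(new Index("some_index", "some_uuid"), 0), OriginalIndices.NONE);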


@ -184,8 +184,8 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
for (int i = 0; i < rounds; i++) {
try {
SearchPhaseResult searchPhaseResult = service.executeQueryPhase(
new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f),
new ShardSearchLocalRequest(indexShard.shardId(), 1, SearchType.DEFAULT,
new SearchSourceBuilder(), new String[0], false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1.0f),
new SearchTask(123L, "", "", "", null));
IntArrayList intCursors = new IntArrayList(1);
intCursors.add(0);
@ -213,16 +213,16 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
final IndexService indexService = indicesService.indexServiceSafe(resolveIndex("index"));
final IndexShard indexShard = indexService.getShard(0);
final SearchContext contextWithDefaultTimeout = service.createContext(
new ShardSearchLocalRequest(
indexShard.shardId(),
1,
SearchType.DEFAULT,
new SearchSourceBuilder(),
new String[0],
false,
new AliasFilter(null, Strings.EMPTY_ARRAY),
1.0f),
null);
new ShardSearchLocalRequest(
indexShard.shardId(),
1,
SearchType.DEFAULT,
new SearchSourceBuilder(),
new String[0],
false,
new AliasFilter(null, Strings.EMPTY_ARRAY),
1.0f),
null);
try {
// the search context should inherit the default timeout
assertThat(contextWithDefaultTimeout.timeout(), equalTo(TimeValue.timeValueSeconds(5)));
@ -233,15 +233,15 @@ public class SearchServiceTests extends ESSingleNodeTestCase {
final long seconds = randomIntBetween(6, 10);
final SearchContext context = service.createContext(
new ShardSearchLocalRequest(
indexShard.shardId(),
1,
SearchType.DEFAULT,
new SearchSourceBuilder().timeout(TimeValue.timeValueSeconds(seconds)),
new String[0],
false,
new AliasFilter(null, Strings.EMPTY_ARRAY),
1.0f),
new ShardSearchLocalRequest(
indexShard.shardId(),
1,
SearchType.DEFAULT,
new SearchSourceBuilder().timeout(TimeValue.timeValueSeconds(seconds)),
new String[0],
false,
new AliasFilter(null, Strings.EMPTY_ARRAY),
1.0f),
null);
try {
// the search context should inherit the query timeout


@ -19,6 +19,7 @@
package org.elasticsearch.search.aggregations;
import org.apache.lucene.index.CompositeReaderContext;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;
@ -31,8 +32,10 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.MockBigArrays;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache.Listener;
@ -48,6 +51,7 @@ import org.elasticsearch.index.mapper.ObjectMapper;
import org.elasticsearch.index.mapper.ObjectMapper.Nested;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.support.NestedScope;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
@ -289,4 +293,8 @@ public abstract class AggregatorTestCase extends ESTestCase {
return "ShardSearcher(" + ctx.get(0) + ")";
}
}
protected static DirectoryReader wrap(DirectoryReader directoryReader) throws IOException {
return ElasticsearchDirectoryReader.wrap(directoryReader, new ShardId(new Index("_index", "_na_"), 0));
}
}
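The new wrap helper binds a ShardId to the reader through ElasticsearchDirectoryReader, which shard-aware aggregations such as nested need in order to locate per-shard state. That is also why NestedAggregatorTests below passes false as newSearcher's second argument: letting the test framework wrap the reader again would hide the ElasticsearchDirectoryReader. A minimal usage sketch, assuming a Lucene Directory that has already been written to:

try (IndexReader indexReader = wrap(DirectoryReader.open(directory))) {
    // false: keep the reader as-is so the ShardId attached by wrap() stays visible
    IndexSearcher searcher = newSearcher(indexReader, false, true);
    // ... run the aggregation under test against searcher ...
}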


@ -34,6 +34,7 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.test.ESTestCase;
@ -176,4 +177,22 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
return INT_FIELD_NAME;
}
}
protected void randomFieldOrScript(ValuesSourceAggregationBuilder<?, ?> factory, String field) {
int choice = randomInt(2);
switch (choice) {
case 0:
factory.field(field);
break;
case 1:
factory.field(field);
factory.script(mockScript("_value + 1"));
break;
case 2:
factory.script(mockScript("doc[" + field + "] + 1"));
break;
default:
throw new AssertionError("Unknown random operation [" + choice + "]");
}
}
}
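randomFieldOrScript centralizes the field/script/both choice that SignificantTermsTests and TermsTests below previously duplicated in hand-rolled switch statements. A typical call site now collapses to:

TermsAggregationBuilder factory = new TermsAggregationBuilder(randomAlphaOfLengthBetween(3, 20), null);
randomFieldOrScript(factory, randomAlphaOfLengthBetween(3, 20));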


@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.bucket;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.RegExp;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.BaseAggregationTestCase;
import org.elasticsearch.search.aggregations.bucket.significant.SignificantTermsAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ChiSquare;
@ -34,6 +33,7 @@ import org.elasticsearch.search.aggregations.bucket.significant.heuristics.Scrip
import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
import java.util.SortedSet;
import java.util.TreeSet;
@ -54,21 +54,8 @@ public class SignificantTermsTests extends BaseAggregationTestCase<SignificantTe
String name = randomAlphaOfLengthBetween(3, 20);
SignificantTermsAggregationBuilder factory = new SignificantTermsAggregationBuilder(name, null);
String field = randomAlphaOfLengthBetween(3, 20);
int randomFieldBranch = randomInt(2);
switch (randomFieldBranch) {
case 0:
factory.field(field);
break;
case 1:
factory.field(field);
factory.script(new Script("_value + 1"));
break;
case 2:
factory.script(new Script("doc[" + field + "] + 1"));
break;
default:
fail();
}
randomFieldOrScript(factory, field);
if (randomBoolean()) {
factory.missing("MISSING");
}
@ -179,7 +166,7 @@ public class SignificantTermsTests extends BaseAggregationTestCase<SignificantTe
significanceHeuristic = new MutualInformation(randomBoolean(), randomBoolean());
break;
case 4:
significanceHeuristic = new ScriptHeuristic(new Script("foo"));
significanceHeuristic = new ScriptHeuristic(mockScript("foo"));
break;
case 5:
significanceHeuristic = new JLHScore();


@ -21,13 +21,13 @@ package org.elasticsearch.search.aggregations.bucket;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.RegExp;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
import org.elasticsearch.search.aggregations.BaseAggregationTestCase;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory.ExecutionMode;
import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;
@ -50,21 +50,7 @@ public class TermsTests extends BaseAggregationTestCase<TermsAggregationBuilder>
String name = randomAlphaOfLengthBetween(3, 20);
TermsAggregationBuilder factory = new TermsAggregationBuilder(name, null);
String field = randomAlphaOfLengthBetween(3, 20);
int randomFieldBranch = randomInt(2);
switch (randomFieldBranch) {
case 0:
factory.field(field);
break;
case 1:
factory.field(field);
factory.script(new Script("_value + 1"));
break;
case 2:
factory.script(new Script("doc[" + field + "] + 1"));
break;
default:
fail();
}
randomFieldOrScript(factory, field);
if (randomBoolean()) {
factory.missing("MISSING");
}


@ -65,7 +65,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
// intentionally not writing any docs
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
try (IndexReader indexReader = wrap(DirectoryReader.open(directory))) {
NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG,
NESTED_OBJECT);
MaxAggregationBuilder maxAgg = new MaxAggregationBuilder(MAX_AGG_NAME)
@ -75,7 +75,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
NumberFieldMapper.NumberType.LONG);
fieldType.setName(VALUE_FIELD_NAME);
Nested nested = search(newSearcher(indexReader, true, true),
Nested nested = search(newSearcher(indexReader, false, true),
new MatchAllDocsQuery(), nestedBuilder, fieldType);
assertEquals(NESTED_AGG, nested.getName());
@ -112,7 +112,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
}
iw.commit();
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
try (IndexReader indexReader = wrap(DirectoryReader.open(directory))) {
NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG,
NESTED_OBJECT);
MaxAggregationBuilder maxAgg = new MaxAggregationBuilder(MAX_AGG_NAME)
@ -122,7 +122,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
NumberFieldMapper.NumberType.LONG);
fieldType.setName(VALUE_FIELD_NAME);
Nested nested = search(newSearcher(indexReader, true, true),
Nested nested = search(newSearcher(indexReader, false, true),
new MatchAllDocsQuery(), nestedBuilder, fieldType);
assertEquals(expectedNestedDocs, nested.getDocCount());
@ -160,7 +160,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
}
iw.commit();
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
try (IndexReader indexReader = wrap(DirectoryReader.open(directory))) {
NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG,
NESTED_OBJECT + "." + NESTED_OBJECT2);
MaxAggregationBuilder maxAgg = new MaxAggregationBuilder(MAX_AGG_NAME)
@ -171,7 +171,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
NumberFieldMapper.NumberType.LONG);
fieldType.setName(VALUE_FIELD_NAME);
Nested nested = search(newSearcher(indexReader, true, true),
Nested nested = search(newSearcher(indexReader, false, true),
new MatchAllDocsQuery(), nestedBuilder, fieldType);
assertEquals(expectedNestedDocs, nested.getDocCount());
@ -213,7 +213,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
iw.addDocuments(documents);
iw.commit();
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
try (IndexReader indexReader = wrap(DirectoryReader.open(directory))) {
NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG,
NESTED_OBJECT);
SumAggregationBuilder sumAgg = new SumAggregationBuilder(SUM_AGG_NAME)
@ -223,7 +223,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
NumberFieldMapper.NumberType.LONG);
fieldType.setName(VALUE_FIELD_NAME);
Nested nested = search(newSearcher(indexReader, true, true),
Nested nested = search(newSearcher(indexReader, false, true),
new MatchAllDocsQuery(), nestedBuilder, fieldType);
assertEquals(expectedNestedDocs, nested.getDocCount());
@ -292,7 +292,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
iw.commit();
iw.close();
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
try (IndexReader indexReader = wrap(DirectoryReader.open(directory))) {
NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG,
"nested_field");
@ -304,7 +304,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
bq.add(Queries.newNonNestedFilter(), BooleanClause.Occur.MUST);
bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), BooleanClause.Occur.MUST_NOT);
Nested nested = search(newSearcher(indexReader, true, true),
Nested nested = search(newSearcher(indexReader, false, true),
new ConstantScoreQuery(bq.build()), nestedBuilder, fieldType);
assertEquals(NESTED_AGG, nested.getName());
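
A note on the pattern repeated through this file: each test now passes the reader through wrap(...) before building a searcher, and flips the second argument of LuceneTestCase.newSearcher (maybeWrap) from true to false. The sketch below restates the idiom with comments; the assumption (not stated in the diff) is that wrap(...) applies the directory-reader wrapping that nested doc resolution requires, which random re-wrapping by newSearcher could otherwise hide:

    // Annotated restatement of the idiom above; wrap(...)'s exact behavior
    // is an assumption, not something this diff shows.
    try (IndexReader indexReader = wrap(DirectoryReader.open(directory))) {
        // newSearcher(reader, maybeWrap, wrapWithAssertions): maybeWrap=false
        // so the test framework cannot re-wrap the reader and strip the
        // wrapper that the nested aggregator relies on.
        Nested nested = search(newSearcher(indexReader, false, true),
                new MatchAllDocsQuery(), nestedBuilder, fieldType);
        assertEquals(NESTED_AGG, nested.getName());
    }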


@ -54,7 +54,7 @@ public class ReverseNestedAggregatorTests extends AggregatorTestCase {
try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
// intentionally not writing any docs
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
try (IndexReader indexReader = wrap(DirectoryReader.open(directory))) {
NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG,
NESTED_OBJECT);
ReverseNestedAggregationBuilder reverseNestedBuilder
@ -67,7 +67,7 @@ public class ReverseNestedAggregatorTests extends AggregatorTestCase {
NumberFieldMapper.NumberType.LONG);
fieldType.setName(VALUE_FIELD_NAME);
Nested nested = search(newSearcher(indexReader, true, true),
Nested nested = search(newSearcher(indexReader, false, true),
new MatchAllDocsQuery(), nestedBuilder, fieldType);
ReverseNested reverseNested = (ReverseNested)
((InternalAggregation)nested).getProperty(REVERSE_AGG_NAME);
@ -117,7 +117,7 @@ public class ReverseNestedAggregatorTests extends AggregatorTestCase {
}
iw.commit();
}
try (IndexReader indexReader = DirectoryReader.open(directory)) {
try (IndexReader indexReader = wrap(DirectoryReader.open(directory))) {
NestedAggregationBuilder nestedBuilder = new NestedAggregationBuilder(NESTED_AGG,
NESTED_OBJECT);
ReverseNestedAggregationBuilder reverseNestedBuilder
@ -130,7 +130,7 @@ public class ReverseNestedAggregatorTests extends AggregatorTestCase {
NumberFieldMapper.NumberType.LONG);
fieldType.setName(VALUE_FIELD_NAME);
Nested nested = search(newSearcher(indexReader, true, true),
Nested nested = search(newSearcher(indexReader, false, true),
new MatchAllDocsQuery(), nestedBuilder, fieldType);
assertEquals(expectedNestedDocs, nested.getDocCount());


@ -19,7 +19,6 @@
package org.elasticsearch.search.aggregations.bucket.sampler;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.BaseAggregationTestCase;
import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregator.ExecutionMode;
@ -29,19 +28,7 @@ public class DiversifiedAggregationBuilderTests extends BaseAggregationTestCase<
protected final DiversifiedAggregationBuilder createTestAggregatorBuilder() {
DiversifiedAggregationBuilder factory = new DiversifiedAggregationBuilder("foo");
String field = randomNumericField();
int randomFieldBranch = randomInt(3);
switch (randomFieldBranch) {
case 0:
factory.field(field);
break;
case 1:
factory.field(field);
factory.script(new Script("_value + 1"));
break;
case 2:
factory.script(new Script("doc[" + field + "] + 1"));
break;
}
randomFieldOrScript(factory, field);
if (randomBoolean()) {
factory.missing("MISSING");
}
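
Note that this hunk slightly narrows the randomization: randomInt(max) is inclusive of max, so the removed randomInt(3) switch also produced a branch (value 3) that set neither field nor script, while the shared helper always sets one or both and throws an AssertionError on any unhandled value.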
