Merge branch 'master' into rankeval

This commit is contained in:
Christoph Büscher 2017-11-24 16:25:05 +01:00
commit 5661b1c3df
165 changed files with 2448 additions and 750 deletions

View File

@ -17,15 +17,15 @@
* under the License.
*/
import java.nio.file.Path
import java.util.regex.Matcher
import org.eclipse.jgit.lib.Repository
import org.eclipse.jgit.lib.RepositoryBuilder
import org.gradle.plugins.ide.eclipse.model.SourceFolder
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.VersionProperties
import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.VersionCollection
import org.elasticsearch.gradle.VersionProperties
import org.gradle.plugins.ide.eclipse.model.SourceFolder
import java.nio.file.Path
// common maven publishing configuration
subprojects {
@ -67,72 +67,16 @@ configure(subprojects.findAll { it.projectDir.toPath().startsWith(rootPath) }) {
}
}
/* Introspect all versions of ES that may be tested agains for backwards
/* Introspect all versions of ES that may be tested against for backwards
* compatibility. It is *super* important that this logic is the same as the
* logic in VersionUtils.java, throwing out alphas because they don't have any
* backwards compatibility guarantees and only keeping the latest beta or rc
* in a branch if there are only betas and rcs in the branch so we have
* *something* to test against. */
Version currentVersion = Version.fromString(VersionProperties.elasticsearch.minus('-SNAPSHOT'))
int prevMajor = currentVersion.major - 1
File versionFile = file('core/src/main/java/org/elasticsearch/Version.java')
List<String> versionLines = versionFile.readLines('UTF-8')
List<Version> versions = []
// keep track of the previous major version's last minor, so we know where wire compat begins
int prevMinorIndex = -1 // index in the versions list of the last minor from the prev major
int lastPrevMinor = -1 // the minor version number from the prev major we most recently seen
int prevBugfixIndex = -1 // index in the versions list of the last bugfix release from the prev major
for (String line : versionLines) {
/* Note that this skips alphas and betas which is fine because they aren't
* compatible with anything. */
Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+)(_beta\d+|_rc\d+)? .*/
if (match.matches()) {
int major = Integer.parseInt(match.group(1))
int minor = Integer.parseInt(match.group(2))
int bugfix = Integer.parseInt(match.group(3))
String suffix = (match.group(4) ?: '').replace('_', '-')
Version foundVersion = new Version(major, minor, bugfix, suffix, false)
if (currentVersion != foundVersion
&& (major == prevMajor || major == currentVersion.major)) {
if (versions.isEmpty() || versions.last() != foundVersion) {
versions.add(foundVersion)
} else {
// Replace the earlier betas with later ones
Version last = versions.set(versions.size() - 1, foundVersion)
if (last.suffix == '') {
throw new InvalidUserDataException("Found two equal versions but"
+ " the first one [$last] wasn't a beta.")
}
}
if (major == prevMajor && minor > lastPrevMinor) {
prevMinorIndex = versions.size() - 1
lastPrevMinor = minor
}
}
if (major == prevMajor) {
prevBugfixIndex = versions.size() - 1
}
}
}
if (versions.toSorted { it.id } != versions) {
println "Versions: ${versions}"
throw new GradleException("Versions.java contains out of order version constants")
}
if (prevBugfixIndex != -1) {
versions[prevBugfixIndex] = new Version(versions[prevBugfixIndex].major, versions[prevBugfixIndex].minor,
versions[prevBugfixIndex].bugfix, versions[prevBugfixIndex].suffix, true)
}
if (currentVersion.bugfix == 0) {
// If on a release branch, after the initial release of that branch, the bugfix version will
// be bumped, and will be != 0. On master and N.x branches, we want to test against the
// unreleased version of closest branch. So for those cases, the version includes -SNAPSHOT,
// and the bwc distribution will checkout and build that version.
Version last = versions[-1]
versions[-1] = new Version(last.major, last.minor, last.bugfix, last.suffix, true)
if (last.bugfix == 0) {
versions[-2] = new Version(
versions[-2].major, versions[-2].minor, versions[-2].bugfix, versions[-2].suffix, true)
}
VersionCollection versions = new VersionCollection(file('core/src/main/java/org/elasticsearch/Version.java').readLines('UTF-8'))
if (versions.currentVersion.toString() != VersionProperties.elasticsearch) {
throw new GradleException("The last version in Versions.java [${versions.currentVersion}] does not match " +
"VersionProperties.elasticsearch [${VersionProperties.elasticsearch}]")
}
// build metadata from the previous build, containing e.g. hashes for bwc builds
@ -151,9 +95,10 @@ allprojects {
// for ide hacks...
isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse')
isIdea = System.getProperty("idea.active") != null || gradle.startParameter.taskNames.contains('idea') || gradle.startParameter.taskNames.contains('cleanIdea')
// for backcompat testing
indexCompatVersions = versions
wireCompatVersions = versions.subList(prevMinorIndex, versions.size())
// for BWC testing
versionCollection = versions
buildMetadata = buildMetadataMap
}
}
@ -171,13 +116,13 @@ task verifyVersions {
Set<Version> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d\.\d\.\d/ }.collect { Version.fromString(it) })
// Limit the known versions to those that should be index compatible, and are not future versions
knownVersions = knownVersions.findAll { it.major >= prevMajor && it.before(VersionProperties.elasticsearch) }
knownVersions = knownVersions.findAll { it.major >= versions.currentVersion.major - 1 && it.before(VersionProperties.elasticsearch) }
/* Limit the listed versions to those that have been marked as released.
* Versions not marked as released don't get the same testing and we want
* to make sure that we flip all unreleased versions to released as soon
* as possible after release. */
Set<Version> actualVersions = new TreeSet<>(indexCompatVersions.findAll { false == it.snapshot })
Set<Version> actualVersions = new TreeSet<>(versions.versionsIndexCompatibleWithCurrent.findAll { false == it.snapshot })
// Finally, compare!
if (knownVersions.equals(actualVersions) == false) {
@ -252,30 +197,17 @@ subprojects {
"org.elasticsearch.plugin:aggs-matrix-stats-client:${version}": ':modules:aggs-matrix-stats',
"org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator',
]
if (indexCompatVersions[-1].snapshot) {
/* The last and second to last versions can be snapshots. Rather than use
* snapshots built by CI we connect these versions to projects that build
* those versions from the HEAD of the appropriate branch. */
if (indexCompatVersions[-1].bugfix == 0) {
ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-stable-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-stable-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-stable-snapshot'
if (indexCompatVersions.size() > 1) {
ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
}
} else {
ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-release-snapshot'
for (final Version version : versionCollection.versionsIndexCompatibleWithCurrent) {
if (version.branch != null) {
final String snapshotProject = ":distribution:bwc-snapshot-${version.branch}"
project(snapshotProject).ext.bwcVersion = version
ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${version}"] = snapshotProject
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${version}"] = snapshotProject
ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${version}"] = snapshotProject
}
} else if (indexCompatVersions[-2].snapshot) {
/* This is a terrible hack for the bump to 6.0.1 which will be fixed by #27397 */
ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
}
project.afterEvaluate {
configurations.all {
resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->

View File

@ -31,9 +31,10 @@ public class Version {
final int major
final int minor
final int bugfix
final int revision
final int id
final boolean snapshot
final String branch
/**
* Suffix on the version name. Unlike Version.java the build does not
* consider alphas and betas different versions, it just preserves the
@ -41,14 +42,15 @@ public class Version {
*/
final String suffix
public Version(int major, int minor, int bugfix,
String suffix, boolean snapshot) {
public Version(int major, int minor, int revision,
String suffix, boolean snapshot, String branch) {
this.major = major
this.minor = minor
this.bugfix = bugfix
this.revision = revision
this.snapshot = snapshot
this.suffix = suffix
this.id = major * 100000 + minor * 1000 + bugfix * 10 +
this.branch = branch
this.id = major * 100000 + minor * 1000 + revision * 10 +
(snapshot ? 1 : 0)
}
@ -58,13 +60,13 @@ public class Version {
throw new InvalidUserDataException("Invalid version [${s}]")
}
return new Version(m.group(1) as int, m.group(2) as int,
m.group(3) as int, m.group(4) ?: '', m.group(5) != null)
m.group(3) as int, m.group(4) ?: '', m.group(5) != null, null)
}
@Override
public String toString() {
String snapshotStr = snapshot ? '-SNAPSHOT' : ''
return "${major}.${minor}.${bugfix}${suffix}${snapshotStr}"
return "${major}.${minor}.${revision}${suffix}${snapshotStr}"
}
public boolean before(String compareTo) {
@ -82,4 +84,43 @@ public class Version {
public boolean after(String compareTo) {
return id > fromString(compareTo).id
}
public boolean onOrBeforeIncludingSuffix(Version otherVersion) {
if (id != otherVersion.id) {
return id < otherVersion.id
}
if (suffix == '') {
return otherVersion.suffix == ''
}
return otherVersion.suffix == '' || suffix < otherVersion.suffix
}
boolean equals(o) {
if (this.is(o)) return true
if (getClass() != o.class) return false
Version version = (Version) o
if (id != version.id) return false
if (major != version.major) return false
if (minor != version.minor) return false
if (revision != version.revision) return false
if (snapshot != version.snapshot) return false
if (suffix != version.suffix) return false
return true
}
int hashCode() {
int result
result = major
result = 31 * result + minor
result = 31 * result + revision
result = 31 * result + id
result = 31 * result + (snapshot ? 1 : 0)
result = 31 * result + (suffix != null ? suffix.hashCode() : 0)
return result
}
}

View File

@ -0,0 +1,185 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle
import org.gradle.api.GradleException
import java.util.regex.Matcher
/**
* The collection of version constants declared in Version.java, for use in BWC testing.
*/
class VersionCollection {
private final List<Version> versions
/**
* Construct a VersionCollection from the lines of the Version.java file.
* @param versionLines The lines of the Version.java file.
*/
VersionCollection(List<String> versionLines) {
List<Version> versions = []
for (final String line : versionLines) {
final Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+)(_alpha\d+|_beta\d+|_rc\d+)? .*/
if (match.matches()) {
final Version foundVersion = new Version(
Integer.parseInt(match.group(1)), Integer.parseInt(match.group(2)),
Integer.parseInt(match.group(3)), (match.group(4) ?: '').replace('_', '-'), false, null)
if (versions.size() > 0 && foundVersion.onOrBeforeIncludingSuffix(versions[-1])) {
throw new GradleException("Versions.java contains out of order version constants:" +
" ${foundVersion} should come before ${versions[-1]}")
}
// Only keep the last alpha/beta/rc in the series
if (versions.size() > 0 && versions[-1].id == foundVersion.id) {
versions[-1] = foundVersion
} else {
versions.add(foundVersion)
}
}
}
if (versions.empty) {
throw new GradleException("Unexpectedly found no version constants in Versions.java");
}
// The tip of each minor series (>= 5.6) is unreleased, so set their 'snapshot' flags
Version prevConsideredVersion = null
boolean found6xSnapshot = false
for (final int versionIndex = versions.size() - 1; versionIndex >= 0; versionIndex--) {
final Version currConsideredVersion = versions[versionIndex]
if (prevConsideredVersion == null
|| currConsideredVersion.major != prevConsideredVersion.major
|| currConsideredVersion.minor != prevConsideredVersion.minor) {
// This is a snapshot version. Work out its branch. NB this doesn't name the current branch correctly, but this doesn't
// matter as we don't BWC test against it.
String branch = "${currConsideredVersion.major}.${currConsideredVersion.minor}"
if (false == found6xSnapshot && currConsideredVersion.major == 6) {
// TODO needs generalising to deal with when 7.x is cut, and when 6.x is deleted, and so on...
branch = "6.x"
found6xSnapshot = true
}
versions[versionIndex] = new Version(
currConsideredVersion.major, currConsideredVersion.minor,
currConsideredVersion.revision, currConsideredVersion.suffix, true, branch)
}
if (currConsideredVersion.onOrBefore("5.6.0")) {
break
}
prevConsideredVersion = currConsideredVersion
}
// If we're making a release build then the current should not be a snapshot after all.
final boolean currentIsSnapshot = "true" == System.getProperty("build.snapshot", "true")
if (false == currentIsSnapshot) {
versions[-1] = new Version(versions[-1].major, versions[-1].minor, versions[-1].revision, versions[-1].suffix, false, null)
}
this.versions = Collections.unmodifiableList(versions)
}
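For reference, a minimal standalone sketch of the line parsing above; the input line is assumed (it mirrors the shape of the constants in core's Version.java shown later in this diff):
import java.util.regex.Matcher;
import java.util.regex.Pattern;
// Sketch: decompose one Version.java constant the way the constructor's regex does.
public class VersionLineDemo {
    public static void main(String[] args) {
        Pattern pattern = Pattern.compile(
            "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)? .*");
        String line = "    public static final Version V_6_2_0 = new Version(V_6_2_0_ID, LUCENE_7_1_0);";
        Matcher match = pattern.matcher(line);
        if (match.matches()) {
            // prints: major=6 minor=2 revision=0 suffix=
            System.out.println("major=" + match.group(1) + " minor=" + match.group(2)
                + " revision=" + match.group(3) + " suffix=" + (match.group(4) == null ? "" : match.group(4)));
        }
    }
}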
/**
* @return The list of versions read from the Version.java file
*/
List<Version> getVersions() {
return Collections.unmodifiableList(versions)
}
/**
* @return The latest version in the Version.java file, which must be the current version of the system.
*/
Version getCurrentVersion() {
return versions[-1]
}
/**
* @return The snapshot at the end of the previous minor series in the current major series, or null if this is the first minor series.
*/
Version getBWCSnapshotForCurrentMajor() {
return getLastSnapshotWithMajor(currentVersion.major)
}
/**
* @return The snapshot at the end of the previous major series, which must not be null.
*/
Version getBWCSnapshotForPreviousMajor() {
Version version = getLastSnapshotWithMajor(currentVersion.major - 1)
assert version != null : "getBWCSnapshotForPreviousMajor(): found no versions in the previous major"
return version
}
private Version getLastSnapshotWithMajor(int targetMajor) {
final String currentVersion = currentVersion.toString()
final int snapshotIndex = versions.findLastIndexOf {
it.major == targetMajor && it.before(currentVersion) && it.snapshot
}
return snapshotIndex == -1 ? null : versions[snapshotIndex]
}
private List<Version> versionsOnOrAfterExceptCurrent(Version minVersion) {
final String minVersionString = minVersion.toString()
return Collections.unmodifiableList(versions.findAll {
it.onOrAfter(minVersionString) && it != currentVersion
})
}
/**
* @return All earlier versions that should be tested for index BWC with the current version.
*/
List<Version> getVersionsIndexCompatibleWithCurrent() {
final Version firstVersionOfCurrentMajor = versions.find { it.major >= currentVersion.major - 1 }
return versionsOnOrAfterExceptCurrent(firstVersionOfCurrentMajor)
}
private Version getMinimumWireCompatibilityVersion() {
final int firstIndexOfThisMajor = versions.findIndexOf { it.major == currentVersion.major }
if (firstIndexOfThisMajor == 0) {
return versions[0]
}
final Version lastVersionOfEarlierMajor = versions[firstIndexOfThisMajor - 1]
return versions.find { it.major == lastVersionOfEarlierMajor.major && it.minor == lastVersionOfEarlierMajor.minor }
}
/**
* @return All earlier versions that should be tested for wire BWC with the current version.
*/
List<Version> getVersionsWireCompatibleWithCurrent() {
return versionsOnOrAfterExceptCurrent(minimumWireCompatibilityVersion)
}
/**
* `gradle check` does not run all BWC tests. This defines which tests it does run.
* @return Versions to test for BWC during gradle check.
*/
List<Version> getBasicIntegrationTestVersions() {
// TODO these are the versions checked by `gradle check` for BWC tests. Their choice seems a little arbitrary.
List<Version> result = [BWCSnapshotForPreviousMajor, BWCSnapshotForCurrentMajor]
return Collections.unmodifiableList(result.findAll { it != null })
}
}
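A rough usage sketch of the class as a whole (Groovy classes compile to ordinary JVM classes, so plain Java works; the file path is the one build.gradle passes elsewhere in this diff):
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;
// Sketch: build the collection from Version.java and inspect the derived BWC lists.
public class VersionCollectionDemo {
    public static void main(String[] args) throws Exception {
        List<String> lines = Files.readAllLines(
            Paths.get("core/src/main/java/org/elasticsearch/Version.java"), StandardCharsets.UTF_8);
        VersionCollection versions = new VersionCollection(lines);
        System.out.println("current: " + versions.getCurrentVersion());
        System.out.println("index BWC: " + versions.getVersionsIndexCompatibleWithCurrent());
        System.out.println("wire BWC: " + versions.getVersionsWireCompatibleWithCurrent());
    }
}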

View File

@ -107,7 +107,8 @@ class VagrantTestPlugin implements Plugin<Project> {
if (upgradeFromVersion == null) {
String firstPartOfSeed = project.rootProject.testSeed.tokenize(':').get(0)
final long seed = Long.parseUnsignedLong(firstPartOfSeed, 16)
upgradeFromVersion = project.indexCompatVersions[new Random(seed).nextInt(project.indexCompatVersions.size())]
final def indexCompatVersions = project.versionCollection.versionsIndexCompatibleWithCurrent
upgradeFromVersion = indexCompatVersions[new Random(seed).nextInt(indexCompatVersions.size())]
}
DISTRIBUTION_ARCHIVES.each {

View File

@ -1,4 +1,3 @@
# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
elasticsearch = 7.0.0-alpha1
lucene = 7.1.0

View File

@ -264,7 +264,7 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
assertEquals(5, matrixStats.getFieldCount("num"));
assertEquals(56d, matrixStats.getMean("num"), 0d);
assertEquals(1830d, matrixStats.getVariance("num"), 0d);
assertEquals(0.09340198804973057, matrixStats.getSkewness("num"), 0d);
assertEquals(0.09340198804973046, matrixStats.getSkewness("num"), 0d);
assertEquals(1.2741646510794589, matrixStats.getKurtosis("num"), 0d);
assertEquals(5, matrixStats.getFieldCount("num2"));
assertEquals(29d, matrixStats.getMean("num2"), 0d);

View File

@ -55,7 +55,7 @@ dependencies {
compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
compile "org.apache.lucene:lucene-suggest:${versions.lucene}"
compile 'org.elasticsearch:securesm:1.1'
compile 'org.elasticsearch:securesm:1.2'
// utilities
compile "org.elasticsearch:elasticsearch-cli:${version}"

View File

@ -1 +0,0 @@
1e423447d020041534be94c0f31a49fbdc1f2950

View File

@ -0,0 +1 @@
4c28f5b634497d64b727961430a516f351a099d5

View File

@ -132,8 +132,9 @@ public class Version implements Comparable<Version> {
public static final Version V_6_0_1 =
new Version(V_6_0_1_ID, org.apache.lucene.util.Version.LUCENE_7_0_1);
public static final int V_6_1_0_ID = 6010099;
public static final Version V_6_1_0 =
new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
public static final Version V_6_1_0 = new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
public static final int V_6_2_0_ID = 6020099;
public static final Version V_6_2_0 = new Version(V_6_2_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
public static final int V_7_0_0_alpha1_ID = 7000001;
public static final Version V_7_0_0_alpha1 =
new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
@ -154,6 +155,8 @@ public class Version implements Comparable<Version> {
return V_7_0_0_alpha1;
case V_6_1_0_ID:
return V_6_1_0;
case V_6_2_0_ID:
return V_6_2_0;
case V_6_0_1_ID:
return V_6_0_1;
case V_6_0_0_ID:

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.cluster.health;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.ActiveShardCount;
@ -40,6 +41,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
private TimeValue timeout = new TimeValue(30, TimeUnit.SECONDS);
private ClusterHealthStatus waitForStatus;
private boolean waitForNoRelocatingShards = false;
private boolean waitForNoInitializingShards = false;
private ActiveShardCount waitForActiveShards = ActiveShardCount.NONE;
private String waitForNodes = "";
private Priority waitForEvents = null;
@ -72,6 +74,9 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
if (in.readBoolean()) {
waitForEvents = Priority.readFrom(in);
}
if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
waitForNoInitializingShards = in.readBoolean();
}
}
@Override
@ -101,6 +106,9 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
out.writeBoolean(true);
Priority.writeTo(waitForEvents, out);
}
if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
out.writeBoolean(waitForNoInitializingShards);
}
}
@Override
@ -167,6 +175,21 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthReq
return this;
}
public boolean waitForNoInitializingShards() {
return waitForNoInitializingShards;
}
/**
* Sets whether the request should wait for there to be no initializing shards before
* retrieving the cluster health status. Defaults to {@code false}, meaning the
* operation does not wait on there being no more initializing shards. Set to <code>true</code>
* to wait until the number of initializing shards in the cluster is 0.
*/
public ClusterHealthRequest waitForNoInitializingShards(boolean waitForNoInitializingShards) {
this.waitForNoInitializingShards = waitForNoInitializingShards;
return this;
}
public ActiveShardCount waitForActiveShards() {
return waitForActiveShards;
}

View File

@ -73,6 +73,17 @@ public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestB
return this;
}
/**
* Sets whether the request should wait for there to be no initializing shards before
* retrieving the cluster health status. Defaults to <code>false</code>, meaning the
* operation does not wait on there being no more initializing shards. Set to <code>true</code>
* to wait until the number of initializing shards in the cluster is 0.
*/
public ClusterHealthRequestBuilder setWaitForNoInitializingShards(boolean waitForNoInitializingShards) {
request.waitForNoInitializingShards(waitForNoInitializingShards);
return this;
}
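A hedged usage sketch of the new flag (client setup assumed; prepareHealth() is the usual admin-cluster entry point):
// Sketch: block until the cluster reports zero initializing shards, or the timeout elapses.
ClusterHealthResponse health = client.admin().cluster().prepareHealth()
    .setWaitForNoInitializingShards(true)
    .setTimeout(TimeValue.timeValueSeconds(30))
    .get();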
/**
* Sets the number of shard copies that must be active before getting the health status.
* Defaults to {@link ActiveShardCount#NONE}, meaning we don't wait on any active shards.

View File

@ -142,24 +142,26 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
}
private void executeHealth(final ClusterHealthRequest request, final ActionListener<ClusterHealthResponse> listener) {
int waitFor = 5;
if (request.waitForStatus() == null) {
waitFor--;
int waitFor = 0;
if (request.waitForStatus() != null) {
waitFor++;
}
if (request.waitForNoRelocatingShards() == false) {
waitFor--;
if (request.waitForNoRelocatingShards()) {
waitFor++;
}
if (request.waitForActiveShards().equals(ActiveShardCount.NONE)) {
waitFor--;
if (request.waitForNoInitializingShards()) {
waitFor++;
}
if (request.waitForNodes().isEmpty()) {
waitFor--;
if (request.waitForActiveShards().equals(ActiveShardCount.NONE) == false) {
waitFor++;
}
if (request.indices() == null || request.indices().length == 0) { // check that they actually exist in the meta data
waitFor--;
if (request.waitForNodes().isEmpty() == false) {
waitFor++;
}
if (request.indices() != null && request.indices().length > 0) { // check that they actually exist in the meta data
waitFor++;
}
assert waitFor >= 0;
final ClusterState state = clusterService.state();
final ClusterStateObserver observer = new ClusterStateObserver(state, clusterService, null, logger, threadPool.getThreadContext());
if (request.timeout().millis() == 0) {
@ -196,13 +198,15 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
private boolean validateRequest(final ClusterHealthRequest request, ClusterState clusterState, final int waitFor) {
ClusterHealthResponse response = clusterHealth(request, clusterState, clusterService.getMasterService().numberOfPendingTasks(),
gatewayAllocator.getNumberOfInFlightFetch(), clusterService.getMasterService().getMaxTaskWaitTime());
return prepareResponse(request, response, clusterState, waitFor);
int readyCounter = prepareResponse(request, response, clusterState, indexNameExpressionResolver);
return readyCounter == waitFor;
}
private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState, final int waitFor, boolean timedOut) {
ClusterHealthResponse response = clusterHealth(request, clusterState, clusterService.getMasterService().numberOfPendingTasks(),
gatewayAllocator.getNumberOfInFlightFetch(), clusterService.getMasterService().getMaxTaskWaitTime());
boolean valid = prepareResponse(request, response, clusterState, waitFor);
int readyCounter = prepareResponse(request, response, clusterState, indexNameExpressionResolver);
boolean valid = (readyCounter == waitFor);
assert valid || timedOut;
// we check for a timeout here since this method might be called from the wait_for_events
// response handler which might have timed out already.
@ -213,7 +217,8 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
return response;
}
private boolean prepareResponse(final ClusterHealthRequest request, final ClusterHealthResponse response, ClusterState clusterState, final int waitFor) {
static int prepareResponse(final ClusterHealthRequest request, final ClusterHealthResponse response,
final ClusterState clusterState, final IndexNameExpressionResolver indexNameExpressionResolver) {
int waitForCounter = 0;
if (request.waitForStatus() != null && response.getStatus().value() <= request.waitForStatus().value()) {
waitForCounter++;
@ -221,6 +226,9 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
if (request.waitForNoRelocatingShards() && response.getRelocatingShards() == 0) {
waitForCounter++;
}
if (request.waitForNoInitializingShards() && response.getInitializingShards() == 0) {
waitForCounter++;
}
if (request.waitForActiveShards().equals(ActiveShardCount.NONE) == false) {
ActiveShardCount waitForActiveShards = request.waitForActiveShards();
assert waitForActiveShards.equals(ActiveShardCount.DEFAULT) == false :
@ -292,7 +300,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
}
}
}
return waitForCounter == waitFor;
return waitForCounter;
}

View File

@ -163,9 +163,14 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeReque
if (IndexMetaData.INDEX_ROUTING_PARTITION_SIZE_SETTING.exists(targetIndexSettings)) {
throw new IllegalArgumentException("cannot provide a routing partition size value when resizing an index");
}
if (IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(targetIndexSettings)) {
throw new IllegalArgumentException("cannot provide index.number_of_routing_shards on resize");
// if we have a source index with 1 shard it's legal to set this
final boolean splitFromSingleShards = resizeRequest.getResizeType() == ResizeType.SPLIT && metaData.getNumberOfShards() == 1;
if (splitFromSingleShards == false) {
throw new IllegalArgumentException("cannot provide index.number_of_routing_shards on resize");
}
}
String cause = resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT) + "_index";
targetIndex.cause(cause);

View File

@ -361,8 +361,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
shardFailures[i] = readShardSearchFailure(in);
}
}
//TODO update version once backported
if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
if (in.getVersion().onOrAfter(Version.V_6_1_0)) {
clusters = new Clusters(in);
} else {
clusters = Clusters.EMPTY;
@ -385,8 +384,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
for (ShardSearchFailure shardSearchFailure : shardFailures) {
shardSearchFailure.writeTo(out);
}
//TODO update version once backported
if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
clusters.writeTo(out);
}
out.writeOptionalString(scrollId);

View File

@ -19,9 +19,8 @@
package org.elasticsearch.bootstrap;
import org.elasticsearch.Build;
import org.elasticsearch.SecureSM;
import org.elasticsearch.Version;
import org.elasticsearch.cli.Command;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.network.NetworkModule;
@ -120,7 +119,8 @@ final class Security {
Policy.setPolicy(new ESPolicy(createPermissions(environment), getPluginPermissions(environment), filterBadDefaults));
// enable security manager
System.setSecurityManager(new SecureSM(new String[] { "org.elasticsearch.bootstrap.", "org.elasticsearch.cli" }));
final String[] classesThatCanExit = new String[]{ElasticsearchUncaughtExceptionHandler.class.getName(), Command.class.getName()};
System.setSecurityManager(new SecureSM(classesThatCanExit));
// do some basic tests
selfTest();

View File

@ -1333,25 +1333,33 @@ public class IndexMetaData implements Diffable<IndexMetaData>, ToXContentFragmen
* @return a the source shard ID to split off from
*/
public static ShardId selectSplitShard(int shardId, IndexMetaData sourceIndexMetadata, int numTargetShards) {
int numSourceShards = sourceIndexMetadata.getNumberOfShards();
if (shardId >= numTargetShards) {
throw new IllegalArgumentException("the number of target shards (" + numTargetShards + ") must be greater than the shard id: "
+ shardId);
}
int numSourceShards = sourceIndexMetadata.getNumberOfShards();
final int routingFactor = getRoutingFactor(numSourceShards, numTargetShards);
assertSplitMetadata(numSourceShards, numTargetShards, sourceIndexMetadata);
return new ShardId(sourceIndexMetadata.getIndex(), shardId/routingFactor);
}
private static void assertSplitMetadata(int numSourceShards, int numTargetShards, IndexMetaData sourceIndexMetadata) {
if (numSourceShards > numTargetShards) {
throw new IllegalArgumentException("the number of source shards [" + numSourceShards
+ "] must be less that the number of target shards [" + numTargetShards + "]");
+ "] must be less that the number of target shards [" + numTargetShards + "]");
}
int routingFactor = getRoutingFactor(numSourceShards, numTargetShards);
// now we verify that the numRoutingShards is valid in the source index
int routingNumShards = sourceIndexMetadata.getRoutingNumShards();
// note: if the number of shards is 1 in the source index we can just assume it's correct since from 1 we can split into anything
// this is important to special case here since we use this to validate this in various places in the code but allow to split form
// 1 to N but we never modify the sourceIndexMetadata to accommodate for that
int routingNumShards = numSourceShards == 1 ? numTargetShards : sourceIndexMetadata.getRoutingNumShards();
if (routingNumShards % numTargetShards != 0) {
throw new IllegalStateException("the number of routing shards ["
+ routingNumShards + "] must be a multiple of the target shards [" + numTargetShards + "]");
}
// this is just an additional assertion that ensures we are a factor of the routing num shards.
assert getRoutingFactor(numTargetShards, sourceIndexMetadata.getRoutingNumShards()) >= 0;
return new ShardId(sourceIndexMetadata.getIndex(), shardId/routingFactor);
assert sourceIndexMetadata.getNumberOfShards() == 1 // special case - we can split into anything from 1 shard
|| getRoutingFactor(numTargetShards, routingNumShards) >= 0;
}
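To make the mapping concrete, a minimal sketch of the same division for a split (names are illustrative; it assumes numTargetShards is a multiple of numSourceShards, which the checks above enforce):
// Sketch: which source shard a target shard recovers from when splitting 2 shards into 4.
static int selectSourceShard(int targetShardId, int numSourceShards, int numTargetShards) {
    int routingFactor = numTargetShards / numSourceShards; // routing factor for a split
    return targetShardId / routingFactor;                  // same division as selectSplitShard above
}
// selectSourceShard(0, 2, 4) == 0, selectSourceShard(1, 2, 4) == 0,
// selectSourceShard(2, 2, 4) == 1, selectSourceShard(3, 2, 4) == 1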
/**

View File

@ -379,15 +379,24 @@ public class MetaDataCreateIndexService extends AbstractComponent {
indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName());
indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID());
final IndexMetaData.Builder tmpImdBuilder = IndexMetaData.builder(request.index());
final Settings idxSettings = indexSettingsBuilder.build();
int numTargetShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(idxSettings);
final int routingNumShards;
if (recoverFromIndex == null) {
Settings idxSettings = indexSettingsBuilder.build();
routingNumShards = IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(idxSettings);
final Version indexVersionCreated = idxSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null);
final IndexMetaData sourceMetaData = recoverFromIndex == null ? null :
currentState.metaData().getIndexSafe(recoverFromIndex);
if (sourceMetaData == null || sourceMetaData.getNumberOfShards() == 1) {
// in this case we either have no index to recover from, or we have a source index with
// 1 shard and either no explicit split factor or one that is valid; in that case we can
// split into whatever we want and auto-generate a new factor.
if (IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(idxSettings)) {
routingNumShards = IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(idxSettings);
} else {
routingNumShards = calculateNumRoutingShards(numTargetShards, indexVersionCreated);
}
} else {
assert IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(indexSettingsBuilder.build()) == false
: "index.number_of_routing_shards should be present on the target index on resize";
final IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(recoverFromIndex);
: "index.number_of_routing_shards should not be present on the target index on resize";
routingNumShards = sourceMetaData.getRoutingNumShards();
}
// remove the setting it's temporary and is only relevant once we create the index
@ -408,7 +417,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
* the maximum primary term on all the shards in the source index. This ensures that we have correct
* document-level semantics regarding sequence numbers in the shrunken index.
*/
final IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(recoverFromIndex);
final long primaryTerm =
IntStream
.range(0, sourceMetaData.getNumberOfShards())
@ -717,4 +725,27 @@ public class MetaDataCreateIndexService extends AbstractComponent {
.put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME.getKey(), resizeSourceIndex.getName())
.put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID.getKey(), resizeSourceIndex.getUUID());
}
/**
* Returns a default number of routing shards based on the number of shards of the index. The default number of routing shards will
* allow any index to be split at least once and at most 10 times by a factor of two. The closer the number of shards gets to 1024
* the fewer default split operations are supported
*/
public static int calculateNumRoutingShards(int numShards, Version indexVersionCreated) {
if (indexVersionCreated.onOrAfter(Version.V_7_0_0_alpha1)) {
// only select this automatically for indices that are created on or after 7.0; this will prevent this new behavior
// until we have a fully upgraded cluster. Additionally it will make integration testing easier since mixed clusters
// will always have the behavior of the min node in the cluster.
//
// As the default number of routing shards we use the highest number that can be expressed
// as {@code numShards * 2^x} and is less than or equal to the maximum number of shards: 1024.
int log2MaxNumShards = 10; // logBase2(1024)
int log2NumShards = 32 - Integer.numberOfLeadingZeros(numShards - 1); // ceil(logBase2(numShards))
int numSplits = log2MaxNumShards - log2NumShards;
numSplits = Math.max(1, numSplits); // Ensure the index can be split at least once
return numShards * 1 << numSplits;
} else {
return numShards;
}
}
}
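A few worked values for calculateNumRoutingShards, assuming an index created on or after 7.0 so the doubling branch applies:
// 1 * 2^10, 5 * 2^7 (ceil(log2(5)) == 3), and the guaranteed single split near the 1024 cap:
assert MetaDataCreateIndexService.calculateNumRoutingShards(1, Version.V_7_0_0_alpha1) == 1024;
assert MetaDataCreateIndexService.calculateNumRoutingShards(5, Version.V_7_0_0_alpha1) == 640;
assert MetaDataCreateIndexService.calculateNumRoutingShards(1024, Version.V_7_0_0_alpha1) == 2048;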

View File

@ -41,7 +41,7 @@ public class CoordinateNode implements ToXContentObject {
* @param coordinate
* Coordinate for the Node
*/
protected CoordinateNode(Coordinate coordinate) {
CoordinateNode(Coordinate coordinate) {
this.coordinate = coordinate;
this.children = null;
}
@ -52,7 +52,7 @@ public class CoordinateNode implements ToXContentObject {
* @param children
* Children of the Node
*/
protected CoordinateNode(List<CoordinateNode> children) {
CoordinateNode(List<CoordinateNode> children) {
this.children = children;
this.coordinate = null;
}
@ -61,10 +61,6 @@ public class CoordinateNode implements ToXContentObject {
return (coordinate == null && (children == null || children.isEmpty()));
}
public boolean isMultiPoint() {
return children != null && children.size() > 1;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (children == null) {

View File

@ -694,9 +694,9 @@ public abstract class StreamInput extends InputStream {
* assumed that the stream first contains a variable-length integer representing the size of the array, and then contains that many
* elements that can be read from the stream.
*
* @param reader the reader used to read individual elements
* @param reader the reader used to read individual elements
* @param arraySupplier a supplier used to construct a new array
* @param <T> the type of the elements of the array
* @param <T> the type of the elements of the array
* @return an array read from the stream
* @throws IOException if an I/O exception occurs while reading the array
*/

View File

@ -713,8 +713,8 @@ public abstract class StreamOutput extends OutputStream {
* integer is first written to the stream, and then the elements of the array are written to the stream.
*
* @param writer the writer used to write individual elements
* @param array the array
* @param <T> the type of the elements of the array
* @param array the array
* @param <T> the type of the elements of the array
* @throws IOException if an I/O exception occurs while writing the array
*/
public <T> void writeArray(final Writer<T> writer, final T[] array) throws IOException {

View File

@ -25,13 +25,16 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.PrefixQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import java.util.List;
@ -62,12 +65,19 @@ public class Queries {
return new PrefixQuery(new Term(TypeFieldMapper.NAME, new BytesRef("__")));
}
public static Query newNonNestedFilter() {
// TODO: this is slow, make it a positive query
return new BooleanQuery.Builder()
/**
* Creates a new non-nested docs query
* @param indexVersionCreated the index version created since newer indices can identify a parent field more efficiently
*/
public static Query newNonNestedFilter(Version indexVersionCreated) {
if (indexVersionCreated.onOrAfter(Version.V_6_1_0)) {
return new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME);
} else {
return new BooleanQuery.Builder()
.add(new MatchAllDocsQuery(), Occur.FILTER)
.add(newNestedFilter(), Occur.MUST_NOT)
.build();
}
}
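A small usage sketch (call sites later in this diff obtain the version from the index settings; the constants here are just for illustration):
// Pre-6.1.0 index: the boolean must-not-nested fallback is used.
Query legacyFilter = Queries.newNonNestedFilter(Version.V_6_0_0);
// 6.1.0+ index: a cheap doc-values existence check on _primary_term, which only parent docs carry.
Query fastFilter = Queries.newNonNestedFilter(Version.V_6_1_0);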
public static BooleanQuery filtered(@Nullable Query query, @Nullable Query filter) {

View File

@ -141,6 +141,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING,
MapperService.INDEX_MAPPER_DYNAMIC_SETTING,
MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING,
MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING,
MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING,
MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING,
BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING,

View File

@ -43,7 +43,14 @@ public abstract class ConcurrentCollections {
* Creates a new CHM with an aggressive concurrency level, aimed at high concurrent update rate long living maps.
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentMapWithAggressiveConcurrency() {
return new ConcurrentHashMap<>(16, 0.75f, aggressiveConcurrencyLevel);
return newConcurrentMapWithAggressiveConcurrency(16);
}
/**
* Creates a new CHM with an aggressive concurrency level, aimed at high concurrent update rate long living maps.
*/
public static <K, V> ConcurrentMap<K, V> newConcurrentMapWithAggressiveConcurrency(int initialCapacity) {
return new ConcurrentHashMap<>(initialCapacity, 0.75f, aggressiveConcurrencyLevel);
}
public static <K, V> ConcurrentMap<K, V> newConcurrentMap() {

View File

@ -300,6 +300,7 @@ public final class AnalysisRegistry implements Closeable {
};
}
@SuppressWarnings("unchecked")
private <T> Map<String, T> buildMapping(Component component, IndexSettings settings, Map<String, Settings> settingsMap,
Map<String, ? extends AnalysisModule.AnalysisProvider<T>> providerMap,
Map<String, ? extends AnalysisModule.AnalysisProvider<T>> defaultInstance) throws IOException {

View File

@ -249,7 +249,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements I
}
if (hasNested) {
warmUp.add(Queries.newNonNestedFilter());
warmUp.add(Queries.newNonNestedFilter(indexSettings.getIndexVersionCreated()));
}
final CountDownLatch latch = new CountDownLatch(searcher.reader().leaves().size() * warmUp.size());

View File

@ -71,12 +71,18 @@ class CombinedDeletionPolicy extends IndexDeletionPolicy {
}
private void setLastCommittedTranslogGeneration(List<? extends IndexCommit> commits) throws IOException {
// when opening an existing lucene index, we currently always open the last commit.
// we therefore use the translog gen as the one that will be required for recovery
final IndexCommit indexCommit = commits.get(commits.size() - 1);
assert indexCommit.isDeleted() == false : "last commit is deleted";
long minGen = Long.parseLong(indexCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY));
translogDeletionPolicy.setMinTranslogGenerationForRecovery(minGen);
// We need to keep translog since the smallest translog generation of un-deleted commits.
// However, there are commits that are not deleted just because they are being snapshotted (rather than being kept by the policy).
// TODO: We need to distinguish those commits and skip them in calculating the minimum required translog generation.
long minRequiredGen = Long.MAX_VALUE;
for (IndexCommit indexCommit : commits) {
if (indexCommit.isDeleted() == false) {
long translogGen = Long.parseLong(indexCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY));
minRequiredGen = Math.min(translogGen, minRequiredGen);
}
}
assert minRequiredGen != Long.MAX_VALUE : "All commits are deleted";
translogDeletionPolicy.setMinTranslogGenerationForRecovery(minRequiredGen);
}
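A worked example of the new behaviour, with assumed generations: if the un-deleted commits carry translog generations 3, 5 and 7, the minimum required generation is now 3, whereas the old code kept only the last commit's generation (7) and could trim translog still needed by the earlier commits.
// Sketch of the same computation over assumed generations.
long[] undeletedCommitGens = {3L, 5L, 7L}; // assumed translog gens of un-deleted commits
long minRequiredGen = Long.MAX_VALUE;
for (long gen : undeletedCommitGens) {
    minRequiredGen = Math.min(gen, minRequiredGen); // == 3 after the loop; the old code kept only 7
}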
public SnapshotDeletionPolicy getIndexDeletionPolicy() {

View File

@ -102,7 +102,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
// map. While reopen is running, any lookup will first
// try this new map, then fallback to old, then to the
// current searcher:
maps = new Maps(ConcurrentCollections.<BytesRef,VersionValue>newConcurrentMapWithAggressiveConcurrency(), maps.current);
maps = new Maps(ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(maps.current.size()), maps.current);
// This is not 100% correct, since concurrent indexing ops can change these counters in between our execution of the previous
// line and this one, but that should be minor, and the error won't accumulate over time:
@ -117,7 +117,7 @@ class LiveVersionMap implements ReferenceManager.RefreshListener, Accountable {
// case. This is because we assign new maps (in beforeRefresh) slightly before Lucene actually flushes any segments for the
// reopen, and so any concurrent indexing requests can still sneak in a few additions to that current map that are in fact reflected
// in the previous reader. We don't touch tombstones here: they expire on their own index.gc_deletes timeframe:
maps = new Maps(maps.current, ConcurrentCollections.<BytesRef,VersionValue>newConcurrentMapWithAggressiveConcurrency());
maps = new Maps(maps.current, Collections.emptyMap());
}
/** Returns the live version (add or delete) for this uid. */

View File

@ -473,8 +473,8 @@ public class DateFieldMapper extends FieldMapper {
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
final DateFieldMapper other = (DateFieldMapper) mergeWith;
super.doMerge(mergeWith, updateAllTypes);
final DateFieldMapper other = (DateFieldMapper) mergeWith;
if (other.ignoreMalformed.explicit()) {
this.ignoreMalformed = other.ignoreMalformed;
}

View File

@ -56,7 +56,7 @@ public class DocumentMapper implements ToXContentFragment {
private final RootObjectMapper rootObjectMapper;
private Map<String, Object> meta = emptyMap();
private Map<String, Object> meta;
private final Mapper.BuilderContext builderContext;

View File

@ -92,6 +92,9 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
public static final String DEFAULT_MAPPING = "_default_";
public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING =
Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, Property.Dynamic, Property.IndexScope);
// maximum allowed number of nested json objects across all fields in a single document
public static final Setting<Long> INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING =
Setting.longSetting("index.mapping.nested_objects.limit", 10000L, 0, Property.Dynamic, Property.IndexScope);
public static final Setting<Long> INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING =
Setting.longSetting("index.mapping.total_fields.limit", 1000L, 0, Property.Dynamic, Property.IndexScope);
public static final Setting<Long> INDEX_MAPPING_DEPTH_LIMIT_SETTING =

View File

@ -98,7 +98,8 @@ public final class Mapping implements ToXContentFragment {
}
mergedMetaDataMappers.put(merged.getClass(), merged);
}
return new Mapping(indexCreated, mergedRoot, mergedMetaDataMappers.values().toArray(new MetadataFieldMapper[0]), mergeWith.meta);
Map<String, Object> mergedMeta = mergeWith.meta == null ? meta : mergeWith.meta;
return new Mapping(indexCreated, mergedRoot, mergedMetaDataMappers.values().toArray(new MetadataFieldMapper[0]), mergedMeta);
}
/**
@ -128,7 +129,7 @@ public final class Mapping implements ToXContentFragment {
root.toXContent(builder, params, new ToXContent() {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (meta != null && !meta.isEmpty()) {
if (meta != null) {
builder.field("_meta", meta);
}
for (Mapper mapper : metadataMappers) {

View File

@ -305,6 +305,10 @@ public abstract class ParseContext {
private SeqNoFieldMapper.SequenceIDFields seqID;
private final long maxAllowedNumNestedDocs;
private long numNestedDocs;
private final List<Mapper> dynamicMappers;
@ -321,6 +325,8 @@ public abstract class ParseContext {
this.version = null;
this.sourceToParse = source;
this.dynamicMappers = new ArrayList<>();
this.maxAllowedNumNestedDocs = MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.get(indexSettings);
this.numNestedDocs = 0L;
}
@Override
@ -366,6 +372,13 @@ public abstract class ParseContext {
@Override
protected void addDoc(Document doc) {
numNestedDocs ++;
if (numNestedDocs > maxAllowedNumNestedDocs) {
throw new MapperParsingException(
"The number of nested documents has exceeded the allowed limit of [" + maxAllowedNumNestedDocs + "]."
+ " This limit can be set by changing the [" + MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.getKey()
+ "] index level setting.");
}
this.documents.add(doc);
}
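A hedged sketch of adjusting the new cap (the key comes from MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING above and is dynamic; index name and value are illustrative):
// Sketch: raise the per-document nested-object limit for one index.
client.admin().indices().prepareUpdateSettings("my-index")
    .setSettings(Settings.builder().put("index.mapping.nested_objects.limit", 20000))
    .get();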

View File

@ -28,6 +28,7 @@ import org.apache.lucene.search.DocValuesFieldExistsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -252,11 +253,17 @@ public class SeqNoFieldMapper extends MetadataFieldMapper {
// we share the parent docs fields to ensure good compression
SequenceIDFields seqID = context.seqID();
assert seqID != null;
for (int i = 1; i < context.docs().size(); i++) {
int numDocs = context.docs().size();
final Version versionCreated = context.mapperService().getIndexSettings().getIndexVersionCreated();
final boolean includePrimaryTerm = versionCreated.before(Version.V_6_1_0);
for (int i = 1; i < numDocs; i++) {
final Document doc = context.docs().get(i);
doc.add(seqID.seqNo);
doc.add(seqID.seqNoDocValue);
doc.add(seqID.primaryTerm);
if (includePrimaryTerm) {
// primary terms are used to distinguish between parent and nested docs since 6.1.0
doc.add(seqID.primaryTerm);
}
}
}

View File

@ -156,7 +156,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
.anyMatch(indexType::equals)) {
if (context.getMapperService().hasNested()) {
// type filters are expected not to match nested docs
return Queries.newNonNestedFilter();
return Queries.newNonNestedFilter(context.indexVersionCreated());
} else {
return new MatchAllDocsQuery();
}

View File

@ -282,7 +282,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
Query innerQuery;
ObjectMapper objectMapper = context.nestedScope().getObjectMapper();
if (objectMapper == null) {
parentFilter = context.bitsetFilter(Queries.newNonNestedFilter());
parentFilter = context.bitsetFilter(Queries.newNonNestedFilter(context.indexVersionCreated()));
} else {
parentFilter = context.bitsetFilter(objectMapper.nestedTypeFilter());
}
@ -377,7 +377,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
SearchHit hit = hits[i];
Query rawParentFilter;
if (parentObjectMapper == null) {
rawParentFilter = Queries.newNonNestedFilter();
rawParentFilter = Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated());
} else {
rawParentFilter = parentObjectMapper.nestedTypeFilter();
}

View File

@ -71,7 +71,7 @@ final class ShardSplittingQuery extends Query {
}
this.indexMetaData = indexMetaData;
this.shardId = shardId;
this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer() : null;
this.nestedParentBitSetProducer = hasNested ? newParentDocBitSetProducer(indexMetaData.getCreationVersion()) : null;
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) {
@ -336,9 +336,9 @@ final class ShardSplittingQuery extends Query {
* than once. There is no point in using BitsetFilterCache#BitSetProducerWarmer since we use this only as a delete by query which is
* executed on a recovery-private index writer. There is no point in caching it and it won't have a cache hit either.
*/
private static BitSetProducer newParentDocBitSetProducer() {
private static BitSetProducer newParentDocBitSetProducer(Version indexVersionCreated) {
return context -> {
Query query = Queries.newNonNestedFilter();
Query query = Queries.newNonNestedFilter(indexVersionCreated);
final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
final IndexSearcher searcher = new IndexSearcher(topLevelContext);
searcher.setQueryCache(null);

View File

@ -731,7 +731,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
/**
* Represents a snapshot of the current directory build from the latest Lucene commit.
* Only files that are part of the last commit are considered in this datastrucutre.
* Only files that are part of the last commit are considered in this datastructure.
* For backwards compatibility the snapshot might include legacy checksums that
* are derived from a dedicated checksum file written by older elasticsearch version pre 1.3
* <p>

View File

@ -35,7 +35,6 @@ import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RateLimiter;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.ResourceNotFoundException;
@ -110,6 +109,7 @@ import java.nio.file.DirectoryNotEmptyException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.NoSuchFileException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
@ -1451,6 +1451,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
SnapshotFiles snapshotFiles = new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles());
Store.MetadataSnapshot recoveryTargetMetadata;
try {
// this will throw an IOException if the store has no segments infos file. The
// store can still have existing files but they will be deleted just before being
// restored.
recoveryTargetMetadata = targetShard.snapshotStoreMetadata();
} catch (IndexNotFoundException e) {
// happens when restoring to an empty shard, not a big deal
@ -1478,7 +1481,14 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata());
fileInfos.put(fileInfo.metadata().name(), fileInfo);
}
final Store.MetadataSnapshot sourceMetaData = new Store.MetadataSnapshot(unmodifiableMap(snapshotMetaData), emptyMap(), 0);
final StoreFileMetaData restoredSegmentsFile = sourceMetaData.getSegmentsFile();
if (restoredSegmentsFile == null) {
throw new IndexShardRestoreFailedException(shardId, "Snapshot has no segments file");
}
final Store.RecoveryDiff diff = sourceMetaData.recoveryDiff(recoveryTargetMetadata);
for (StoreFileMetaData md : diff.identical) {
BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfos.get(md.name());
@ -1505,29 +1515,31 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
logger.trace("no files to recover, all exists within the local store");
}
if (logger.isTraceEnabled()) {
logger.trace("[{}] [{}] recovering_files [{}] with total_size [{}], reusing_files [{}] with reused_size [{}]", shardId, snapshotId,
index.totalRecoverFiles(), new ByteSizeValue(index.totalRecoverBytes()), index.reusedFileCount(), new ByteSizeValue(index.reusedFileCount()));
}
try {
// first, delete pre-existing files in the store that have the same name but are
// different (i.e. different length/checksum) from those being restored in the snapshot
for (final StoreFileMetaData storeFileMetaData : diff.different) {
IOUtils.deleteFiles(store.directory(), storeFileMetaData.name());
}
// list of all existing store files
final List<String> deleteIfExistFiles = Arrays.asList(store.directory().listAll());
// restore the files from the snapshot to the Lucene store
for (final BlobStoreIndexShardSnapshot.FileInfo fileToRecover : filesToRecover) {
// if a file with the same physical name already exists in the store we need to delete it
// before restoring it from the snapshot. We could be lenient and try to reuse the existing
// store files (and compare their names/length/checksum again with the snapshot files) but to
// avoid extra complexity we simply delete them and restore them again like StoreRecovery
// does with dangling indices. Any existing store file that is not restored from the snapshot
// will be clean up by RecoveryTarget.cleanFiles().
final String physicalName = fileToRecover.physicalName();
if (deleteIfExistFiles.contains(physicalName)) {
logger.trace("[{}] [{}] deleting pre-existing file [{}]", shardId, snapshotId, physicalName);
store.directory().deleteFile(physicalName);
}
logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name());
restoreFile(fileToRecover, store);
}
} catch (IOException ex) {
throw new IndexShardRestoreFailedException(shardId, "Failed to recover index", ex);
}
final StoreFileMetaData restoredSegmentsFile = sourceMetaData.getSegmentsFile();
if (recoveryTargetMetadata == null) {
throw new IndexShardRestoreFailedException(shardId, "Snapshot has no segments file");
}
assert restoredSegmentsFile != null;
// read the snapshot data persisted
final SegmentInfos segmentCommitInfos;
try {
@ -1602,5 +1614,4 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
}
}
}
}

View File

@ -62,7 +62,9 @@ public class RestClusterHealthAction extends BaseRestHandler {
clusterHealthRequest.waitForStatus(ClusterHealthStatus.valueOf(waitForStatus.toUpperCase(Locale.ROOT)));
}
clusterHealthRequest.waitForNoRelocatingShards(
request.paramAsBoolean("wait_for_no_relocating_shards", clusterHealthRequest.waitForNoRelocatingShards()));
request.paramAsBoolean("wait_for_no_relocating_shards", clusterHealthRequest.waitForNoRelocatingShards()));
clusterHealthRequest.waitForNoInitializingShards(
request.paramAsBoolean("wait_for_no_initializing_shards", clusterHealthRequest.waitForNoRelocatingShards()));
if (request.hasParam("wait_for_relocating_shards")) {
// wait_for_relocating_shards has been removed in favor of wait_for_no_relocating_shards
throw new IllegalArgumentException("wait_for_relocating_shards has been removed, " +

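The new REST parameter is wired straight onto the request object; a hedged usage sketch at the transport level (mirroring the randomRequest() helper in the tests further below):

    // Hedged sketch: block until no shards are initializing, the transport-level
    // equivalent of GET _cluster/health?wait_for_no_initializing_shards=true
    ClusterHealthRequest healthRequest = new ClusterHealthRequest();
    healthRequest.waitForNoInitializingShards(true);
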
View File

@ -31,6 +31,7 @@ import org.elasticsearch.rest.action.RestActions;
import org.elasticsearch.rest.action.RestStatusToXContentListener;
import java.io.IOException;
import java.util.Locale;
import static org.elasticsearch.rest.RestRequest.Method.POST;
import static org.elasticsearch.rest.RestRequest.Method.PUT;
@ -63,9 +64,16 @@ public class RestIndexAction extends BaseRestHandler {
@Override
public RestChannelConsumer prepareRequest(RestRequest request, final NodeClient client) throws IOException {
validateOpType(request.params().get("op_type"));
request.params().put("op_type", "create");
return RestIndexAction.this.prepareRequest(request, client);
}
void validateOpType(String opType) {
if (null != opType && false == "create".equals(opType.toLowerCase(Locale.ROOT))) {
throw new IllegalArgumentException("opType must be 'create', found: [" + opType + "]");
}
}
}
@Override

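A hedged illustration of what validateOpType accepts on the auto-generated-id endpoint (the argument values are examples, not from the change):

    // Hedged illustration of the validation above.
    validateOpType(null);     // accepted: op_type is then forced to "create"
    validateOpType("CREATE"); // accepted: the comparison is case-insensitive
    validateOpType("index");  // rejected: throws "opType must be 'create', found: [index]"
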
View File

@ -270,7 +270,7 @@ final class DefaultSearchContext extends SearchContext {
&& typeFilter == null // when a _type filter is set, it will automatically exclude nested docs
&& new NestedHelper(mapperService()).mightMatchNestedDocs(query)
&& (aliasFilter == null || new NestedHelper(mapperService()).mightMatchNestedDocs(aliasFilter))) {
filters.add(Queries.newNonNestedFilter());
filters.add(Queries.newNonNestedFilter(mapperService().getIndexSettings().getIndexVersionCreated()));
}
if (aliasFilter != null) {

View File

@ -62,7 +62,9 @@ class NestedAggregator extends BucketsAggregator implements SingleBucketAggregat
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
boolean collectsFromSingleBucket) throws IOException {
super(name, factories, context, parentAggregator, pipelineAggregators, metaData);
Query parentFilter = parentObjectMapper != null ? parentObjectMapper.nestedTypeFilter() : Queries.newNonNestedFilter();
Query parentFilter = parentObjectMapper != null ? parentObjectMapper.nestedTypeFilter()
: Queries.newNonNestedFilter(context.mapperService().getIndexSettings().getIndexVersionCreated());
this.parentFilter = context.bitsetFilterCache().getBitSetProducer(parentFilter);
this.childFilter = childObjectMapper.nestedTypeFilter();
this.collectsFromSingleBucket = collectsFromSingleBucket;

View File

@ -54,7 +54,7 @@ public class ReverseNestedAggregator extends BucketsAggregator implements Single
throws IOException {
super(name, factories, context, parent, pipelineAggregators, metaData);
if (objectMapper == null) {
parentFilter = Queries.newNonNestedFilter();
parentFilter = Queries.newNonNestedFilter(context.mapperService().getIndexSettings().getIndexVersionCreated());
} else {
parentFilter = objectMapper.nestedTypeFilter();
}

View File

@ -181,7 +181,9 @@ public class FetchPhase implements SearchPhase {
private int findRootDocumentIfNested(SearchContext context, LeafReaderContext subReaderContext, int subDocId) throws IOException {
if (context.mapperService().hasNested()) {
BitSet bits = context.bitsetFilterCache().getBitSetProducer(Queries.newNonNestedFilter()).getBitSet(subReaderContext);
BitSet bits = context.bitsetFilterCache()
.getBitSetProducer(Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated()))
.getBitSet(subReaderContext);
if (!bits.get(subDocId)) {
return bits.nextSetBit(subDocId);
}
@ -345,7 +347,7 @@ public class FetchPhase implements SearchPhase {
}
parentFilter = nestedParentObjectMapper.nestedTypeFilter();
} else {
parentFilter = Queries.newNonNestedFilter();
parentFilter = Queries.newNonNestedFilter(context.indexShard().indexSettings().getIndexVersionCreated());
}
Query childFilter = nestedObjectMapper.nestedTypeFilter();

View File

@ -162,12 +162,12 @@ public class QueryPhase implements SearchPhase {
searchContext.terminateAfter(searchContext.size());
searchContext.trackTotalHits(false);
} else if (canEarlyTerminate(indexSort, searchContext)) {
// now this gets interesting: since the index sort matches the search sort, we can directly
// now this gets interesting: since the search sort is a prefix of the index sort, we can directly
// skip to the desired doc
if (after != null) {
BooleanQuery bq = new BooleanQuery.Builder()
.add(query, BooleanClause.Occur.MUST)
.add(new SearchAfterSortedDocQuery(indexSort, (FieldDoc) after), BooleanClause.Occur.FILTER)
.add(new SearchAfterSortedDocQuery(searchContext.sort().sort, (FieldDoc) after), BooleanClause.Occur.FILTER)
.build();
query = bq;
}
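The reworded comment reflects the relaxed condition: early termination no longer requires the search sort to equal the index sort, only to be a prefix of it, which is also why the SearchAfterSortedDocQuery is now built from searchContext.sort().sort rather than the full index sort. A hedged illustration of the prefix check (the indexSort/searchSort values below are made up for the example):

    // Hedged illustration: index sorted on (timestamp desc, id asc), search sorted
    // on (timestamp desc) only. The search sort is a prefix of the index sort, so
    // the shard can skip directly past the provided sort values.
    Sort indexSort = new Sort(new SortField("timestamp", SortField.Type.LONG, true),
            new SortField("id", SortField.Type.STRING));
    Sort searchSort = new Sort(new SortField("timestamp", SortField.Type.LONG, true));
    boolean prefix = searchSort.getSort().length <= indexSort.getSort().length;
    for (int i = 0; prefix && i < searchSort.getSort().length; i++) {
        prefix = searchSort.getSort()[i].equals(indexSort.getSort()[i]);
    }
    // prefix == true here, so the SearchAfterSortedDocQuery filter above applies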

View File

@ -212,7 +212,7 @@ public abstract class SortBuilder<T extends SortBuilder<T>> implements NamedWrit
Query parentQuery;
ObjectMapper objectMapper = context.nestedScope().getObjectMapper();
if (objectMapper == null) {
parentQuery = Queries.newNonNestedFilter();
parentQuery = Queries.newNonNestedFilter(context.indexVersionCreated());
} else {
parentQuery = objectMapper.nestedTypeFilter();
}

View File

@ -31,6 +31,8 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
@ -45,6 +47,9 @@ import java.util.function.Consumer;
public final class DirectCandidateGeneratorBuilder implements CandidateGenerator {
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(
Loggers.getLogger(DirectCandidateGeneratorBuilder.class));
private static final String TYPE = "direct_generator";
public static final ParseField DIRECT_GENERATOR_FIELD = new ParseField(TYPE);
@ -211,8 +216,8 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator
* string distance for terms inside the index.
* <li><code>damerau_levenshtein</code> - String distance algorithm
* based on Damerau-Levenshtein algorithm.
* <li><code>levenstein</code> - String distance algorithm based on
* Levenstein edit distance algorithm.
* <li><code>levenshtein</code> - String distance algorithm based on
* Levenshtein edit distance algorithm.
* <li><code>jarowinkler</code> - String distance algorithm based on
* Jaro-Winkler algorithm.
* <li><code>ngram</code> - String distance algorithm based on character
@ -458,13 +463,16 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator
}
}
private static StringDistance resolveDistance(String distanceVal) {
static StringDistance resolveDistance(String distanceVal) {
distanceVal = distanceVal.toLowerCase(Locale.US);
if ("internal".equals(distanceVal)) {
return DirectSpellChecker.INTERNAL_LEVENSHTEIN;
} else if ("damerau_levenshtein".equals(distanceVal) || "damerauLevenshtein".equals(distanceVal)) {
return new LuceneLevenshteinDistance();
} else if ("levenstein".equals(distanceVal)) {
DEPRECATION_LOGGER.deprecated("Deprecated distance [levenstein] used, replaced by [levenshtein]");
return new LevensteinDistance();
} else if ("levenshtein".equals(distanceVal)) {
return new LevensteinDistance();
// TODO Jaro and Winkler are 2 people - so apply same naming logic
// as damerau_levenshtein

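Since resolveDistance is now package-private rather than private, tests in the same package can exercise the deprecation path directly; a hedged illustration:

    // Hedged illustration: both spellings resolve to the same Lucene implementation,
    // but the legacy one now records a deprecation warning.
    StringDistance legacy = DirectCandidateGeneratorBuilder.resolveDistance("levenstein");     // deprecated, logs a warning
    StringDistance preferred = DirectCandidateGeneratorBuilder.resolveDistance("levenshtein"); // preferred spelling
    assert legacy.getClass() == preferred.getClass(); // both are LevensteinDistance
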
View File

@ -30,6 +30,8 @@ import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryShardContext;
@ -66,6 +68,9 @@ import static org.elasticsearch.search.suggest.phrase.DirectCandidateGeneratorBu
* global options, but are only applicable for this suggestion.
*/
public class TermSuggestionBuilder extends SuggestionBuilder<TermSuggestionBuilder> {
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TermSuggestionBuilder.class));
private static final String SUGGESTION_NAME = "term";
private SuggestMode suggestMode = SuggestMode.MISSING;
@ -214,8 +219,8 @@ public class TermSuggestionBuilder extends SuggestionBuilder<TermSuggestionBuild
* string distance for terms inside the index.
* <li><code>damerau_levenshtein</code> - String distance algorithm based on
* Damerau-Levenshtein algorithm.
* <li><code>levenstein</code> - String distance algorithm based on
* Levenstein edit distance algorithm.
* <li><code>levenshtein</code> - String distance algorithm based on
* Levenshtein edit distance algorithm.
* <li><code>jarowinkler</code> - String distance algorithm based on
* Jaro-Winkler algorithm.
* <li><code>ngram</code> - String distance algorithm based on character
@ -543,8 +548,8 @@ public class TermSuggestionBuilder extends SuggestionBuilder<TermSuggestionBuild
return new LuceneLevenshteinDistance();
}
},
/** String distance algorithm based on Levenstein edit distance algorithm. */
LEVENSTEIN {
/** String distance algorithm based on Levenshtein edit distance algorithm. */
LEVENSHTEIN {
@Override
public StringDistance toLucene() {
return new LevensteinDistance();
@ -584,7 +589,10 @@ public class TermSuggestionBuilder extends SuggestionBuilder<TermSuggestionBuild
case "damerauLevenshtein":
return DAMERAU_LEVENSHTEIN;
case "levenstein":
return LEVENSTEIN;
DEPRECATION_LOGGER.deprecated("Deprecated distance [levenstein] used, replaced by [levenshtein]");
return LEVENSHTEIN;
case "levenshtein":
return LEVENSHTEIN;
case "ngram":
return NGRAM;
case "jarowinkler":

View File

@ -76,7 +76,6 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@ -189,7 +188,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateApp
final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotId);
final Snapshot snapshot = new Snapshot(request.repositoryName, snapshotId);
List<String> filteredIndices = SnapshotUtils.filterIndices(snapshotInfo.indices(), request.indices(), request.indicesOptions());
MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(filteredIndices));
final MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(filteredIndices));
// Make sure that we can restore from this snapshot
validateSnapshotRestorable(request.repositoryName, snapshotInfo);

View File

@ -64,8 +64,7 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable
initialConnectionTimeout = new TimeValue(input);
numNodesConnected = input.readVInt();
clusterAlias = input.readString();
//TODO update version once backported
if (input.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
if (input.getVersion().onOrAfter(Version.V_6_1_0)) {
skipUnavailable = input.readBoolean();
} else {
skipUnavailable = false;
@ -104,8 +103,7 @@ public final class RemoteConnectionInfo implements ToXContentFragment, Writeable
initialConnectionTimeout.writeTo(out);
out.writeVInt(numNodesConnected);
out.writeString(clusterAlias);
//TODO update version once backported
if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
out.writeBoolean(skipUnavailable);
}
}

View File

@ -19,15 +19,28 @@
package org.apache.lucene.search;
import org.elasticsearch.Version;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
public class QueriesTests extends ESTestCase {
public void testNonNestedQuery() {
// This is a custom query that extends AutomatonQuery and we want to make sure the equals method works
assertEquals(Queries.newNonNestedFilter(), Queries.newNonNestedFilter());
assertEquals(Queries.newNonNestedFilter().hashCode(), Queries.newNonNestedFilter().hashCode());
for (Version version : VersionUtils.allVersions()) {
// This is a custom query that extends AutomatonQuery and we want to make sure the equals method works
assertEquals(Queries.newNonNestedFilter(version), Queries.newNonNestedFilter(version));
assertEquals(Queries.newNonNestedFilter(version).hashCode(), Queries.newNonNestedFilter(version).hashCode());
if (version.onOrAfter(Version.V_6_1_0)) {
assertEquals(Queries.newNonNestedFilter(version), new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME));
} else {
assertEquals(Queries.newNonNestedFilter(version), new BooleanQuery.Builder()
.add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER)
.add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT)
.build());
}
}
}
}
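The two branches above pin down the version-dependent shape of Queries.newNonNestedFilter; a sketch consistent with these assertions (hedged, not necessarily the exact source): on 6.1.0+ indices every root document carries a _primary_term doc value, so a single DocValuesFieldExistsQuery suffices, while older indices fall back to "all docs minus nested docs".

    // Hedged sketch of the behavior the assertions above pin down.
    public static Query newNonNestedFilter(Version indexVersionCreated) {
        if (indexVersionCreated.onOrAfter(Version.V_6_1_0)) {
            // root (non-nested) docs are exactly those carrying a primary term doc value
            return new DocValuesFieldExistsQuery(SeqNoFieldMapper.PRIMARY_TERM_NAME);
        } else {
            // older indices: match everything, then exclude nested docs
            return new BooleanQuery.Builder()
                    .add(new MatchAllDocsQuery(), BooleanClause.Occur.FILTER)
                    .add(Queries.newNestedFilter(), BooleanClause.Occur.MUST_NOT)
                    .build();
        }
    }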

View File

@ -0,0 +1,59 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.health;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.core.IsEqual.equalTo;
public class ClusterHealthRequestTests extends ESTestCase {
public void testSerialize() throws Exception {
final ClusterHealthRequest originalRequest = randomRequest();
final ClusterHealthRequest cloneRequest;
try (BytesStreamOutput out = new BytesStreamOutput()) {
originalRequest.writeTo(out);
try (StreamInput in = out.bytes().streamInput()) {
cloneRequest = new ClusterHealthRequest(in);
}
}
assertThat(cloneRequest.waitForStatus(), equalTo(originalRequest.waitForStatus()));
assertThat(cloneRequest.waitForNodes(), equalTo(originalRequest.waitForNodes()));
assertThat(cloneRequest.waitForNoInitializingShards(), equalTo(originalRequest.waitForNoInitializingShards()));
assertThat(cloneRequest.waitForNoRelocatingShards(), equalTo(originalRequest.waitForNoRelocatingShards()));
assertThat(cloneRequest.waitForActiveShards(), equalTo(originalRequest.waitForActiveShards()));
assertThat(cloneRequest.waitForEvents(), equalTo(originalRequest.waitForEvents()));
}
ClusterHealthRequest randomRequest() {
ClusterHealthRequest request = new ClusterHealthRequest();
request.waitForStatus(randomFrom(ClusterHealthStatus.values()));
request.waitForNodes(randomFrom("", "<", "<=", ">", ">=") + between(0, 1000));
request.waitForNoInitializingShards(randomBoolean());
request.waitForNoRelocatingShards(randomBoolean());
request.waitForActiveShards(randomIntBetween(0, 10));
request.waitForEvents(randomFrom(Priority.values()));
return request;
}
}

View File

@ -0,0 +1,102 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.health;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.IntStream;
import static org.hamcrest.core.IsEqual.equalTo;
public class TransportClusterHealthActionTests extends ESTestCase {
public void testWaitForInitializingShards() throws Exception {
final String[] indices = {"test"};
final ClusterHealthRequest request = new ClusterHealthRequest();
request.waitForNoInitializingShards(true);
ClusterState clusterState = randomClusterStateWithInitializingShards("test", 0);
ClusterHealthResponse response = new ClusterHealthResponse("", indices, clusterState);
assertThat(TransportClusterHealthAction.prepareResponse(request, response, clusterState, null), equalTo(1));
request.waitForNoInitializingShards(true);
clusterState = randomClusterStateWithInitializingShards("test", between(1, 10));
response = new ClusterHealthResponse("", indices, clusterState);
assertThat(TransportClusterHealthAction.prepareResponse(request, response, clusterState, null), equalTo(0));
request.waitForNoInitializingShards(false);
clusterState = randomClusterStateWithInitializingShards("test", randomInt(20));
response = new ClusterHealthResponse("", indices, clusterState);
assertThat(TransportClusterHealthAction.prepareResponse(request, response, clusterState, null), equalTo(0));
}
ClusterState randomClusterStateWithInitializingShards(String index, final int initializingShards) {
final IndexMetaData indexMetaData = IndexMetaData
.builder(index)
.settings(settings(Version.CURRENT))
.numberOfShards(between(1, 10))
.numberOfReplicas(randomInt(20))
.build();
final List<ShardRoutingState> shardRoutingStates = new ArrayList<>();
IntStream.range(0, between(1, 30)).forEach(i -> shardRoutingStates.add(randomFrom(
ShardRoutingState.STARTED, ShardRoutingState.UNASSIGNED, ShardRoutingState.RELOCATING)));
IntStream.range(0, initializingShards).forEach(i -> shardRoutingStates.add(ShardRoutingState.INITIALIZING));
Randomness.shuffle(shardRoutingStates);
final ShardId shardId = new ShardId(new Index("index", "uuid"), 0);
final IndexRoutingTable.Builder routingTable = new IndexRoutingTable.Builder(indexMetaData.getIndex());
// Primary
{
ShardRoutingState state = shardRoutingStates.remove(0);
String node = state == ShardRoutingState.UNASSIGNED ? null : "node";
routingTable.addShard(
TestShardRouting.newShardRouting(shardId, node, "relocating", true, state)
);
}
// Replicas
for (int i = 0; i < shardRoutingStates.size(); i++) {
ShardRoutingState state = shardRoutingStates.get(i);
String node = state == ShardRoutingState.UNASSIGNED ? null : "node" + i;
routingTable.addShard(TestShardRouting.newShardRouting(shardId, node, "relocating"+i, randomBoolean(), state));
}
return ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.metaData(MetaData.builder().put(indexMetaData, true))
.routingTable(RoutingTable.builder().add(routingTable.build()).build())
.build();
}
}

View File

@ -317,6 +317,7 @@ public class CreateIndexIT extends ESIntegTestCase {
response = prepareCreate("test_" + shards + "_" + partitionSize)
.setSettings(Settings.builder()
.put("index.number_of_shards", shards)
.put("index.number_of_routing_shards", shards)
.put("index.routing_partition_size", partitionSize))
.execute().actionGet();
} catch (IllegalStateException | IllegalArgumentException e) {

View File

@ -39,6 +39,7 @@ import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.Murmur3HashFunction;
import org.elasticsearch.cluster.routing.ShardRouting;
@ -66,7 +67,6 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.function.IntFunction;
import java.util.stream.IntStream;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
@ -88,21 +88,41 @@ public class SplitIndexIT extends ESIntegTestCase {
}
public void testCreateSplitIndexToN() throws IOException {
int[][] possibleShardSplits = new int[][] {{2,4,8}, {3, 6, 12}, {1, 2, 4}};
int[][] possibleShardSplits = new int[][]{{2, 4, 8}, {3, 6, 12}, {1, 2, 4}};
int[] shardSplits = randomFrom(possibleShardSplits);
assertEquals(shardSplits[0], (shardSplits[0] * shardSplits[1]) / shardSplits[1]);
assertEquals(shardSplits[1], (shardSplits[1] * shardSplits[2]) / shardSplits[2]);
splitToN(shardSplits[0], shardSplits[1], shardSplits[2]);
}
public void testSplitFromOneToN() {
splitToN(1, 5, 10);
client().admin().indices().prepareDelete("*").get();
int randomSplit = randomIntBetween(2, 6);
splitToN(1, randomSplit, randomSplit * 2);
}
private void splitToN(int sourceShards, int firstSplitShards, int secondSplitShards) {
assertEquals(sourceShards, (sourceShards * firstSplitShards) / firstSplitShards);
assertEquals(firstSplitShards, (firstSplitShards * secondSplitShards) / secondSplitShards);
internalCluster().ensureAtLeastNumDataNodes(2);
final boolean useRouting = randomBoolean();
final boolean useNested = randomBoolean();
final boolean useMixedRouting = useRouting ? randomBoolean() : false;
CreateIndexRequestBuilder createInitialIndex = prepareCreate("source");
final int routingShards = shardSplits[2] * randomIntBetween(1, 10);
Settings.Builder settings = Settings.builder().put(indexSettings())
.put("number_of_shards", shardSplits[0])
.put("index.number_of_routing_shards", routingShards);
if (useRouting && useMixedRouting == false && randomBoolean()) {
settings.put("index.routing_partition_size", randomIntBetween(1, routingShards - 1));
Settings.Builder settings = Settings.builder().put(indexSettings()).put("number_of_shards", sourceShards);
final boolean useRoutingPartition;
if (randomBoolean()) {
// randomly set the value manually
int routingShards = secondSplitShards * randomIntBetween(1, 10);
settings.put("index.number_of_routing_shards", routingShards);
useRoutingPartition = false;
} else {
useRoutingPartition = randomBoolean();
}
if (useRouting && useMixedRouting == false && useRoutingPartition) {
int numRoutingShards = MetaDataCreateIndexService.calculateNumRoutingShards(secondSplitShards, Version.CURRENT) - 1;
settings.put("index.routing_partition_size",
randomIntBetween(1, numRoutingShards));
if (useNested) {
createInitialIndex.addMapping("t1", "_routing", "required=true", "nested1", "type=nested");
} else {
@ -172,11 +192,15 @@ public class SplitIndexIT extends ESIntegTestCase {
.setSettings(Settings.builder()
.put("index.blocks.write", true)).get();
ensureGreen();
Settings.Builder firstSplitSettingsBuilder = Settings.builder()
.put("index.number_of_replicas", 0)
.put("index.number_of_shards", firstSplitShards);
if (sourceShards == 1 && useRoutingPartition == false && randomBoolean()) { // try to set it if we have a source index with 1 shard
firstSplitSettingsBuilder.put("index.number_of_routing_shards", secondSplitShards);
}
assertAcked(client().admin().indices().prepareResizeIndex("source", "first_split")
.setResizeType(ResizeType.SPLIT)
.setSettings(Settings.builder()
.put("index.number_of_replicas", 0)
.put("index.number_of_shards", shardSplits[1]).build()).get());
.setSettings(firstSplitSettingsBuilder.build()).get());
ensureGreen();
assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
@ -204,7 +228,7 @@ public class SplitIndexIT extends ESIntegTestCase {
.setResizeType(ResizeType.SPLIT)
.setSettings(Settings.builder()
.put("index.number_of_replicas", 0)
.put("index.number_of_shards", shardSplits[2]).build()).get());
.put("index.number_of_shards", secondSplitShards).build()).get());
ensureGreen();
assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs);
// let it be allocated anywhere and bump replicas
@ -340,7 +364,6 @@ public class SplitIndexIT extends ESIntegTestCase {
prepareCreate("source").setSettings(Settings.builder().put(indexSettings())
.put("number_of_shards", 1)
.put("index.version.created", version)
.put("index.number_of_routing_shards", 2)
).get();
final int docs = randomIntBetween(0, 128);
for (int i = 0; i < docs; i++) {
@ -443,7 +466,6 @@ public class SplitIndexIT extends ESIntegTestCase {
Settings.builder()
.put(indexSettings())
.put("sort.field", "id")
.put("index.number_of_routing_shards", 16)
.put("sort.order", "desc")
.put("number_of_shards", 2)
.put("number_of_replicas", 0)

View File

@ -51,10 +51,13 @@ import static java.util.Collections.emptyMap;
public class TransportResizeActionTests extends ESTestCase {
private ClusterState createClusterState(String name, int numShards, int numReplicas, Settings settings) {
return createClusterState(name, numShards, numReplicas, numShards, settings);
}
private ClusterState createClusterState(String name, int numShards, int numReplicas, int numRoutingShards, Settings settings) {
MetaData.Builder metaBuilder = MetaData.builder();
IndexMetaData indexMetaData = IndexMetaData.builder(name).settings(settings(Version.CURRENT)
.put(settings))
.numberOfShards(numShards).numberOfReplicas(numReplicas).build();
.numberOfShards(numShards).numberOfReplicas(numReplicas).setRoutingNumShards(numRoutingShards).build();
metaBuilder.put(indexMetaData, false);
MetaData metaData = metaBuilder.build();
RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
@ -108,6 +111,67 @@ public class TransportResizeActionTests extends ESTestCase {
(i) -> new DocsStats(between(1, 1000), between(1, 1000), between(0, 10000)), "source", "target");
}
public void testPassNumRoutingShards() {
ClusterState clusterState = ClusterState.builder(createClusterState("source", 1, 0,
Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
.build();
AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
// now we start the shard
routingTable = service.applyStartedShards(clusterState,
routingTable.index("source").shardsWithState(ShardRoutingState.INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
ResizeRequest resizeRequest = new ResizeRequest("target", "source");
resizeRequest.setResizeType(ResizeType.SPLIT);
resizeRequest.getTargetIndexRequest()
.settings(Settings.builder().put("index.number_of_shards", 2).build());
TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState, null, "source", "target");
resizeRequest.getTargetIndexRequest()
.settings(Settings.builder()
.put("index.number_of_routing_shards", randomIntBetween(2, 10))
.put("index.number_of_shards", 2)
.build());
TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState, null, "source", "target");
}
public void testPassNumRoutingShardsAndFail() {
int numShards = randomIntBetween(2, 100);
ClusterState clusterState = ClusterState.builder(createClusterState("source", numShards, 0, numShards * 4,
Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1")))
.build();
AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY,
Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
// now we start the shard
routingTable = service.applyStartedShards(clusterState,
routingTable.index("source").shardsWithState(ShardRoutingState.INITIALIZING)).routingTable();
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build();
ResizeRequest resizeRequest = new ResizeRequest("target", "source");
resizeRequest.setResizeType(ResizeType.SPLIT);
resizeRequest.getTargetIndexRequest()
.settings(Settings.builder().put("index.number_of_shards", numShards * 2).build());
TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState, null, "source", "target");
resizeRequest.getTargetIndexRequest()
.settings(Settings.builder()
.put("index.number_of_shards", numShards * 2)
.put("index.number_of_routing_shards", numShards * 2).build());
ClusterState finalState = clusterState;
IllegalArgumentException iae = expectThrows(IllegalArgumentException.class,
() -> TransportResizeAction.prepareCreateIndexRequest(resizeRequest, finalState, null, "source", "target"));
assertEquals("cannot provide index.number_of_routing_shards on resize", iae.getMessage());
}
public void testShrinkIndexSettings() {
String indexName = randomAlphaOfLength(10);
// create one that won't fail

View File

@ -55,23 +55,18 @@ public class IndexRequestTests extends ESTestCase {
IndexRequest indexRequest = new IndexRequest("");
indexRequest.opType(create);
assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.CREATE));
assertThat(indexRequest.opType(), equalTo(DocWriteRequest.OpType.CREATE));
indexRequest.opType(createUpper);
assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.CREATE));
assertThat(indexRequest.opType(), equalTo(DocWriteRequest.OpType.CREATE));
indexRequest.opType(index);
assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.INDEX));
assertThat(indexRequest.opType(), equalTo(DocWriteRequest.OpType.INDEX));
indexRequest.opType(indexUpper);
assertThat(indexRequest.opType() , equalTo(DocWriteRequest.OpType.INDEX));
assertThat(indexRequest.opType(), equalTo(DocWriteRequest.OpType.INDEX));
}
public void testReadBogusString() {
try {
IndexRequest indexRequest = new IndexRequest("");
indexRequest.opType("foobar");
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("opType must be 'create' or 'index', found: [foobar]"));
}
public void testReadIncorrectOpType() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new IndexRequest("").opType("foobar"));
assertThat(e.getMessage(), equalTo("opType must be 'create' or 'index', found: [foobar]"));
}
public void testCreateOperationRejectsVersions() {

View File

@ -293,7 +293,7 @@ public class SearchResponseTests extends ESTestCase {
public void testSerializationBwc() throws IOException {
final byte[] data = Base64.getDecoder().decode("AAAAAAAAAAAAAgABBQUAAAoAAAAAAAAA");
final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_0, Version.V_6_0_0_rc2);
final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_5, Version.V_6_0_0);
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(data), namedWriteableRegistry)) {
in.setVersion(version);
SearchResponse deserialized = new SearchResponse();

View File

@ -152,10 +152,10 @@ public class GetTermVectorsTests extends ESSingleNodeTestCase {
.field("analyzer", "payload_test").endObject().endObject().endObject().endObject();
Settings setting = Settings.builder()
.put("index.analysis.analyzer.payload_test.tokenizer", "whitespace")
.putList("index.analysis.analyzer.payload_test.filter", "my_delimited_payload_filter")
.put("index.analysis.filter.my_delimited_payload_filter.delimiter", delimiter)
.put("index.analysis.filter.my_delimited_payload_filter.encoding", encodingString)
.put("index.analysis.filter.my_delimited_payload_filter.type", "mock_payload_filter").build();
.putList("index.analysis.analyzer.payload_test.filter", "my_delimited_payload")
.put("index.analysis.filter.my_delimited_payload.delimiter", delimiter)
.put("index.analysis.filter.my_delimited_payload.encoding", encodingString)
.put("index.analysis.filter.my_delimited_payload.type", "mock_payload_filter").build();
createIndex("test", setting, "type1", mapping);
client().prepareIndex("test", "type1", Integer.toString(1))

View File

@ -34,7 +34,6 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllo
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.ResourceAlreadyExistsException;
@ -299,4 +298,39 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
.getDefault(Settings.EMPTY)).build()));
assertThat(e.getMessage(), endsWith(errorMessage));
}
public void testCalculateNumRoutingShards() {
assertEquals(1024, MetaDataCreateIndexService.calculateNumRoutingShards(1, Version.CURRENT));
assertEquals(1024, MetaDataCreateIndexService.calculateNumRoutingShards(2, Version.CURRENT));
assertEquals(768, MetaDataCreateIndexService.calculateNumRoutingShards(3, Version.CURRENT));
assertEquals(576, MetaDataCreateIndexService.calculateNumRoutingShards(9, Version.CURRENT));
assertEquals(1024, MetaDataCreateIndexService.calculateNumRoutingShards(512, Version.CURRENT));
assertEquals(2048, MetaDataCreateIndexService.calculateNumRoutingShards(1024, Version.CURRENT));
assertEquals(4096, MetaDataCreateIndexService.calculateNumRoutingShards(2048, Version.CURRENT));
Version latestV6 = VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1);
int numShards = randomIntBetween(1, 1000);
assertEquals(numShards, MetaDataCreateIndexService.calculateNumRoutingShards(numShards, latestV6));
assertEquals(numShards, MetaDataCreateIndexService.calculateNumRoutingShards(numShards,
VersionUtils.randomVersionBetween(random(), VersionUtils.getFirstVersion(), latestV6)));
for (int i = 0; i < 1000; i++) {
int randomNumShards = randomIntBetween(1, 10000);
int numRoutingShards = MetaDataCreateIndexService.calculateNumRoutingShards(randomNumShards, Version.CURRENT);
if (numRoutingShards <= 1024) {
assertTrue("numShards: " + randomNumShards, randomNumShards < 513);
assertTrue("numRoutingShards: " + numRoutingShards, numRoutingShards > 512);
} else {
assertEquals("numShards: " + randomNumShards, numRoutingShards / 2, randomNumShards);
}
double ratio = numRoutingShards / (double) randomNumShards;
int intRatio = (int) ratio;
assertEquals(ratio, (double) intRatio, 0.0d);
assertTrue(1 < ratio);
assertTrue(ratio <= 1024);
assertEquals(0, intRatio % 2);
assertEquals("ratio is not a power of two", intRatio, Integer.highestOneBit(intRatio));
}
}
}
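The fixed-value assertions above pin down the shape of calculateNumRoutingShards: for 7.0+ indices it multiplies numShards by the largest power of two that keeps the result near 1024, always allowing at least one split; pre-7.0 indices get numShards back unchanged. A sketch consistent with these assertions (hedged, not necessarily the exact source):

    // Hedged sketch matching the assertions above.
    static int calculateNumRoutingShards(int numShards, Version indexVersionCreated) {
        if (indexVersionCreated.onOrAfter(Version.V_7_0_0_alpha1)) {
            int log2MaxNumShards = 10; // 2^10 = 1024, the target ceiling
            // ceil(log2(numShards)); evaluates to 0 for numShards == 1
            int ceilLog2NumShards = 32 - Integer.numberOfLeadingZeros(numShards - 1);
            // always allow at least one split, so the ratio is an even power of two
            int numSplits = Math.max(1, log2MaxNumShards - ceilLog2NumShards);
            return numShards << numSplits; // numShards * 2^numSplits
        } else {
            return numShards; // pre-7.0 indices are not splittable by default
        }
    }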

View File

@ -52,7 +52,6 @@ import static org.hamcrest.object.HasToString.hasToString;
public class OperationRoutingTests extends ESTestCase {
public void testGenerateShardId() {
int[][] possibleValues = new int[][] {
{8,4,2}, {20, 10, 2}, {36, 12, 3}, {15,5,1}

View File

@ -25,9 +25,7 @@ import java.io.IOException;
import static org.elasticsearch.common.geo.builders.ShapeBuilder.SPATIAL_CONTEXT;
/**
* Created by nknize on 9/22/17.
*/
/** Base class for all geo parsing tests */
abstract class BaseGeoParsingTestCase extends ESTestCase {
protected static final GeometryFactory GEOMETRY_FACTORY = SPATIAL_CONTEXT.getGeometryFactory();

View File

@ -60,20 +60,23 @@ public class CombinedDeletionPolicyTests extends ESTestCase {
EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG);
List<IndexCommit> commitList = new ArrayList<>();
long count = randomIntBetween(10, 20);
long lastGen = 0;
long minGen = Long.MAX_VALUE;
for (int i = 0; i < count; i++) {
lastGen += randomIntBetween(10, 20000);
long lastGen = randomIntBetween(10, 20000);
minGen = Math.min(minGen, lastGen);
commitList.add(mockIndexCommitWithTranslogGen(lastGen));
}
combinedDeletionPolicy.onInit(commitList);
verify(translogDeletionPolicy, times(1)).setMinTranslogGenerationForRecovery(lastGen);
verify(translogDeletionPolicy, times(1)).setMinTranslogGenerationForRecovery(minGen);
commitList.clear();
minGen = Long.MAX_VALUE;
for (int i = 0; i < count; i++) {
lastGen += randomIntBetween(10, 20000);
long lastGen = randomIntBetween(10, 20000);
minGen = Math.min(minGen, lastGen);
commitList.add(mockIndexCommitWithTranslogGen(lastGen));
}
combinedDeletionPolicy.onCommit(commitList);
verify(translogDeletionPolicy, times(1)).setMinTranslogGenerationForRecovery(lastGen);
verify(translogDeletionPolicy, times(1)).setMinTranslogGenerationForRecovery(minGen);
}
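The switch from the last generation to the minimum encodes the invariant under test: translog deletion must retain everything back to the smallest generation referenced by any retained commit, since any of those commits may still be opened for recovery. A hedged sketch of that computation, assuming each commit records its translog generation in the commit user data under Translog.TRANSLOG_GENERATION_KEY:

    // Hedged sketch, not the exact CombinedDeletionPolicy source
    // (note: IndexCommit#getUserData may throw IOException).
    long minGen = Long.MAX_VALUE;
    for (IndexCommit commit : commitList) {
        minGen = Math.min(minGen,
                Long.parseLong(commit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)));
    }
    translogDeletionPolicy.setMinTranslogGenerationForRecovery(minGen);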
IndexCommit mockIndexCommitWithTranslogGen(long gen) throws IOException {

View File

@ -395,4 +395,20 @@ public class DateFieldMapperTests extends ESSingleNodeTestCase {
MapperService.MergeReason.MAPPING_UPDATE, randomBoolean()));
assertThat(e.getMessage(), containsString("[mapper [release_date] has different [format] values]"));
}
public void testMergeText() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("doc")
.startObject("properties").startObject("date").field("type", "date").endObject()
.endObject().endObject().endObject().string();
DocumentMapper mapper = indexService.mapperService().parse("doc", new CompressedXContent(mapping), false);
String mappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("doc")
.startObject("properties").startObject("date").field("type", "text").endObject()
.endObject().endObject().endObject().string();
DocumentMapper update = indexService.mapperService().parse("doc", new CompressedXContent(mappingUpdate), false);
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> mapper.merge(update.mapping(), randomBoolean()));
assertEquals("mapper [date] of different type, current_type [date], merged_type [text]", e.getMessage());
}
}

View File

@ -289,4 +289,47 @@ public class DocumentMapperMergeTests extends ESSingleNodeTestCase {
Exception e = expectThrows(IllegalArgumentException.class, () -> initMapper.merge(updatedMapper.mapping(), false));
assertThat(e.getMessage(), containsString("The _parent field's type option can't be changed: [null]->[parent]"));
}
public void testMergeMeta() throws IOException {
DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
String initMapping = XContentFactory.jsonBuilder()
.startObject()
.startObject("test")
.startObject("_meta")
.field("foo").value("bar")
.endObject()
.endObject()
.endObject()
.string();
DocumentMapper initMapper = parser.parse("test", new CompressedXContent(initMapping));
assertThat(initMapper.meta().get("foo"), equalTo("bar"));
String updateMapping = XContentFactory.jsonBuilder()
.startObject()
.startObject("test")
.startObject("properties")
.startObject("name").field("type", "text").endObject()
.endObject()
.endObject()
.endObject()
.string();
DocumentMapper updatedMapper = parser.parse("test", new CompressedXContent(updateMapping));
assertThat(initMapper.merge(updatedMapper.mapping(), true).meta().get("foo"), equalTo("bar"));
updateMapping = XContentFactory.jsonBuilder()
.startObject()
.startObject("test")
.startObject("_meta")
.field("foo").value("new_bar")
.endObject()
.endObject()
.endObject()
.string();
updatedMapper = parser.parse("test", new CompressedXContent(updateMapping));
assertThat(initMapper.merge(updatedMapper.mapping(), true).meta().get("foo"), equalTo("new_bar"));
}
}

View File

@ -19,12 +19,12 @@
package org.elasticsearch.index.mapper;
import java.util.HashMap;
import java.util.HashSet;
import org.apache.lucene.index.IndexableField;
import org.elasticsearch.Version;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
@ -524,4 +524,144 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
assertFalse(objectMapper.parentObjectMapperAreNested(mapperService));
}
public void testLimitNestedDocsDefaultSettings() throws Exception {
Settings settings = Settings.builder().build();
MapperService mapperService = createIndex("test1", settings).mapperService();
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("nested1").field("type", "nested").endObject()
.endObject().endObject().endObject().string();
DocumentMapper docMapper = mapperService.documentMapperParser().parse("type", new CompressedXContent(mapping));
long defaultMaxNoNestedDocs = MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.get(settings);
// parsing a doc with more nested objects than defaultMaxNoNestedDocs fails
XContentBuilder docBuilder = XContentFactory.jsonBuilder();
docBuilder.startObject();
{
docBuilder.startArray("nested1");
{
for (int i = 0; i <= defaultMaxNoNestedDocs; i++) {
docBuilder.startObject().field("f", i).endObject();
}
}
docBuilder.endArray();
}
docBuilder.endObject();
SourceToParse source1 = SourceToParse.source("test1", "type", "1", docBuilder.bytes(), XContentType.JSON);
MapperParsingException e = expectThrows(MapperParsingException.class, () -> docMapper.parse(source1));
assertEquals(
"The number of nested documents has exceeded the allowed limit of [" + defaultMaxNoNestedDocs
+ "]. This limit can be set by changing the [" + MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.getKey()
+ "] index level setting.",
e.getMessage()
);
}
public void testLimitNestedDocs() throws Exception {
// setting limit to allow only two nested objects in the whole doc
long maxNoNestedDocs = 2L;
MapperService mapperService = createIndex("test1", Settings.builder()
.put(MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.getKey(), maxNoNestedDocs).build()).mapperService();
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("nested1").field("type", "nested").endObject()
.endObject().endObject().endObject().string();
DocumentMapper docMapper = mapperService.documentMapperParser().parse("type", new CompressedXContent(mapping));
// parsing a doc with 2 nested objects succeeds
XContentBuilder docBuilder = XContentFactory.jsonBuilder();
docBuilder.startObject();
{
docBuilder.startArray("nested1");
{
docBuilder.startObject().field("field1", "11").field("field2", "21").endObject();
docBuilder.startObject().field("field1", "12").field("field2", "22").endObject();
}
docBuilder.endArray();
}
docBuilder.endObject();
SourceToParse source1 = SourceToParse.source("test1", "type", "1", docBuilder.bytes(), XContentType.JSON);
ParsedDocument doc = docMapper.parse(source1);
assertThat(doc.docs().size(), equalTo(3));
// parsing a doc with 3 nested objects fails
XContentBuilder docBuilder2 = XContentFactory.jsonBuilder();
docBuilder2.startObject();
{
docBuilder2.startArray("nested1");
{
docBuilder2.startObject().field("field1", "11").field("field2", "21").endObject();
docBuilder2.startObject().field("field1", "12").field("field2", "22").endObject();
docBuilder2.startObject().field("field1", "13").field("field2", "23").endObject();
}
docBuilder2.endArray();
}
docBuilder2.endObject();
SourceToParse source2 = SourceToParse.source("test1", "type", "2", docBuilder2.bytes(), XContentType.JSON);
MapperParsingException e = expectThrows(MapperParsingException.class, () -> docMapper.parse(source2));
assertEquals(
"The number of nested documents has exceeded the allowed limit of [" + maxNoNestedDocs
+ "]. This limit can be set by changing the [" + MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.getKey()
+ "] index level setting.",
e.getMessage()
);
}
public void testLimitNestedDocsMultipleNestedFields() throws Exception {
// setting limit to allow only two nested objects in the whole doc
long maxNoNestedDocs = 2L;
MapperService mapperService = createIndex("test1", Settings.builder()
.put(MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.getKey(), maxNoNestedDocs).build()).mapperService();
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").startObject("properties")
.startObject("nested1").field("type", "nested").endObject()
.startObject("nested2").field("type", "nested").endObject()
.endObject().endObject().endObject().string();
DocumentMapper docMapper = mapperService.documentMapperParser().parse("type", new CompressedXContent(mapping));
// parsing a doc with 2 nested objects succeeds
XContentBuilder docBuilder = XContentFactory.jsonBuilder();
docBuilder.startObject();
{
docBuilder.startArray("nested1");
{
docBuilder.startObject().field("field1", "11").field("field2", "21").endObject();
}
docBuilder.endArray();
docBuilder.startArray("nested2");
{
docBuilder.startObject().field("field1", "21").field("field2", "22").endObject();
}
docBuilder.endArray();
}
docBuilder.endObject();
SourceToParse source1 = SourceToParse.source("test1", "type", "1", docBuilder.bytes(), XContentType.JSON);
ParsedDocument doc = docMapper.parse(source1);
assertThat(doc.docs().size(), equalTo(3));
// parsing a doc with 3 nested objects fails
XContentBuilder docBuilder2 = XContentFactory.jsonBuilder();
docBuilder2.startObject();
{
docBuilder2.startArray("nested1");
{
docBuilder2.startObject().field("field1", "11").field("field2", "21").endObject();
}
docBuilder2.endArray();
docBuilder2.startArray("nested2");
{
docBuilder2.startObject().field("field1", "12").field("field2", "22").endObject();
docBuilder2.startObject().field("field1", "13").field("field2", "23").endObject();
}
docBuilder2.endArray();
}
docBuilder2.endObject();
SourceToParse source2 = SourceToParse.source("test1", "type", "2", docBuilder2.bytes(), XContentType.JSON);
MapperParsingException e = expectThrows(MapperParsingException.class, () -> docMapper.parse(source2));
assertEquals(
"The number of nested documents has exceeded the allowed limit of [" + maxNoNestedDocs
+ "]. This limit can be set by changing the [" + MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.getKey()
+ "] index level setting.",
e.getMessage()
);
}
}

View File

@ -44,6 +44,7 @@ import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.test.VersionUtils;
import org.mockito.Mockito;
import java.io.IOException;
@ -58,14 +59,16 @@ public class TypeFieldTypeTests extends FieldTypeTestCase {
public void testTermsQueryWhenTypesAreDisabled() throws Exception {
QueryShardContext context = Mockito.mock(QueryShardContext.class);
Version indexVersionCreated = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.CURRENT);
Settings indexSettings = Settings.builder()
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_VERSION_CREATED, indexVersionCreated)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()).build();
IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build();
IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY);
Mockito.when(context.getIndexSettings()).thenReturn(mockSettings);
Mockito.when(context.indexVersionCreated()).thenReturn(indexVersionCreated);
MapperService mapperService = Mockito.mock(MapperService.class);
Set<String> types = Collections.emptySet();
@ -84,7 +87,7 @@ public class TypeFieldTypeTests extends FieldTypeTestCase {
Mockito.when(mapperService.hasNested()).thenReturn(true);
query = ft.termQuery("my_type", context);
assertEquals(Queries.newNonNestedFilter(), query);
assertEquals(Queries.newNonNestedFilter(context.indexVersionCreated()), query);
types = Collections.singleton("other_type");
Mockito.when(mapperService.types()).thenReturn(types);

View File

@ -39,6 +39,7 @@ import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.RoutingFieldMapper;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.test.ESTestCase;
@ -51,6 +52,7 @@ import java.util.List;
public class ShardSplittingQueryTests extends ESTestCase {
public void testSplitOnID() throws IOException {
SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
Directory dir = newFSDirectory(createTempDir());
final int numDocs = randomIntBetween(50, 100);
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
@ -76,13 +78,15 @@ public class ShardSplittingQueryTests extends ESTestCase {
}
docs.add(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
new SortedNumericDocValuesField("shard_id", shardId),
sequenceIDFields.primaryTerm
));
writer.addDocuments(docs);
} else {
writer.addDocument(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
new SortedNumericDocValuesField("shard_id", shardId),
sequenceIDFields.primaryTerm
));
}
}
@ -95,6 +99,7 @@ public class ShardSplittingQueryTests extends ESTestCase {
}
public void testSplitOnRouting() throws IOException {
SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
Directory dir = newFSDirectory(createTempDir());
final int numDocs = randomIntBetween(50, 100);
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
@ -122,14 +127,16 @@ public class ShardSplittingQueryTests extends ESTestCase {
docs.add(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
new SortedNumericDocValuesField("shard_id", shardId),
sequenceIDFields.primaryTerm
));
writer.addDocuments(docs);
} else {
writer.addDocument(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
new SortedNumericDocValuesField("shard_id", shardId),
sequenceIDFields.primaryTerm
));
}
}
@ -140,6 +147,7 @@ public class ShardSplittingQueryTests extends ESTestCase {
}
public void testSplitOnIdOrRouting() throws IOException {
SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
Directory dir = newFSDirectory(createTempDir());
final int numDocs = randomIntBetween(50, 100);
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
@ -160,13 +168,15 @@ public class ShardSplittingQueryTests extends ESTestCase {
rootDoc = Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
new SortedNumericDocValuesField("shard_id", shardId),
sequenceIDFields.primaryTerm
);
} else {
shardId = OperationRouting.generateShardId(metaData, Integer.toString(j), null);
rootDoc = Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
new SortedNumericDocValuesField("shard_id", shardId),
sequenceIDFields.primaryTerm
);
}
@ -194,6 +204,7 @@ public class ShardSplittingQueryTests extends ESTestCase {
public void testSplitOnRoutingPartitioned() throws IOException {
SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
Directory dir = newFSDirectory(createTempDir());
final int numDocs = randomIntBetween(50, 100);
RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
@ -223,14 +234,16 @@ public class ShardSplittingQueryTests extends ESTestCase {
docs.add(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
new SortedNumericDocValuesField("shard_id", shardId),
sequenceIDFields.primaryTerm
));
writer.addDocuments(docs);
} else {
writer.addDocument(Arrays.asList(
new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES),
new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES),
new SortedNumericDocValuesField("shard_id", shardId)
new SortedNumericDocValuesField("shard_id", shardId),
sequenceIDFields.primaryTerm
));
}
}

View File

@ -95,7 +95,8 @@ public class IndicesRequestCacheIT extends ESIntegTestCase {
Client client = client();
assertAcked(client.admin().indices().prepareCreate("index").addMapping("type", "s", "type=date")
.setSettings(Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).get());
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5).put("index.number_of_routing_shards", 5)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)).get());
indexRandom(true, client.prepareIndex("index", "type", "1").setRouting("1").setSource("s", "2016-03-19"),
client.prepareIndex("index", "type", "2").setRouting("1").setSource("s", "2016-03-20"),
client.prepareIndex("index", "type", "3").setRouting("1").setSource("s", "2016-03-21"),
@ -362,7 +363,8 @@ public class IndicesRequestCacheIT extends ESIntegTestCase {
public void testCanCache() throws Exception {
Client client = client();
Settings settings = Settings.builder().put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build();
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0).build();
assertAcked(client.admin().indices().prepareCreate("index").addMapping("type", "s", "type=date")
.setSettings(settings)
.get());

View File

@ -840,7 +840,8 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
// create an index with too few shards
IllegalArgumentException eBadIndex = expectThrows(IllegalArgumentException.class,
() -> prepareCreate("test_bad", Settings.builder()
.put("index.number_of_shards", 5))
.put("index.number_of_shards", 5)
.put("index.number_of_routing_shards", 5))
.get());
assertThat(eBadIndex.getMessage(), containsString("partition size [6] should be a positive number "
@ -848,7 +849,8 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
// finally, create a valid index
prepareCreate("test_good", Settings.builder()
.put("index.number_of_shards", 7))
.put("index.number_of_shards", 7)
.put("index.number_of_routing_shards", 7))
.get();
GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test_good").get();

View File

@ -0,0 +1,143 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.repositories.blobstore;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingHelper;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.TestEnvironment;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardState;
import org.elasticsearch.index.shard.IndexShardTestCase;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE;
/**
* This class tests the behavior of {@link BlobStoreRepository} when it
* restores a shard from a snapshot but some files with the same names already
* exist on disk.
*/
public class BlobStoreRepositoryRestoreTests extends IndexShardTestCase {
/**
* Restoring a snapshot that contains multiple files must succeed even when
* some files already exist in the shard's store.
*/
public void testRestoreSnapshotWithExistingFiles() throws IOException {
final IndexId indexId = new IndexId(randomAlphaOfLength(10), UUIDs.randomBase64UUID());
final ShardId shardId = new ShardId(indexId.getName(), indexId.getId(), 0);
IndexShard shard = newShard(shardId, true);
try {
// index documents in the shard
final int numDocs = scaledRandomIntBetween(1, 500);
recoverShardFromStore(shard);
for (int i = 0; i < numDocs; i++) {
indexDoc(shard, "doc", Integer.toString(i));
if (rarely()) {
flushShard(shard, false);
}
}
assertDocCount(shard, numDocs);
// snapshot the shard
final Repository repository = createRepository();
final Snapshot snapshot = new Snapshot(repository.getMetadata().name(), new SnapshotId(randomAlphaOfLength(10), "_uuid"));
snapshotShard(shard, snapshot, repository);
// capture current store files
final Store.MetadataSnapshot storeFiles = shard.snapshotStoreMetadata();
assertFalse(storeFiles.asMap().isEmpty());
// close the shard
closeShards(shard);
// delete some random files in the store
List<String> deletedFiles = randomSubsetOf(randomIntBetween(1, storeFiles.size() - 1), storeFiles.asMap().keySet());
for (String deletedFile : deletedFiles) {
Files.delete(shard.shardPath().resolveIndex().resolve(deletedFile));
}
// build a new shard using the same store directory as the closed shard
ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(shard.routingEntry(), EXISTING_STORE_INSTANCE);
shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetaData(), null, null, () -> {});
// restore the shard
recoverShardFromSnapshot(shard, snapshot, repository);
// check that the shard is not corrupted
TestUtil.checkIndex(shard.store().directory());
// check that all files have been restored
final Directory directory = shard.store().directory();
final List<String> directoryFiles = Arrays.asList(directory.listAll());
for (StoreFileMetaData storeFile : storeFiles) {
String fileName = storeFile.name();
assertTrue("File [" + fileName + "] does not exist in store directory", directoryFiles.contains(fileName));
assertEquals(storeFile.length(), shard.store().directory().fileLength(fileName));
}
} finally {
if (shard != null && shard.state() != IndexShardState.CLOSED) {
try {
shard.close("test", false);
} finally {
IOUtils.close(shard.store());
}
}
}
}
/** Create a {@link Repository} with a random name **/
private Repository createRepository() throws IOException {
Settings settings = Settings.builder().put("location", randomAlphaOfLength(10)).build();
RepositoryMetaData repositoryMetaData = new RepositoryMetaData(randomAlphaOfLength(10), FsRepository.TYPE, settings);
return new FsRepository(repositoryMetaData, createEnvironment(), xContentRegistry());
}
/** Create an {@link Environment} with random path.home and path.repo **/
private Environment createEnvironment() {
Path home = createTempDir();
return TestEnvironment.newEnvironment(Settings.builder()
.put(Environment.PATH_HOME_SETTING.getKey(), home.toAbsolutePath())
.put(Environment.PATH_REPO_SETTING.getKey(), home.resolve("repo").toAbsolutePath())
.build());
}
}

View File

@ -0,0 +1,43 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.document;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.test.ESTestCase;
import static org.hamcrest.Matchers.equalTo;
import static org.mockito.Mockito.mock;
public class RestIndexActionTests extends ESTestCase {
public void testCreateOpTypeValidation() throws Exception {
Settings settings = settings(Version.CURRENT).build();
RestIndexAction.CreateHandler create = new RestIndexAction(settings, mock(RestController.class)).new CreateHandler(settings);
String opType = randomFrom("CREATE", null);
create.validateOpType(opType);
String illegalOpType = randomFrom("index", "unknown", "");
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> create.validateOpType(illegalOpType));
assertThat(e.getMessage(), equalTo("opType must be 'create', found: [" + illegalOpType + "]"));
}
}

View File

@ -24,6 +24,8 @@ import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.test.ESIntegTestCase;
@ -196,6 +198,13 @@ public class AliasRoutingIT extends ESIntegTestCase {
}
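// Presumably pins the routing hash space to the shard count so the hard-coded
// routing values used by these tests keep mapping to the same shards now that
// the default number_of_routing_shards may exceed the shard count.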
@Override
public Settings indexSettings() {
Settings settings = super.indexSettings();
int numShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(settings);
return Settings.builder().put(settings).put("index.number_of_routing_shards", numShards).build();
}
public void testAliasSearchRoutingWithTwoIndices() throws Exception {
createIndex("test-a");
createIndex("test-b");

View File

@ -42,6 +42,7 @@ public class PartitionedRoutingIT extends ESIntegTestCase {
client().admin().indices().prepareCreate(index)
.setSettings(Settings.builder()
.put("index.number_of_shards", shards)
.put("index.number_of_routing_shards", shards)
.put("index.routing_partition_size", partitionSize))
.addMapping("type", "{\"type\":{\"_routing\":{\"required\":true}}}", XContentType.JSON)
.execute().actionGet();
@ -67,6 +68,7 @@ public class PartitionedRoutingIT extends ESIntegTestCase {
client().admin().indices().prepareCreate(index)
.setSettings(Settings.builder()
.put("index.number_of_shards", currentShards)
.put("index.number_of_routing_shards", currentShards)
.put("index.number_of_replicas", numberOfReplicas())
.put("index.routing_partition_size", partitionSize))
.addMapping("type", "{\"type\":{\"_routing\":{\"required\":true}}}", XContentType.JSON)

View File

@ -36,8 +36,14 @@ import org.elasticsearch.action.termvectors.TermVectorsResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ESIntegTestCase;
@ -53,12 +59,27 @@ public class SimpleRoutingIT extends ESIntegTestCase {
return 2;
}
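// Finds a routing value that maps the given id to a different shard than the
// id alone would, so the tests below can tell routed and unrouted operations apart.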
public String findNonMatchingRoutingValue(String index, String id) {
OperationRouting operationRouting = new OperationRouting(Settings.EMPTY,
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
ClusterState state = client().admin().cluster().prepareState().all().get().getState();
int routing = -1;
ShardId idShard;
ShardId routingShard;
do {
idShard = operationRouting.shardId(state, index, id, null);
routingShard = operationRouting.shardId(state, index, id, Integer.toString(++routing));
} while (idShard.getId() == routingShard.id());
return Integer.toString(routing);
}
public void testSimpleCrudRouting() throws Exception {
createIndex("test");
ensureGreen();
logger.info("--> indexing with id [1], and routing [0]");
client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE)
String routingValue = findNonMatchingRoutingValue("test", "1");
logger.info("--> indexing with id [1], and routing [{}]", routingValue);
client().prepareIndex("test", "type1", "1").setRouting(routingValue).setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE)
.get();
logger.info("--> verifying get with no routing, should not find anything");
for (int i = 0; i < 5; i++) {
@ -66,25 +87,25 @@ public class SimpleRoutingIT extends ESIntegTestCase {
}
logger.info("--> verifying get with routing, should find");
for (int i = 0; i < 5; i++) {
assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
assertThat(client().prepareGet("test", "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true));
}
logger.info("--> deleting with no routing, should not delete anything");
client().prepareDelete("test", "type1", "1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
for (int i = 0; i < 5; i++) {
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
assertThat(client().prepareGet("test", "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true));
}
logger.info("--> deleting with routing, should delete");
client().prepareDelete("test", "type1", "1").setRouting("0").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
client().prepareDelete("test", "type1", "1").setRouting(routingValue).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
for (int i = 0; i < 5; i++) {
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
assertThat(client().prepareGet("test", "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(false));
}
logger.info("--> indexing with id [1], and routing [0]");
client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE)
client().prepareIndex("test", "type1", "1").setRouting(routingValue).setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE)
.get();
logger.info("--> verifying get with no routing, should not find anything");
for (int i = 0; i < 5; i++) {
@ -92,16 +113,17 @@ public class SimpleRoutingIT extends ESIntegTestCase {
}
logger.info("--> verifying get with routing, should find");
for (int i = 0; i < 5; i++) {
assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
assertThat(client().prepareGet("test", "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true));
}
}
public void testSimpleSearchRouting() {
createIndex("test");
ensureGreen();
String routingValue = findNonMatchingRoutingValue("test", "1");
logger.info("--> indexing with id [1], and routing [0]");
client().prepareIndex("test", "type1", "1").setRouting("0").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE)
logger.info("--> indexing with id [1], and routing [{}]", routingValue);
client().prepareIndex("test", "type1", "1").setRouting(routingValue).setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE)
.get();
logger.info("--> verifying get with no routing, should not find anything");
for (int i = 0; i < 5; i++) {
@ -109,7 +131,7 @@ public class SimpleRoutingIT extends ESIntegTestCase {
}
logger.info("--> verifying get with routing, should find");
for (int i = 0; i < 5; i++) {
assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
assertThat(client().prepareGet("test", "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true));
}
logger.info("--> search with no routing, should fine one");
@ -125,12 +147,13 @@ public class SimpleRoutingIT extends ESIntegTestCase {
logger.info("--> search with correct routing, should find");
for (int i = 0; i < 5; i++) {
assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
assertThat(client().prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
assertThat(client().prepareSearch().setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
assertThat(client().prepareSearch().setSize(0).setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
}
logger.info("--> indexing with id [2], and routing [1]");
client().prepareIndex("test", "type1", "2").setRouting("1").setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
String secondRoutingValue = "1";
logger.info("--> indexing with id [{}], and routing [{}]", routingValue, secondRoutingValue);
client().prepareIndex("test", "type1", routingValue).setRouting(secondRoutingValue).setSource("field", "value1").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
logger.info("--> search with no routing, should fine two");
for (int i = 0; i < 5; i++) {
@ -138,28 +161,28 @@ public class SimpleRoutingIT extends ESIntegTestCase {
assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
}
logger.info("--> search with 0 routing, should find one");
logger.info("--> search with {} routing, should find one", routingValue);
for (int i = 0; i < 5; i++) {
assertThat(client().prepareSearch().setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
assertThat(client().prepareSearch().setSize(0).setRouting("0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
assertThat(client().prepareSearch().setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
assertThat(client().prepareSearch().setSize(0).setRouting(routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
}
logger.info("--> search with 1 routing, should find one");
logger.info("--> search with {} routing, should find one", secondRoutingValue);
for (int i = 0; i < 5; i++) {
assertThat(client().prepareSearch().setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
assertThat(client().prepareSearch().setSize(0).setRouting("1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
assertThat(client().prepareSearch().setSize(0).setRouting(secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(1L));
}
logger.info("--> search with 0,1 routings , should find two");
logger.info("--> search with {},{} routings , should find two", routingValue, "1");
for (int i = 0; i < 5; i++) {
assertThat(client().prepareSearch().setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
assertThat(client().prepareSearch().setSize(0).setRouting("0", "1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
assertThat(client().prepareSearch().setRouting(routingValue, secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
assertThat(client().prepareSearch().setSize(0).setRouting(routingValue, secondRoutingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
}
logger.info("--> search with 0,1,0 routings , should find two");
logger.info("--> search with {},{},{} routings , should find two", routingValue, secondRoutingValue, routingValue);
for (int i = 0; i < 5; i++) {
assertThat(client().prepareSearch().setRouting("0", "1", "0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
assertThat(client().prepareSearch().setSize(0).setRouting("0", "1", "0").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
assertThat(client().prepareSearch().setRouting(routingValue, secondRoutingValue, routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
assertThat(client().prepareSearch().setSize(0).setRouting(routingValue, secondRoutingValue, routingValue).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(2L));
}
}
@ -168,9 +191,10 @@ public class SimpleRoutingIT extends ESIntegTestCase {
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true).endObject().endObject().endObject())
.execute().actionGet();
ensureGreen();
String routingValue = findNonMatchingRoutingValue("test", "1");
logger.info("--> indexing with id [1], and routing [0]");
client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("0").setSource("field", "value1")
logger.info("--> indexing with id [1], and routing [{}]", routingValue);
client().prepareIndex(indexOrAlias(), "type1", "1").setRouting(routingValue).setSource("field", "value1")
.setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
logger.info("--> verifying get with no routing, should fail");
@ -184,7 +208,7 @@ public class SimpleRoutingIT extends ESIntegTestCase {
logger.info("--> verifying get with routing, should find");
for (int i = 0; i < 5; i++) {
assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true));
}
logger.info("--> deleting with no routing, should fail");
@ -203,7 +227,7 @@ public class SimpleRoutingIT extends ESIntegTestCase {
assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
}
assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true));
}
try {
@ -213,7 +237,7 @@ public class SimpleRoutingIT extends ESIntegTestCase {
assertThat(e.unwrapCause(), instanceOf(RoutingMissingException.class));
}
client().prepareUpdate(indexOrAlias(), "type1", "1").setRouting("0").setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2").get();
client().prepareUpdate(indexOrAlias(), "type1", "1").setRouting(routingValue).setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2").get();
client().admin().indices().prepareRefresh().execute().actionGet();
for (int i = 0; i < 5; i++) {
@ -224,12 +248,12 @@ public class SimpleRoutingIT extends ESIntegTestCase {
assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
}
GetResponse getResponse = client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet();
GetResponse getResponse = client().prepareGet(indexOrAlias(), "type1", "1").setRouting(routingValue).execute().actionGet();
assertThat(getResponse.isExists(), equalTo(true));
assertThat(getResponse.getSourceAsMap().get("field"), equalTo("value2"));
}
client().prepareDelete(indexOrAlias(), "type1", "1").setRouting("0").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
client().prepareDelete(indexOrAlias(), "type1", "1").setRouting(routingValue).setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
for (int i = 0; i < 5; i++) {
try {
@ -239,7 +263,7 @@ public class SimpleRoutingIT extends ESIntegTestCase {
assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
}
assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(false));
}
}
@ -251,7 +275,6 @@ public class SimpleRoutingIT extends ESIntegTestCase {
.endObject().endObject())
.execute().actionGet();
ensureGreen();
{
BulkResponse bulkResponse = client().prepareBulk().add(Requests.indexRequest(indexOrAlias()).type("type1").id("1")
.source(Requests.INDEX_CONTENT_TYPE, "field", "value")).execute().actionGet();
@ -320,19 +343,21 @@ public class SimpleRoutingIT extends ESIntegTestCase {
}
public void testRequiredRoutingMappingVariousAPIs() throws Exception {
client().admin().indices().prepareCreate("test").addAlias(new Alias("alias"))
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_routing").field("required", true).endObject().endObject().endObject())
.addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1")
.startObject("_routing").field("required", true).endObject().endObject().endObject())
.execute().actionGet();
ensureGreen();
logger.info("--> indexing with id [1], and routing [0]");
client().prepareIndex(indexOrAlias(), "type1", "1").setRouting("0").setSource("field", "value1").get();
logger.info("--> indexing with id [2], and routing [0]");
client().prepareIndex(indexOrAlias(), "type1", "2").setRouting("0").setSource("field", "value2")
String routingValue = findNonMatchingRoutingValue("test", "1");
logger.info("--> indexing with id [1], and routing [{}]", routingValue);
client().prepareIndex(indexOrAlias(), "type1", "1").setRouting(routingValue).setSource("field", "value1").get();
logger.info("--> indexing with id [2], and routing [{}]", routingValue);
client().prepareIndex(indexOrAlias(), "type1", "2").setRouting(routingValue).setSource("field", "value2")
.setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
logger.info("--> verifying get with id [1] with routing [0], should succeed");
assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
assertThat(client().prepareGet(indexOrAlias(), "type1", "1").setRouting(routingValue).execute().actionGet().isExists(), equalTo(true));
logger.info("--> verifying get with id [1], with no routing, should fail");
try {
@ -345,7 +370,7 @@ public class SimpleRoutingIT extends ESIntegTestCase {
logger.info("--> verifying explain with id [2], with routing [0], should succeed");
ExplainResponse explainResponse = client().prepareExplain(indexOrAlias(), "type1", "2")
.setQuery(QueryBuilders.matchAllQuery())
.setRouting("0").get();
.setRouting(routingValue).get();
assertThat(explainResponse.isExists(), equalTo(true));
assertThat(explainResponse.isMatch(), equalTo(true));
@ -359,7 +384,7 @@ public class SimpleRoutingIT extends ESIntegTestCase {
}
logger.info("--> verifying term vector with id [1], with routing [0], should succeed");
TermVectorsResponse termVectorsResponse = client().prepareTermVectors(indexOrAlias(), "type1", "1").setRouting("0").get();
TermVectorsResponse termVectorsResponse = client().prepareTermVectors(indexOrAlias(), "type1", "1").setRouting(routingValue).get();
assertThat(termVectorsResponse.isExists(), equalTo(true));
assertThat(termVectorsResponse.getId(), equalTo("1"));
@ -370,7 +395,7 @@ public class SimpleRoutingIT extends ESIntegTestCase {
assertThat(e.getMessage(), equalTo("routing is required for [test]/[type1]/[1]"));
}
UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1").setRouting("0")
UpdateResponse updateResponse = client().prepareUpdate(indexOrAlias(), "type1", "1").setRouting(routingValue)
.setDoc(Requests.INDEX_CONTENT_TYPE, "field1", "value1").get();
assertThat(updateResponse.getId(), equalTo("1"));
assertThat(updateResponse.getVersion(), equalTo(2L));
@ -405,8 +430,8 @@ public class SimpleRoutingIT extends ESIntegTestCase {
assertThat(multiGetResponse.getResponses()[1].getFailure().getMessage(), equalTo("routing is required for [test]/[type1]/[2]"));
MultiTermVectorsResponse multiTermVectorsResponse = client().prepareMultiTermVectors()
.add(new TermVectorsRequest(indexOrAlias(), "type1", "1").routing("0"))
.add(new TermVectorsRequest(indexOrAlias(), "type1", "2").routing("0")).get();
.add(new TermVectorsRequest(indexOrAlias(), "type1", "1").routing(routingValue))
.add(new TermVectorsRequest(indexOrAlias(), "type1", "2").routing(routingValue)).get();
assertThat(multiTermVectorsResponse.getResponses().length, equalTo(2));
assertThat(multiTermVectorsResponse.getResponses()[0].getId(), equalTo("1"));
assertThat(multiTermVectorsResponse.getResponses()[0].isFailed(), equalTo(false));

View File

@ -102,6 +102,7 @@ public class DefaultSearchContextTests extends ESTestCase {
IndexMetaData indexMetaData = IndexMetaData.builder("index").settings(settings).build();
IndexSettings indexSettings = new IndexSettings(indexMetaData, Settings.EMPTY);
when(indexService.getIndexSettings()).thenReturn(indexSettings);
when(mapperService.getIndexSettings()).thenReturn(indexSettings);
BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());

View File

@ -220,12 +220,15 @@ public class AggregationsTests extends ESTestCase {
* - we cannot insert randomly into VALUE or VALUES objects e.g. in Percentiles, the keys need to be numeric there
*
* - we cannot insert into ExtendedMatrixStats "covariance" or "correlation" fields, their syntax is strict
*
* - exclude "key", it can be an array of objects and we need strict values
*/
Predicate<String> excludes = path -> (path.isEmpty() || path.endsWith("aggregations")
|| path.endsWith(Aggregation.CommonFields.META.getPreferredName())
|| path.endsWith(Aggregation.CommonFields.BUCKETS.getPreferredName())
|| path.endsWith(CommonFields.VALUES.getPreferredName()) || path.endsWith("covariance") || path.endsWith("correlation")
|| path.contains(CommonFields.VALUE.getPreferredName()));
|| path.contains(CommonFields.VALUE.getPreferredName())
|| path.endsWith(CommonFields.KEY.getPreferredName()));
mutated = insertRandomFields(xContentType, originalBytes, excludes, random());
} else {
mutated = originalBytes;

View File

@ -36,11 +36,13 @@ import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.index.mapper.KeywordFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
@ -56,6 +58,7 @@ import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.sum.InternalSum;
import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
import java.util.ArrayList;
@ -73,6 +76,9 @@ public class NestedAggregatorTests extends AggregatorTestCase {
private static final String MAX_AGG_NAME = "maxAgg";
private static final String SUM_AGG_NAME = "sumAgg";
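// Root documents below get a primary term doc values field; presumably the
// updated non-nested filter keys off that field's presence to tell root
// documents apart from nested ones.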
private final SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
public void testNoDocs() throws IOException {
try (Directory directory = newDirectory()) {
try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) {
@ -120,6 +126,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
UidFieldMapper.Defaults.FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "test",
TypeFieldMapper.Defaults.FIELD_TYPE));
document.add(sequenceIDFields.primaryTerm);
documents.add(document);
iw.addDocuments(documents);
}
@ -168,6 +175,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
UidFieldMapper.Defaults.FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "test",
TypeFieldMapper.Defaults.FIELD_TYPE));
document.add(sequenceIDFields.primaryTerm);
documents.add(document);
iw.addDocuments(documents);
}
@ -216,6 +224,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
UidFieldMapper.Defaults.FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "test",
TypeFieldMapper.Defaults.FIELD_TYPE));
document.add(sequenceIDFields.primaryTerm);
documents.add(document);
iw.addDocuments(documents);
}
@ -254,6 +263,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
public void testResetRootDocId() throws Exception {
IndexWriterConfig iwc = new IndexWriterConfig(null);
iwc.setMergePolicy(NoMergePolicy.INSTANCE);
SeqNoFieldMapper.SequenceIDFields sequenceIDFields = SeqNoFieldMapper.SequenceIDFields.emptySeqID();
try (Directory directory = newDirectory()) {
try (RandomIndexWriter iw = new RandomIndexWriter(random(), directory, iwc)) {
List<Document> documents = new ArrayList<>();
@ -274,6 +284,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
document = new Document();
document.add(new Field(UidFieldMapper.NAME, "type#1", UidFieldMapper.Defaults.FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
document.add(sequenceIDFields.primaryTerm);
documents.add(document);
iw.addDocuments(documents);
iw.commit();
@ -288,6 +299,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
document = new Document();
document.add(new Field(UidFieldMapper.NAME, "type#2", UidFieldMapper.Defaults.FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
document.add(sequenceIDFields.primaryTerm);
documents.add(document);
iw.addDocuments(documents);
documents.clear();
@ -299,6 +311,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
document = new Document();
document.add(new Field(UidFieldMapper.NAME, "type#3", UidFieldMapper.Defaults.FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "test", TypeFieldMapper.Defaults.FIELD_TYPE));
document.add(sequenceIDFields.primaryTerm);
documents.add(document);
iw.addDocuments(documents);
@ -314,7 +327,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
fieldType.setName(VALUE_FIELD_NAME);
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(Queries.newNonNestedFilter(), BooleanClause.Occur.MUST);
bq.add(Queries.newNonNestedFilter(VersionUtils.randomVersion(random())), BooleanClause.Occur.MUST);
bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), BooleanClause.Occur.MUST_NOT);
Nested nested = search(newSearcher(indexReader, false, true),
@ -550,6 +563,7 @@ public class NestedAggregatorTests extends AggregatorTestCase {
Document document = new Document();
document.add(new Field(UidFieldMapper.NAME, "book#" + id, UidFieldMapper.Defaults.FIELD_TYPE));
document.add(new Field(TypeFieldMapper.NAME, "book", TypeFieldMapper.Defaults.FIELD_TYPE));
document.add(sequenceIDFields.primaryTerm);
for (String author : authors) {
document.add(new SortedSetDocValuesField("author", new BytesRef(author)));
}

View File

@ -29,6 +29,7 @@ import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.store.Directory;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
import org.elasticsearch.index.mapper.TypeFieldMapper;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.search.aggregations.AggregatorTestCase;
@ -108,6 +109,7 @@ public class ReverseNestedAggregatorTests extends AggregatorTestCase {
TypeFieldMapper.Defaults.FIELD_TYPE));
long value = randomNonNegativeLong() % 10000;
document.add(new SortedNumericDocValuesField(VALUE_FIELD_NAME, value));
document.add(SeqNoFieldMapper.SequenceIDFields.emptySeqID().primaryTerm);
if (numNestedDocs > 0) {
expectedMaxValue = Math.max(expectedMaxValue, value);
expectedParentDocs++;

View File

@ -62,6 +62,7 @@ import org.elasticsearch.search.sort.SortAndFormats;
import org.elasticsearch.test.TestSearchContext;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
@ -465,11 +466,11 @@ public class QueryPhaseTests extends IndexShardTestCase {
public void testIndexSortScrollOptimization() throws Exception {
Directory dir = newDirectory();
final Sort sort = new Sort(
final Sort indexSort = new Sort(
new SortField("rank", SortField.Type.INT),
new SortField("tiebreaker", SortField.Type.INT)
);
IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(sort);
IndexWriterConfig iwc = newIndexWriterConfig().setIndexSort(indexSort);
RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
final int numDocs = scaledRandomIntBetween(100, 200);
for (int i = 0; i < numDocs; ++i) {
@ -483,44 +484,49 @@ public class QueryPhaseTests extends IndexShardTestCase {
}
w.close();
TestSearchContext context = new TestSearchContext(null, indexShard);
context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
ScrollContext scrollContext = new ScrollContext();
scrollContext.lastEmittedDoc = null;
scrollContext.maxScore = Float.NaN;
scrollContext.totalHits = -1;
context.scrollContext(scrollContext);
context.setTask(new SearchTask(123L, "", "", "", null));
context.setSize(10);
context.sort(new SortAndFormats(sort, new DocValueFormat[] {DocValueFormat.RAW, DocValueFormat.RAW}));
final IndexReader reader = DirectoryReader.open(dir);
IndexSearcher contextSearcher = new IndexSearcher(reader);
List<SortAndFormats> searchSortAndFormats = new ArrayList<>();
searchSortAndFormats.add(new SortAndFormats(indexSort, new DocValueFormat[]{DocValueFormat.RAW, DocValueFormat.RAW}));
// search sort is a prefix of the index sort
searchSortAndFormats.add(new SortAndFormats(new Sort(indexSort.getSort()[0]), new DocValueFormat[]{DocValueFormat.RAW}));
for (SortAndFormats searchSortAndFormat : searchSortAndFormats) {
IndexSearcher contextSearcher = new IndexSearcher(reader);
TestSearchContext context = new TestSearchContext(null, indexShard);
context.parsedQuery(new ParsedQuery(new MatchAllDocsQuery()));
ScrollContext scrollContext = new ScrollContext();
scrollContext.lastEmittedDoc = null;
scrollContext.maxScore = Float.NaN;
scrollContext.totalHits = -1;
context.scrollContext(scrollContext);
context.setTask(new SearchTask(123L, "", "", "", null));
context.setSize(10);
context.sort(searchSortAndFormat);
QueryPhase.execute(context, contextSearcher, checkCancelled -> {}, sort);
assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs));
assertNull(context.queryResult().terminatedEarly());
assertThat(context.terminateAfter(), equalTo(0));
assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs));
int sizeMinus1 = context.queryResult().topDocs().scoreDocs.length - 1;
FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[sizeMinus1];
QueryPhase.execute(context, contextSearcher, checkCancelled -> {}, searchSortAndFormat.sort);
assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs));
assertNull(context.queryResult().terminatedEarly());
assertThat(context.terminateAfter(), equalTo(0));
assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs));
int sizeMinus1 = context.queryResult().topDocs().scoreDocs.length - 1;
FieldDoc lastDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[sizeMinus1];
contextSearcher = getAssertingEarlyTerminationSearcher(reader, 10);
QueryPhase.execute(context, contextSearcher, checkCancelled -> {}, sort);
assertNull(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs));
assertThat(context.terminateAfter(), equalTo(0));
assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs));
FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0];
for (int i = 0; i < sort.getSort().length; i++) {
@SuppressWarnings("unchecked")
FieldComparator<Object> comparator = (FieldComparator<Object>) sort.getSort()[i].getComparator(1, i);
int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]);
if (cmp == 0) {
continue;
contextSearcher = getAssertingEarlyTerminationSearcher(reader, 10);
QueryPhase.execute(context, contextSearcher, checkCancelled -> {}, searchSortAndFormat.sort);
assertNull(context.queryResult().terminatedEarly());
assertThat(context.queryResult().topDocs().totalHits, equalTo((long) numDocs));
assertThat(context.terminateAfter(), equalTo(0));
assertThat(context.queryResult().getTotalHits(), equalTo((long) numDocs));
FieldDoc firstDoc = (FieldDoc) context.queryResult().topDocs().scoreDocs[0];
for (int i = 0; i < searchSortAndFormat.sort.getSort().length; i++) {
@SuppressWarnings("unchecked")
FieldComparator<Object> comparator = (FieldComparator<Object>) searchSortAndFormat.sort.getSort()[i].getComparator(1, i);
int cmp = comparator.compareValues(firstDoc.fields[i], lastDoc.fields[i]);
if (cmp == 0) {
continue;
}
assertThat(cmp, equalTo(1));
break;
}
assertThat(cmp, equalTo(1));
break;
}
reader.close();
dir.close();

View File

@ -19,6 +19,11 @@
package org.elasticsearch.search.suggest.phrase;
import org.apache.lucene.search.spell.DirectSpellChecker;
import org.apache.lucene.search.spell.JaroWinklerDistance;
import org.apache.lucene.search.spell.LevensteinDistance;
import org.apache.lucene.search.spell.LuceneLevenshteinDistance;
import org.apache.lucene.search.spell.NGramDistance;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
@ -38,6 +43,8 @@ import java.util.List;
import java.util.function.Supplier;
import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.core.IsInstanceOf.instanceOf;
public class DirectCandidateGeneratorTests extends ESTestCase {
private static final int NUMBER_OF_RUNS = 20;
@ -65,6 +72,22 @@ public class DirectCandidateGeneratorTests extends ESTestCase {
}
}
public void testFromString() {
assertThat(DirectCandidateGeneratorBuilder.resolveDistance("internal"), equalTo(DirectSpellChecker.INTERNAL_LEVENSHTEIN));
assertThat(DirectCandidateGeneratorBuilder.resolveDistance("damerau_levenshtein"), instanceOf(LuceneLevenshteinDistance.class));
assertThat(DirectCandidateGeneratorBuilder.resolveDistance("levenshtein"), instanceOf(LevensteinDistance.class));
assertThat(DirectCandidateGeneratorBuilder.resolveDistance("jaroWinkler"), instanceOf(JaroWinklerDistance.class));
assertThat(DirectCandidateGeneratorBuilder.resolveDistance("ngram"), instanceOf(NGramDistance.class));
expectThrows(IllegalArgumentException.class, () -> DirectCandidateGeneratorBuilder.resolveDistance("doesnt_exist"));
expectThrows(NullPointerException.class, () -> DirectCandidateGeneratorBuilder.resolveDistance(null));
}
public void testLevensteinDeprecation() {
assertThat(DirectCandidateGeneratorBuilder.resolveDistance("levenstein"), instanceOf(LevensteinDistance.class));
assertWarnings("Deprecated distance [levenstein] used, replaced by [levenshtein]");
}
private static DirectCandidateGeneratorBuilder mutate(DirectCandidateGeneratorBuilder original) throws IOException {
DirectCandidateGeneratorBuilder mutation = copy(original);
List<Supplier<DirectCandidateGeneratorBuilder>> mutators = new ArrayList<>();
@ -89,7 +112,7 @@ public class DirectCandidateGeneratorTests extends ESTestCase {
mutators.add(() -> mutation.preFilter(original.preFilter() == null ? "preFilter" : original.preFilter() + "_other"));
mutators.add(() -> mutation.sort(original.sort() == null ? "score" : original.sort() + "_other"));
mutators.add(
() -> mutation.stringDistance(original.stringDistance() == null ? "levenstein" : original.stringDistance() + "_other"));
() -> mutation.stringDistance(original.stringDistance() == null ? "levenshtein" : original.stringDistance() + "_other"));
mutators.add(() -> mutation.suggestMode(original.suggestMode() == null ? "missing" : original.suggestMode() + "_other"));
return randomFrom(mutators).get();
}
@ -189,7 +212,7 @@ public class DirectCandidateGeneratorTests extends ESTestCase {
maybeSet(generator::postFilter, randomAlphaOfLengthBetween(1, 20));
maybeSet(generator::size, randomIntBetween(1, 20));
maybeSet(generator::sort, randomFrom("score", "frequency"));
maybeSet(generator::stringDistance, randomFrom("internal", "damerau_levenshtein", "levenstein", "jarowinkler", "ngram"));
maybeSet(generator::stringDistance, randomFrom("internal", "damerau_levenshtein", "levenshtein", "jarowinkler", "ngram"));
maybeSet(generator::suggestMode, randomFrom("missing", "popular", "always"));
return generator;
}

View File

@ -20,10 +20,10 @@
package org.elasticsearch.search.suggest.term;
import org.elasticsearch.common.io.stream.AbstractWriteableEnumTestCase;
import org.elasticsearch.search.suggest.term.TermSuggestionBuilder.StringDistanceImpl;
import java.io.IOException;
import static org.elasticsearch.search.suggest.term.TermSuggestionBuilder.StringDistanceImpl;
import static org.hamcrest.Matchers.equalTo;
/**
@ -38,7 +38,7 @@ public class StringDistanceImplTests extends AbstractWriteableEnumTestCase {
public void testValidOrdinals() {
assertThat(StringDistanceImpl.INTERNAL.ordinal(), equalTo(0));
assertThat(StringDistanceImpl.DAMERAU_LEVENSHTEIN.ordinal(), equalTo(1));
assertThat(StringDistanceImpl.LEVENSTEIN.ordinal(), equalTo(2));
assertThat(StringDistanceImpl.LEVENSHTEIN.ordinal(), equalTo(2));
assertThat(StringDistanceImpl.JAROWINKLER.ordinal(), equalTo(3));
assertThat(StringDistanceImpl.NGRAM.ordinal(), equalTo(4));
}
@ -47,28 +47,27 @@ public class StringDistanceImplTests extends AbstractWriteableEnumTestCase {
public void testFromString() {
assertThat(StringDistanceImpl.resolve("internal"), equalTo(StringDistanceImpl.INTERNAL));
assertThat(StringDistanceImpl.resolve("damerau_levenshtein"), equalTo(StringDistanceImpl.DAMERAU_LEVENSHTEIN));
assertThat(StringDistanceImpl.resolve("levenstein"), equalTo(StringDistanceImpl.LEVENSTEIN));
assertThat(StringDistanceImpl.resolve("levenshtein"), equalTo(StringDistanceImpl.LEVENSHTEIN));
assertThat(StringDistanceImpl.resolve("jarowinkler"), equalTo(StringDistanceImpl.JAROWINKLER));
assertThat(StringDistanceImpl.resolve("ngram"), equalTo(StringDistanceImpl.NGRAM));
final String doesntExist = "doesnt_exist";
try {
StringDistanceImpl.resolve(doesntExist);
fail("StringDistanceImpl should not have an element " + doesntExist);
} catch (IllegalArgumentException e) {
}
try {
StringDistanceImpl.resolve(null);
fail("StringDistanceImpl.resolve on a null value should throw an exception.");
} catch (NullPointerException e) {
assertThat(e.getMessage(), equalTo("Input string is null"));
}
expectThrows(IllegalArgumentException.class, () -> StringDistanceImpl.resolve(doesntExist));
NullPointerException e = expectThrows(NullPointerException.class, () -> StringDistanceImpl.resolve(null));
assertThat(e.getMessage(), equalTo("Input string is null"));
}
public void testLevensteinDeprecation() {
assertThat(StringDistanceImpl.resolve("levenstein"), equalTo(StringDistanceImpl.LEVENSHTEIN));
assertWarnings("Deprecated distance [levenstein] used, replaced by [levenshtein]");
}
@Override
public void testWriteTo() throws IOException {
assertWriteToStream(StringDistanceImpl.INTERNAL, 0);
assertWriteToStream(StringDistanceImpl.DAMERAU_LEVENSHTEIN, 1);
assertWriteToStream(StringDistanceImpl.LEVENSTEIN, 2);
assertWriteToStream(StringDistanceImpl.LEVENSHTEIN, 2);
assertWriteToStream(StringDistanceImpl.JAROWINKLER, 3);
assertWriteToStream(StringDistanceImpl.NGRAM, 4);
}
@ -77,7 +76,7 @@ public class StringDistanceImplTests extends AbstractWriteableEnumTestCase {
public void testReadFrom() throws IOException {
assertReadFromStream(0, StringDistanceImpl.INTERNAL);
assertReadFromStream(1, StringDistanceImpl.DAMERAU_LEVENSHTEIN);
assertReadFromStream(2, StringDistanceImpl.LEVENSTEIN);
assertReadFromStream(2, StringDistanceImpl.LEVENSHTEIN);
assertReadFromStream(3, StringDistanceImpl.JAROWINKLER);
assertReadFromStream(4, StringDistanceImpl.NGRAM);
}

View File

@ -99,7 +99,7 @@ public class TermSuggestionBuilderTests extends AbstractSuggestionBuilderTestCas
switch (randomVal) {
case 0: return StringDistanceImpl.INTERNAL;
case 1: return StringDistanceImpl.DAMERAU_LEVENSHTEIN;
case 2: return StringDistanceImpl.LEVENSTEIN;
case 2: return StringDistanceImpl.LEVENSHTEIN;
case 3: return StringDistanceImpl.JAROWINKLER;
case 4: return StringDistanceImpl.NGRAM;
default: throw new IllegalArgumentException("No string distance algorithm with an ordinal of " + randomVal);

View File

@ -50,7 +50,7 @@ public class SharedSignificantTermsTestMethods {
public static void aggregateAndCheckFromSeveralShards(ESIntegTestCase testCase) throws ExecutionException, InterruptedException {
String type = ESTestCase.randomBoolean() ? "text" : "keyword";
String settings = "{\"index.number_of_shards\": 7, \"index.number_of_replicas\": 0}";
String settings = "{\"index.number_of_shards\": 7, \"index.number_of_routing_shards\": 7, \"index.number_of_replicas\": 0}";
index01Docs(type, settings, testCase);
testCase.ensureGreen();
testCase.logClusterState();

View File

@ -777,7 +777,7 @@ public class RemoteClusterConnectionTests extends ESTestCase {
}
public void testRemoteConnectionInfoBwComp() throws IOException {
final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_0, Version.V_6_0_0_rc2);
final Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_6_5, Version.V_6_0_0);
RemoteConnectionInfo expected = new RemoteConnectionInfo("test_cluster",
Collections.singletonList(new TransportAddress(TransportAddress.META_ADDRESS, 1)),
Collections.singletonList(new TransportAddress(TransportAddress.META_ADDRESS, 80)),

View File

@ -258,7 +258,7 @@ public class SimpleValidateQueryIT extends ESIntegTestCase {
public void testExplainWithRewriteValidateQueryAllShards() throws Exception {
client().admin().indices().prepareCreate("test")
.addMapping("type1", "field", "type=text,analyzer=whitespace")
.setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2)).get();
.setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 2).put("index.number_of_routing_shards", 2)).get();
// We are relying on specific routing behaviors for the result to be right, so
// we cannot randomize the number of shards or change ids here.
client().prepareIndex("test", "type1", "1")

View File

@ -19,6 +19,8 @@
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.Version
import java.util.regex.Matcher
/**
* This is a dummy project which does a local checkout of the previous
@ -26,41 +28,20 @@ import org.elasticsearch.gradle.LoggedExec
* tests to test against the next unreleased version, closest to this version,
* without relying on snapshots.
*/
String bwcVersion
boolean enabled = true
if (project.name == 'bwc-stable-snapshot') {
/* bwc-stable is only used if the last version is on a stable branch instead
* of a bugfix branch */
enabled = indexCompatVersions[-1].bugfix == 0
bwcVersion = indexCompatVersions[-1]
} else if (project.name == 'bwc-release-snapshot') {
if (indexCompatVersions[-1].bugfix == 0) {
/* The last version is on a stable branch so it is handled by the bwc-stable
* project. This project will instead handle the version before that which
* *should* be on a stable branch. */
bwcVersion = indexCompatVersions[-2]
} else {
// The last version is on a release branch so it is handled by this project
bwcVersion = indexCompatVersions[-1]
}
} else {
final Matcher match = project.name =~ /bwc-snapshot-(\d+\.(\d+|x))/
if (!match.matches()) {
throw new InvalidUserDataException("Unsupport project name ${project.name}")
}
String bwcBranch = match.group(1)
if (project.hasProperty('bwcVersion')) {
Version bwcVersion = project.ext.bwcVersion
if (enabled) {
apply plugin: 'distribution'
// Not published so no need to assemble
tasks.remove(assemble)
build.dependsOn.remove('assemble')
def (String major, String minor, String bugfix) = bwcVersion.split('\\.')
def (String currentMajor, String currentMinor, String currentBugfix) = version.split('\\.')
String bwcBranch
if (project.name == 'bwc-stable-snapshot' && major != currentMajor) {
bwcBranch = "${major}.x"
} else {
bwcBranch = "${major}.${minor}"
}
File checkoutDir = file("${buildDir}/bwc/checkout-${bwcBranch}")
final String remote = System.getProperty("tests.bwc.remote", "elastic")

View File

@ -194,6 +194,13 @@ buildRestTests.setups['sales'] = '''
// Dummy bank account data used by getting-started.asciidoc
buildRestTests.setups['bank'] = '''
- do:
indices.create:
index: bank
body:
settings:
number_of_shards: 5
number_of_routing_shards: 5
- do:
bulk:
index: bank

View File

@ -224,7 +224,8 @@ Time values can also be specified via abbreviations supported by <<time-units,ti
Note that fractional time values are not supported, but you can address this by shifting to another
time unit (e.g., `1.5h` could instead be specified as `90m`).
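A minimal sketch of that conversion (the `IntervalUnits` helper below is hypothetical and only makes the arithmetic explicit):

[source,java]
--------------------------------------------------
public class IntervalUnits {
    // Fractional time values such as "1.5h" are rejected, so express the
    // same span in a smaller whole unit instead.
    static String hoursAsMinutes(double hours) {
        return (long) (hours * 60) + "m";
    }

    public static void main(String[] args) {
        System.out.println(hoursAsMinutes(1.5)); // prints 90m
    }
}
--------------------------------------------------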
====== Time Zone
[float]
===== Time Zone
Date-times are stored in Elasticsearch in UTC. By default, all bucketing and
rounding is also done in UTC. The `time_zone` parameter can be used to indicate
@ -583,4 +584,4 @@ GET /_search
--------------------------------------------------
// CONSOLE
See <<index-modules-index-sorting, index sorting>> for more details.
See <<index-modules-index-sorting, index sorting>> for more details.

View File

@ -14,7 +14,8 @@ To make this more formal, here is the rounding function that is used:
bucket_key = Math.floor((value - offset) / interval) * interval + offset
--------------------------------------------------
The `interval` must be a positive decimal, while the `offset` must be a decimal in `[0, interval[`.
The `interval` must be a positive decimal, while the `offset` must be a decimal in `[0, interval)`
(a decimal greater than or equal to `0` and less than `interval`).
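To make the rounding concrete, here is a minimal, self-contained Java sketch of the same function (the class name and sample values are illustrative only, not Elasticsearch code):

[source,java]
--------------------------------------------------
public class HistogramRounding {
    // Same formula as above.
    static double bucketKey(double value, double interval, double offset) {
        return Math.floor((value - offset) / interval) * interval + offset;
    }

    public static void main(String[] args) {
        System.out.println(bucketKey(57, 50, 0)); // 50.0 -> bucket [50, 100)
        System.out.println(bucketKey(57, 50, 5)); // 55.0 -> bucket [55, 105)
    }
}
--------------------------------------------------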
The following snippet "buckets" the products based on their `price` by interval of `50`:
@ -73,7 +74,7 @@ And the following may be the response:
==== Minimum document count
The response above show that no documents has a price that falls within the range of `[100 - 150)`. By default the
The response above shows that no document has a price that falls within the range of `[100, 150)`. By default the
response will fill gaps in the histogram with empty buckets. It is possible to change that and request buckets with
a higher minimum count thanks to the `min_doc_count` setting:
@ -185,10 +186,10 @@ the `order` setting. Supports the same `order` functionality as the <<search-agg
==== Offset
By default the bucket keys start with 0 and then continue in even spaced steps of `interval`, e.g. if the interval is 10 the first buckets
(assuming there is data inside them) will be [0 - 9], [10-19], [20-29]. The bucket boundaries can be shifted by using the `offset` option.
(assuming there is data inside them) will be `[0, 10)`, `[10, 20)`, `[20, 30)`. The bucket boundaries can be shifted by using the `offset` option.
This can be best illustrated with an example. If there are 10 documents with values ranging from 5 to 14, using interval `10` will result in
two buckets with 5 documents each. If an additional offset `5` is used, there will be only a single bucket `[5, 15)` containing all the 10
documents.
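A sketch of such a request (the `products` index and `price` field are hypothetical names for this example):

[source,js]
--------------------------------------------------
POST /products/_search?size=0
{
  "aggs": {
    "prices": {
      "histogram": {
        "field": "price",
        "interval": 10,
        "offset": 5
      }
    }
  }
}
--------------------------------------------------
// CONSOLE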
==== Response Format

View File

@ -60,8 +60,8 @@ The response for the above aggregation:
"aggregations": {
"centroid": {
"location": {
"lat": 51.00982963107526,
"lon": 3.9662130922079086
"lat": 51.00982963806018,
"lon": 3.9662131061777472
},
"count": 6
}

View File

@ -1,7 +1,7 @@
[[analysis-delimited-payload-tokenfilter]]
=== Delimited Payload Token Filter
Named `delimited_payload`. Splits tokens into tokens and payload whenever a delimiter character is found.
Example: "the|1 quick|2 fox|3" is split by default into tokens `the`, `quick`, and `fox` with payloads `1`, `2`, and `3` respectively.

View File

@ -17,8 +17,8 @@ might look like:
["source","txt",subs="attributes,callouts"]
--------------------------------------------------
index shard prirep ip segment generation docs.count docs.deleted size size.memory committed searchable version compound
test 4 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true
test1 4 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true
--------------------------------------------------
// TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ _cat]

View File

@ -89,6 +89,11 @@ The cluster health API accepts the following request parameters:
for the cluster to have no shard relocations. Defaults to false, which means
it will not wait for relocating shards.
`wait_for_no_initializing_shards`::
A boolean value which controls whether to wait (until the timeout provided)
for the cluster to have no shard initializations. Defaults to false, which means
it will not wait for initializing shards.
`wait_for_active_shards`::
A number controlling how many active shards to wait for, `all` to wait
for all shards in the cluster to be active, or `0` to not wait. Defaults to `0`.
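For instance, a sketch combining these parameters in a single request (the values are illustrative, not recommendations):

[source,js]
--------------------------------------------------
GET /_cluster/health?wait_for_no_initializing_shards=true&wait_for_active_shards=all&timeout=30s
--------------------------------------------------
// CONSOLE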

View File

@ -1,23 +1,36 @@
[[indices-split-index]]
== Split Index
The split index API allows you to split an existing index into a new index,
where each original primary shard is split into two or more primary shards in
the new index.
The number of times the index can be split (and the number of shards that each
original shard can be split into) is determined by the
`index.number_of_routing_shards` setting. The number of routing shards
specifies the hashing space that is used internally to distribute documents
across shards with consistent hashing. For instance, a 5 shard index with
`number_of_routing_shards` set to `30` (`5 x 2 x 3`) could be split by a
factor of `2` or `3`. In other words, it could be split as follows:
* `5` -> `10` -> `30` (split by 2, then by 3)
* `5` -> `15` -> `30` (split by 3, then by 2)
* `5` -> `30` (split by 6)
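As a sketch of the example above (the index name is hypothetical), such an index could be created as follows:

[source,js]
--------------------------------------------------
PUT my_splittable_index
{
  "settings": {
    "index.number_of_shards": 5,
    "index.number_of_routing_shards": 30
  }
}
--------------------------------------------------
// CONSOLE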
While you can set the `index.number_of_routing_shards` setting explicitly at
index creation time, the default value depends upon the number of primary
shards in the original index. The default is designed to allow you to split
by factors of 2 up to a maximum of 1024 shards. However, the original number
of primary shards must be taken into account. For instance, an index created
with 5 primary shards could be split into 10, 20, 40, 80, 160, 320, or a
maximum of 640 shards (with a single split action or multiple split actions).
If the original index contains one primary shard (or a multi-shard index has
been <<indices-shrink-index,shrunk>> down to a single primary shard), then the
index may be split into an arbitrary number of shards greater than 1. The
properties of the default number of routing shards will then apply to the
newly split index.
Splitting works as follows:
@ -29,7 +42,7 @@ Splitting works as follows:
into the new index, which is a much more time consuming process.)
* Once the low level files are created all documents will be `hashed` again to delete
documents that belong to a different shard.
* Finally, it recovers the target index as though it were a closed index which
had just been re-opened.
@ -37,23 +50,19 @@ Splitting works as follows:
[float]
=== Preparing an index for splitting
Create a new index:
[source,js]
--------------------------------------------------
PUT my_source_index
{
"settings": {
"index.number_of_shards" : 1,
"index.number_of_routing_shards" : 2 <1>
}
"settings": {
"index.number_of_shards" : 1
}
}
--------------------------------------------------
// CONSOLE
In order to split an index, the index must be marked as read-only,
and have <<cluster-health,health>> `green`.
@ -75,7 +84,7 @@ PUT /my_source_index/_settings
changes like deleting the index.
[float]
=== Splitting an index
To split `my_source_index` into a new index called `my_target_index`, issue
the following request:
@ -102,7 +111,7 @@ Indices can only be split if they satisfy the following requirements:
* The target index must not exist.
* The source index must have fewer primary shards than the target index.
* The number of primary shards in the target index must be a multiple of the
number of primary shards in the source index.
@ -128,7 +137,7 @@ POST my_source_index/_split/my_target_index
}
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT my_source_index\n{"settings": {"index.blocks.write": true, "index.number_of_shards": "1"}}\n/]
<1> The number of shards in the target index. This must be a multiple of the
number of shards in the source index.
@ -162,4 +171,4 @@ replicas and may decide to relocate the primary shard to another node.
Because the split operation creates a new index to split the shards to,
the <<create-index-wait-for-active-shards,wait for active shards>> setting
on index creation applies to the split index action as well.

View File

@ -90,6 +90,12 @@ causing a mapping explosion:
Indexing 1 document with 100 nested fields actually indexes 101 documents
as each nested document is indexed as a separate hidden document.
`index.mapping.nested_objects.limit`::
The maximum number of `nested` JSON objects within a single document across
all nested fields, defaults to 10000. Indexing one document with an array of
100 objects within a nested field will actually create 101 documents, as
each nested object is indexed as a separate hidden document.
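A sketch of overriding this limit at index creation time (the index name and value here are purely illustrative):

[source,js]
--------------------------------------------------
PUT my_index
{
  "settings": {
    "index.mapping.nested_objects.limit": 5000
  }
}
--------------------------------------------------
// CONSOLE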
[float]
== Dynamic mapping

View File

Indexing a document with 100 nested fields actually indexes 101 documents as each nested
document is indexed as a separate document. To safeguard against ill-defined mappings
the number of nested fields that can be defined per index has been limited to 50. See
<<mapping-limit-settings>>.
==== Limiting the number of `nested` JSON objects
Indexing a document with an array of 100 objects within a nested field will actually
create 101 documents, as each nested object is indexed as a separate document.
To prevent out-of-memory errors when a single document contains too many nested JSON
objects, the number of nested JSON objects that a single document may contain across all fields
has been limited to 10000. See <<mapping-limit-settings>>.

View File

@ -30,6 +30,7 @@ way to reindex old indices is to use the `reindex` API.
* <<breaking_70_mappings_changes>>
* <<breaking_70_search_changes>>
* <<breaking_70_plugins_changes>>
* <<breaking_70_analysis_changes>>
* <<breaking_70_api_changes>>

View File

@ -0,0 +1,8 @@
[[breaking_70_analysis_changes]]
=== Analysis changes
==== The `delimited_payload_filter` is renamed
The `delimited_payload_filter` is renamed to `delimited_payload`. The old name is
deprecated and will be removed in a future release, so it should be replaced by
`delimited_payload`.
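As a hedged sketch of the migration (the index and filter names are hypothetical), only the `type` of a custom filter definition needs to change:

[source,js]
--------------------------------------------------
PUT /payloads_index
{
  "settings": {
    "analysis": {
      "filter": {
        "my_payloads": {
          "type": "delimited_payload",
          "delimiter": "|",
          "encoding": "int"
        }
      }
    }
  }
}
--------------------------------------------------
// CONSOLE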

Some files were not shown because too many files have changed in this diff.