Merge remote-tracking branch 'es/master' into ccr

* es/master: (22 commits)
  Fix building Javadoc JARs on JDK for client JARs (#29274)
  Require JDK 10 to build Elasticsearch (#29174)
  Decouple NamedXContentRegistry from ElasticsearchException (#29253)
  Docs: Update generating test coverage reports (#29255)
  [TEST] Fix issue with HttpInfo passed invalid parameter
  Remove all dependencies from XContentBuilder (#29225)
  Fix sporadic failure in CompositeValuesCollectorQueueTests
  Propagate ignore_unmapped to inner_hits (#29261)
  TEST: Increase timeout for testPrimaryReplicaResyncFailed
  REST client: hosts marked dead for the first time should not be immediately retried (#29230)
  TEST: Use different translog dir for a new engine
  Make SearchStats implement Writeable (#29258)
  [Docs] Spelling and grammar changes to reindex.asciidoc (#29232)
  Do not optimize append-only if seen normal op with higher seqno (#28787)
  [test] packaging: gradle tasks for groovy tests (#29046)
  Prune only gc deletes below local checkpoint (#28790)
  remove testUnassignedShardAndEmptyNodesInRoutingTable
  #28745: remove extra option in the composite rest tests
  Fold EngineDiskUtils into Store, for better lock semantics (#29156)
  Add file permissions checks to precommit task
  ...
Martijn van Groningen 2018-03-28 09:24:27 +02:00
commit ffb5281cc0
148 changed files with 3885 additions and 2058 deletions


@ -92,11 +92,11 @@ Contributing to the Elasticsearch codebase
**Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch)
JDK 9 is required to build Elasticsearch. You must have a JDK 9 installation
JDK 10 is required to build Elasticsearch. You must have a JDK 10 installation
with the environment variable `JAVA_HOME` referencing the path to Java home for
your JDK 9 installation. By default, tests use the same runtime as `JAVA_HOME`.
your JDK 10 installation. By default, tests use the same runtime as `JAVA_HOME`.
However, since Elasticsearch supports JDK 8, the build supports compiling with
JDK 9 and testing on a JDK 8 runtime; to do this, set `RUNTIME_JAVA_HOME`
JDK 10 and testing on a JDK 8 runtime; to do this, set `RUNTIME_JAVA_HOME`
pointing to the Java home of a JDK 8 installation. Note that this mechanism can
be used to test against other JDKs as well; it is not limited only to JDK 8.


@ -414,16 +414,16 @@ and in another window:
----------------------------------------------------
vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7
cd $BATS_ARCHIVES
cd $PACKAGING_ARCHIVES
sudo -E bats $BATS_TESTS/*rpm*.bats
----------------------------------------------------
If you wanted to retest all the release artifacts on a single VM you could:
-------------------------------------------------
./gradlew setupBats
./gradlew setupPackagingTest
cd qa/vagrant; vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404
cd $BATS_ARCHIVES
cd $PACKAGING_ARCHIVES
sudo -E bats $BATS_TESTS/*.bats
-------------------------------------------------
@ -499,32 +499,21 @@ will contain your change.
. Push both branches to your remote repository.
. Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec=index_req_bwc_5.x`.
== Coverage analysis
== Test coverage analysis
Tests can be run instrumented with jacoco to produce a coverage report in
`target/site/jacoco/`.
Generating test coverage reports for Elasticsearch is currently not possible through Gradle.
However, it _is_ possible to gain insight into code coverage using IntelliJ's built-in coverage
analysis tool that can measure coverage upon executing specific tests. Eclipse may also be able
to do the same using the EclEmma plugin.
Unit test coverage:
---------------------------------------------------------------------------
mvn -Dtests.coverage test jacoco:report
---------------------------------------------------------------------------
Integration test coverage:
---------------------------------------------------------------------------
mvn -Dtests.coverage -Dskip.unit.tests verify jacoco:report
---------------------------------------------------------------------------
Combined (Unit+Integration) coverage:
---------------------------------------------------------------------------
mvn -Dtests.coverage verify jacoco:report
---------------------------------------------------------------------------
Test coverage reporting used to be possible with JaCoCo when Elasticsearch was using Maven
as its build system. Since the switch to Gradle though, this is no longer possible, seeing as
the code currently used to build Elasticsearch does not allow JaCoCo to recognize its tests.
For more information on this, see the discussion in https://github.com/elastic/elasticsearch/issues/28867[issue #28867].
== Launching and debugging from an IDE
If you want to run elasticsearch from your IDE, the `./gradlew run` task
If you want to run Elasticsearch from your IDE, the `./gradlew run` task
supports a remote debugging option:
---------------------------------------------------------------------------

Vagrantfile

@ -334,9 +334,9 @@ export TAR=/elasticsearch/distribution/tar/build/distributions
export RPM=/elasticsearch/distribution/rpm/build/distributions
export DEB=/elasticsearch/distribution/deb/build/distributions
export BATS=/project/build/bats
export BATS_UTILS=/project/build/bats/utils
export BATS_TESTS=/project/build/bats/tests
export BATS_ARCHIVES=/project/build/bats/archives
export BATS_UTILS=/project/build/packaging/bats/utils
export BATS_TESTS=/project/build/packaging/bats/tests
export PACKAGING_ARCHIVES=/project/build/packaging/archives
VARS
cat \<\<SUDOERS_VARS > /etc/sudoers.d/elasticsearch_vars
Defaults env_keep += "ZIP"
@ -346,7 +346,7 @@ Defaults env_keep += "DEB"
Defaults env_keep += "BATS"
Defaults env_keep += "BATS_UTILS"
Defaults env_keep += "BATS_TESTS"
Defaults env_keep += "BATS_ARCHIVES"
Defaults env_keep += "PACKAGING_ARCHIVES"
SUDOERS_VARS
chmod 0440 /etc/sudoers.d/elasticsearch_vars
SHELL


@ -58,7 +58,7 @@ import java.time.ZonedDateTime
class BuildPlugin implements Plugin<Project> {
static final JavaVersion minimumRuntimeVersion = JavaVersion.VERSION_1_8
static final JavaVersion minimumCompilerVersion = JavaVersion.VERSION_1_9
static final JavaVersion minimumCompilerVersion = JavaVersion.VERSION_1_10
@Override
void apply(Project project) {


@ -168,12 +168,10 @@ public class PluginBuildPlugin extends BuildPlugin {
Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName),
StandardCopyOption.REPLACE_EXISTING)
if (project.compilerJavaVersion < JavaVersion.VERSION_1_10) {
String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar')
String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar')
Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName),
StandardCopyOption.REPLACE_EXISTING)
}
String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar')
String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar')
Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName),
StandardCopyOption.REPLACE_EXISTING)
}
project.assemble.dependsOn(clientJar)
}


@ -0,0 +1,87 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.precommit
import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.TaskAction
import org.gradle.api.tasks.util.PatternSet
import org.gradle.api.tasks.util.PatternFilterable
import org.apache.tools.ant.taskdefs.condition.Os
import java.nio.file.Files
import java.nio.file.attribute.PosixFilePermission
import java.nio.file.attribute.PosixFileAttributeView
import static java.nio.file.attribute.PosixFilePermission.OTHERS_EXECUTE
import static java.nio.file.attribute.PosixFilePermission.GROUP_EXECUTE
import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE
/**
* Checks source files for correct file permissions.
*/
public class FilePermissionsTask extends DefaultTask {
/** A pattern set of which files should be checked. */
private PatternFilterable filesFilter = new PatternSet()
@OutputFile
File outputMarker = new File(project.buildDir, 'markers/filePermissions')
FilePermissionsTask() {
onlyIf { !Os.isFamily(Os.FAMILY_WINDOWS) }
description = "Checks java source files for correct file permissions"
// we always include all source files, and exclude what should not be checked
filesFilter.include('**')
// exclude sh files that might have the executable bit set
filesFilter.exclude('**/*.sh')
}
/** Returns the files this task will check */
@InputFiles
FileCollection files() {
List<FileCollection> collections = new ArrayList<>()
for (SourceSet sourceSet : project.sourceSets) {
collections.add(sourceSet.allSource.matching(filesFilter))
}
return project.files(collections.toArray())
}
@TaskAction
void checkInvalidPermissions() {
List<String> failures = new ArrayList<>()
for (File f : files()) {
PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(f.toPath(), PosixFileAttributeView.class)
Set<PosixFilePermission> permissions = fileAttributeView.readAttributes().permissions()
if (permissions.contains(OTHERS_EXECUTE) || permissions.contains(OWNER_EXECUTE) ||
permissions.contains(GROUP_EXECUTE)) {
failures.add("Source file is executable: " + f)
}
}
if (failures.isEmpty() == false) {
throw new GradleException('Found invalid file permissions:\n' + failures.join('\n'))
}
outputMarker.setText('done', 'UTF-8')
}
}


@ -37,6 +37,7 @@ class PrecommitTasks {
configureNamingConventions(project),
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
project.tasks.create('filepermissions', FilePermissionsTask.class),
project.tasks.create('jarHell', JarHellTask.class),
project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class)]


@ -37,9 +37,6 @@ class VagrantPropertiesExtension {
@Input
Boolean inheritTests
@Input
Boolean inheritTestArchives
@Input
Boolean inheritTestUtils
@ -60,10 +57,6 @@ class VagrantPropertiesExtension {
this.inheritTests = inheritTests
}
void setInheritTestArchives(Boolean inheritTestArchives) {
this.inheritTestArchives = inheritTestArchives
}
void setInheritTestUtils(Boolean inheritTestUtils) {
this.inheritTestUtils = inheritTestUtils
}


@ -1,6 +1,5 @@
package org.elasticsearch.gradle.vagrant
import com.carrotsearch.gradle.junit4.RandomizedTestingPlugin
import org.apache.tools.ant.taskdefs.condition.Os
import org.elasticsearch.gradle.FileContentsTask
import org.elasticsearch.gradle.LoggedExec
@ -43,8 +42,9 @@ class VagrantTestPlugin implements Plugin<Project> {
/** Packages onboarded for upgrade tests **/
static List<String> UPGRADE_FROM_ARCHIVES = ['rpm', 'deb']
private static final PACKAGING_CONFIGURATION = 'packaging'
private static final BATS = 'bats'
private static final String BATS_TEST_COMMAND ="cd \$BATS_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS"
private static final String BATS_TEST_COMMAND ="cd \$PACKAGING_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS"
private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && ./gradlew test integTest"
@Override
@ -53,11 +53,11 @@ class VagrantTestPlugin implements Plugin<Project> {
// Creates the Vagrant extension for the project
project.extensions.create('esvagrant', VagrantPropertiesExtension, listVagrantBoxes(project))
// Add required repositories for Bats tests
configureBatsRepositories(project)
// Add required repositories for packaging tests
configurePackagingArchiveRepositories(project)
// Creates custom configurations for Bats testing files (and associated scripts and archives)
createBatsConfiguration(project)
createPackagingConfiguration(project)
// Creates all the main Vagrant tasks
createVagrantTasks(project)
@ -87,7 +87,7 @@ class VagrantTestPlugin implements Plugin<Project> {
}
}
private static void configureBatsRepositories(Project project) {
private static void configurePackagingArchiveRepositories(Project project) {
RepositoryHandler repos = project.repositories
// Try maven central first, it'll have releases before 5.0.0
@ -102,10 +102,10 @@ class VagrantTestPlugin implements Plugin<Project> {
}
}
private static void createBatsConfiguration(Project project) {
project.configurations.create(BATS)
private static void createPackagingConfiguration(Project project) {
project.configurations.create(PACKAGING_CONFIGURATION)
String upgradeFromVersion = System.getProperty("tests.packaging.upgradeVersion");
String upgradeFromVersion = System.getProperty("tests.packaging.upgradeVersion")
if (upgradeFromVersion == null) {
String firstPartOfSeed = project.rootProject.testSeed.tokenize(':').get(0)
final long seed = Long.parseUnsignedLong(firstPartOfSeed, 16)
@ -120,12 +120,14 @@ class VagrantTestPlugin implements Plugin<Project> {
} else {
it = "packages:${it}"
}
project.dependencies.add(BATS, project.dependencies.project(path: ":distribution:${it}", configuration: 'default'))
project.dependencies.add(PACKAGING_CONFIGURATION,
project.dependencies.project(path: ":distribution:${it}", configuration: 'default'))
}
UPGRADE_FROM_ARCHIVES.each {
// The version of elasticsearch that we upgrade *from*
project.dependencies.add(BATS, "org.elasticsearch.distribution.${it}:elasticsearch:${upgradeFromVersion}@${it}")
project.dependencies.add(PACKAGING_CONFIGURATION,
"org.elasticsearch.distribution.${it}:elasticsearch:${upgradeFromVersion}@${it}")
}
project.extensions.esvagrant.upgradeFromVersion = upgradeFromVersion
@ -154,22 +156,28 @@ class VagrantTestPlugin implements Plugin<Project> {
}
private static void createPrepareVagrantTestEnvTask(Project project) {
File batsDir = new File("${project.buildDir}/${BATS}")
File packagingDir = new File(project.buildDir, PACKAGING_CONFIGURATION)
Task createBatsDirsTask = project.tasks.create('createBatsDirs')
createBatsDirsTask.outputs.dir batsDir
createBatsDirsTask.doLast {
batsDir.mkdirs()
File archivesDir = new File(packagingDir, 'archives')
Copy copyPackagingArchives = project.tasks.create('copyPackagingArchives', Copy) {
into archivesDir
from project.configurations[PACKAGING_CONFIGURATION]
}
Copy copyBatsArchives = project.tasks.create('copyBatsArchives', Copy) {
dependsOn createBatsDirsTask
into "${batsDir}/archives"
from project.configurations[BATS]
Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) {
dependsOn copyPackagingArchives
file "${archivesDir}/version"
contents project.version
}
Task createUpgradeFromFile = project.tasks.create('createUpgradeFromFile', FileContentsTask) {
dependsOn copyPackagingArchives
file "${archivesDir}/upgrade_from_version"
contents project.extensions.esvagrant.upgradeFromVersion
}
File batsDir = new File(packagingDir, BATS)
Copy copyBatsTests = project.tasks.create('copyBatsTests', Copy) {
dependsOn createBatsDirsTask
into "${batsDir}/tests"
from {
"${project.extensions.esvagrant.batsDir}/tests"
@ -177,7 +185,6 @@ class VagrantTestPlugin implements Plugin<Project> {
}
Copy copyBatsUtils = project.tasks.create('copyBatsUtils', Copy) {
dependsOn createBatsDirsTask
into "${batsDir}/utils"
from {
"${project.extensions.esvagrant.batsDir}/utils"
@ -185,42 +192,30 @@ class VagrantTestPlugin implements Plugin<Project> {
}
// Now we iterate over dependencies of the bats configuration. When a project dependency is found,
// we bring back its own archives, test files or test utils.
// we bring back its test files or test utils.
project.afterEvaluate {
project.configurations.bats.dependencies.findAll {it.targetConfiguration == BATS }.each { d ->
if (d instanceof DefaultProjectDependency) {
DefaultProjectDependency externalBatsDependency = (DefaultProjectDependency) d
Project externalBatsProject = externalBatsDependency.dependencyProject
String externalBatsDir = externalBatsProject.extensions.esvagrant.batsDir
project.configurations[PACKAGING_CONFIGURATION].dependencies
.findAll {it.targetConfiguration == PACKAGING_CONFIGURATION }
.each { d ->
if (d instanceof DefaultProjectDependency) {
DefaultProjectDependency externalBatsDependency = (DefaultProjectDependency) d
Project externalBatsProject = externalBatsDependency.dependencyProject
String externalBatsDir = externalBatsProject.extensions.esvagrant.batsDir
if (project.extensions.esvagrant.inheritTests) {
copyBatsTests.from(externalBatsProject.files("${externalBatsDir}/tests"))
if (project.extensions.esvagrant.inheritTests) {
copyBatsTests.from(externalBatsProject.files("${externalBatsDir}/tests"))
}
if (project.extensions.esvagrant.inheritTestUtils) {
copyBatsUtils.from(externalBatsProject.files("${externalBatsDir}/utils"))
}
}
if (project.extensions.esvagrant.inheritTestArchives) {
copyBatsArchives.from(externalBatsDependency.projectConfiguration.files)
}
if (project.extensions.esvagrant.inheritTestUtils) {
copyBatsUtils.from(externalBatsProject.files("${externalBatsDir}/utils"))
}
}
}
}
Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) {
dependsOn createBatsDirsTask
file "${batsDir}/archives/version"
contents project.version
}
Task createUpgradeFromFile = project.tasks.create('createUpgradeFromFile', FileContentsTask) {
dependsOn createBatsDirsTask
file "${batsDir}/archives/upgrade_from_version"
contents project.extensions.esvagrant.upgradeFromVersion
}
Task vagrantSetUpTask = project.tasks.create('setupBats')
Task vagrantSetUpTask = project.tasks.create('setupPackagingTest')
vagrantSetUpTask.dependsOn 'vagrantCheckVersion'
vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils, copyBatsArchives, createVersionFile, createUpgradeFromFile
vagrantSetUpTask.dependsOn copyPackagingArchives, createVersionFile, createUpgradeFromFile
vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils
}
private static void createPackagingTestTask(Project project) {
@ -270,8 +265,8 @@ class VagrantTestPlugin implements Plugin<Project> {
assert project.tasks.virtualboxCheckVersion != null
Task virtualboxCheckVersion = project.tasks.virtualboxCheckVersion
assert project.tasks.setupBats != null
Task setupBats = project.tasks.setupBats
assert project.tasks.setupPackagingTest != null
Task setupPackagingTest = project.tasks.setupPackagingTest
assert project.tasks.packagingTest != null
Task packagingTest = project.tasks.packagingTest
@ -308,7 +303,7 @@ class VagrantTestPlugin implements Plugin<Project> {
environmentVars vagrantEnvVars
dependsOn vagrantCheckVersion, virtualboxCheckVersion
}
update.mustRunAfter(setupBats)
update.mustRunAfter(setupPackagingTest)
/*
* Destroying before every execution can be annoying while iterating on tests locally. Therefore, we provide a flag
@ -359,32 +354,39 @@ class VagrantTestPlugin implements Plugin<Project> {
}
vagrantSmokeTest.dependsOn(smoke)
Task packaging = project.tasks.create("vagrant${boxTask}#packagingTest", BatsOverVagrantTask) {
Task batsPackagingTest = project.tasks.create("vagrant${boxTask}#batsPackagingTest", BatsOverVagrantTask) {
remoteCommand BATS_TEST_COMMAND
boxName box
environmentVars vagrantEnvVars
dependsOn up, setupBats
dependsOn up, setupPackagingTest
finalizedBy halt
}
TaskExecutionAdapter packagingReproListener = new TaskExecutionAdapter() {
@Override
void afterExecute(Task task, TaskState state) {
final String gradlew = Os.isFamily(Os.FAMILY_WINDOWS) ? "gradlew" : "./gradlew"
if (state.failure != null) {
println "REPRODUCE WITH: ${gradlew} ${packaging.path} " +
"-Dtests.seed=${project.testSeed} "
}
}
TaskExecutionAdapter batsPackagingReproListener = createReproListener(project, batsPackagingTest.path)
batsPackagingTest.doFirst {
project.gradle.addListener(batsPackagingReproListener)
}
packaging.doFirst {
project.gradle.addListener(packagingReproListener)
}
packaging.doLast {
project.gradle.removeListener(packagingReproListener)
batsPackagingTest.doLast {
project.gradle.removeListener(batsPackagingReproListener)
}
if (project.extensions.esvagrant.boxes.contains(box)) {
packagingTest.dependsOn(packaging)
packagingTest.dependsOn(batsPackagingTest)
}
// This task doesn't do anything yet. In the future it will execute a jar containing tests on the vm
Task groovyPackagingTest = project.tasks.create("vagrant${boxTask}#groovyPackagingTest")
groovyPackagingTest.dependsOn(up)
groovyPackagingTest.finalizedBy(halt)
TaskExecutionAdapter groovyPackagingReproListener = createReproListener(project, groovyPackagingTest.path)
groovyPackagingTest.doFirst {
project.gradle.addListener(groovyPackagingReproListener)
}
groovyPackagingTest.doLast {
project.gradle.removeListener(groovyPackagingReproListener)
}
if (project.extensions.esvagrant.boxes.contains(box)) {
packagingTest.dependsOn(groovyPackagingTest)
}
Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) {
@ -395,15 +397,7 @@ class VagrantTestPlugin implements Plugin<Project> {
finalizedBy halt
args '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.testSeed}"
}
TaskExecutionAdapter platformReproListener = new TaskExecutionAdapter() {
@Override
void afterExecute(Task task, TaskState state) {
if (state.failure != null) {
println "REPRODUCE WITH: gradle ${platform.path} " +
"-Dtests.seed=${project.testSeed} "
}
}
}
TaskExecutionAdapter platformReproListener = createReproListener(project, platform.path)
platform.doFirst {
project.gradle.addListener(platformReproListener)
}
@ -415,4 +409,16 @@ class VagrantTestPlugin implements Plugin<Project> {
}
}
}
private static TaskExecutionAdapter createReproListener(Project project, String reproTaskPath) {
return new TaskExecutionAdapter() {
@Override
void afterExecute(Task task, TaskState state) {
final String gradlew = Os.isFamily(Os.FAMILY_WINDOWS) ? "gradlew" : "./gradlew"
if (state.failure != null) {
println "REPRODUCE WITH: ${gradlew} ${reproTaskPath} -Dtests.seed=${project.testSeed} "
}
}
}
}
}


@ -121,7 +121,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
builder.startObject();
{
builder.field("user", "kimchy");
builder.field("postDate", new Date());
builder.timeField("postDate", new Date());
builder.field("message", "trying out Elasticsearch");
}
builder.endObject();
@ -331,7 +331,7 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
{
builder.field("updated", new Date());
builder.timeField("updated", new Date());
builder.field("reason", "daily update");
}
builder.endObject();
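
The two hunks above switch the documentation tests from `field` to `timeField` for `Date` values, the variant used for date/time values after the `XContentBuilder` cleanup (#29225). A minimal sketch of the resulting pattern; the index name, type, id, and the `IndexRequest.source(XContentBuilder)` call are illustrative assumptions rather than part of this diff:

[source,java]
---------------------------------------------------------------------------
import java.util.Date;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class TimeFieldSketch {
    // Builds the same document shape as the docs test above; "posts"/"doc"/"1" are made up.
    public static IndexRequest buildRequest() throws Exception {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();
        {
            builder.field("user", "kimchy");
            builder.timeField("postDate", new Date()); // dates now go through timeField
            builder.field("message", "trying out Elasticsearch");
        }
        builder.endObject();
        return new IndexRequest("posts", "doc", "1").source(builder);
    }
}
---------------------------------------------------------------------------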


@ -26,31 +26,50 @@ import java.util.concurrent.TimeUnit;
* when the host should be retried (based on number of previous failed attempts).
* Class is immutable, a new copy of it should be created each time the state has to be changed.
*/
final class DeadHostState {
final class DeadHostState implements Comparable<DeadHostState> {
private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1);
private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30);
static final DeadHostState INITIAL_DEAD_STATE = new DeadHostState();
private final int failedAttempts;
private final long deadUntilNanos;
private final TimeSupplier timeSupplier;
private DeadHostState() {
/**
* Build the initial dead state of a host. Useful when a working host stops functioning
* and needs to be marked dead after its first failure. In such case the host will be retried after a minute or so.
*
* @param timeSupplier a way to supply the current time and allow for unit testing
*/
DeadHostState(TimeSupplier timeSupplier) {
this.failedAttempts = 1;
this.deadUntilNanos = System.nanoTime() + MIN_CONNECTION_TIMEOUT_NANOS;
this.deadUntilNanos = timeSupplier.nanoTime() + MIN_CONNECTION_TIMEOUT_NANOS;
this.timeSupplier = timeSupplier;
}
/**
* We keep track of how many times a certain node fails consecutively. The higher that number is the longer we will wait
* to retry that same node again. Minimum is 1 minute (for a node the only failed once), maximum is 30 minutes (for a node
* that failed many consecutive times).
* Build the dead state of a host given its previous dead state. Useful when a host has been failing before, hence
* it already failed for one or more consecutive times. The more failed attempts we register the longer we wait
* to retry that same host again. Minimum is 1 minute (for a node that only failed once, created
* through {@link #DeadHostState(TimeSupplier)}), maximum is 30 minutes (for a node that failed more than 10 consecutive times)
*
* @param previousDeadHostState the previous state of the host which allows us to increase the wait till the next retry attempt
*/
DeadHostState(DeadHostState previousDeadHostState) {
DeadHostState(DeadHostState previousDeadHostState, TimeSupplier timeSupplier) {
long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1),
MAX_CONNECTION_TIMEOUT_NANOS);
this.deadUntilNanos = System.nanoTime() + timeoutNanos;
this.deadUntilNanos = timeSupplier.nanoTime() + timeoutNanos;
this.failedAttempts = previousDeadHostState.failedAttempts + 1;
this.timeSupplier = timeSupplier;
}
/**
* Indicates whether it's time to retry the failed host or not.
*
* @return true if the host should be retried, false otherwise
*/
boolean shallBeRetried() {
return timeSupplier.nanoTime() - deadUntilNanos > 0;
}
/**
@ -61,6 +80,15 @@ final class DeadHostState {
return deadUntilNanos;
}
int getFailedAttempts() {
return failedAttempts;
}
@Override
public int compareTo(DeadHostState other) {
return Long.compare(deadUntilNanos, other.deadUntilNanos);
}
@Override
public String toString() {
return "DeadHostState{" +
@ -68,4 +96,19 @@ final class DeadHostState {
", deadUntilNanos=" + deadUntilNanos +
'}';
}
/**
* Time supplier that makes timing aspects pluggable to ease testing
*/
interface TimeSupplier {
TimeSupplier DEFAULT = new TimeSupplier() {
@Override
public long nanoTime() {
return System.nanoTime();
}
};
long nanoTime();
}
}
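
The two constructors above encode an exponential backoff: the first failure keeps a host dead for one minute, and each further consecutive failure grows the timeout by roughly a factor of sqrt(2), capped at thirty minutes. A self-contained sketch (plain JDK, no client classes) that reproduces the schedule; the printed seconds match `EXPECTED_TIMEOUTS_SECONDS` in the new `DeadHostStateTests` further down:

[source,java]
---------------------------------------------------------------------------
import java.util.concurrent.TimeUnit;

public class DeadHostBackoffSketch {
    private static final long MIN_NANOS = TimeUnit.MINUTES.toNanos(1);
    private static final long MAX_NANOS = TimeUnit.MINUTES.toNanos(30);

    public static void main(String[] args) {
        // First failure: one minute, as in DeadHostState(TimeSupplier).
        System.out.println("attempt 1 -> " + TimeUnit.NANOSECONDS.toSeconds(MIN_NANOS) + "s");
        for (int previousFailedAttempts = 1; previousFailedAttempts <= 10; previousFailedAttempts++) {
            // Same formula as DeadHostState(previousDeadHostState, timeSupplier).
            long timeoutNanos = (long) Math.min(
                    MIN_NANOS * 2 * Math.pow(2, previousFailedAttempts * 0.5 - 1), MAX_NANOS);
            System.out.println("attempt " + (previousFailedAttempts + 1) + " -> "
                    + TimeUnit.NANOSECONDS.toSeconds(timeoutNanos) + "s");
        }
        // Prints 60, 84, 120, 169, 240, 339, 480, 678, 960, 1357, 1800 seconds.
    }
}
---------------------------------------------------------------------------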


@ -47,6 +47,7 @@ import org.apache.http.nio.client.methods.HttpAsyncMethods;
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import javax.net.ssl.SSLHandshakeException;
import java.io.Closeable;
import java.io.IOException;
import java.net.SocketTimeoutException;
@ -72,7 +73,6 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import javax.net.ssl.SSLHandshakeException;
/**
* Client that connects to an Elasticsearch cluster through HTTP.
@ -457,18 +457,18 @@ public class RestClient implements Closeable {
do {
Set<HttpHost> filteredHosts = new HashSet<>(hostTuple.hosts);
for (Map.Entry<HttpHost, DeadHostState> entry : blacklist.entrySet()) {
if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) {
if (entry.getValue().shallBeRetried() == false) {
filteredHosts.remove(entry.getKey());
}
}
if (filteredHosts.isEmpty()) {
//last resort: if there are no good host to use, return a single dead one, the one that's closest to being retried
//last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried
List<Map.Entry<HttpHost, DeadHostState>> sortedHosts = new ArrayList<>(blacklist.entrySet());
if (sortedHosts.size() > 0) {
Collections.sort(sortedHosts, new Comparator<Map.Entry<HttpHost, DeadHostState>>() {
@Override
public int compare(Map.Entry<HttpHost, DeadHostState> o1, Map.Entry<HttpHost, DeadHostState> o2) {
return Long.compare(o1.getValue().getDeadUntilNanos(), o2.getValue().getDeadUntilNanos());
return o1.getValue().compareTo(o2.getValue());
}
});
HttpHost deadHost = sortedHosts.get(0).getKey();
@ -499,14 +499,15 @@ public class RestClient implements Closeable {
* Called after each failed attempt.
* Receives as an argument the host that was used for the failed attempt.
*/
private void onFailure(HttpHost host) throws IOException {
private void onFailure(HttpHost host) {
while(true) {
DeadHostState previousDeadHostState = blacklist.putIfAbsent(host, DeadHostState.INITIAL_DEAD_STATE);
DeadHostState previousDeadHostState = blacklist.putIfAbsent(host, new DeadHostState(DeadHostState.TimeSupplier.DEFAULT));
if (previousDeadHostState == null) {
logger.debug("added host [" + host + "] to blacklist");
break;
}
if (blacklist.replace(host, previousDeadHostState, new DeadHostState(previousDeadHostState))) {
if (blacklist.replace(host, previousDeadHostState,
new DeadHostState(previousDeadHostState, DeadHostState.TimeSupplier.DEFAULT))) {
logger.debug("updated host [" + host + "] already in blacklist");
break;
}
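
From a caller's point of view the blacklist machinery above stays invisible. A hedged usage sketch (host ports and the endpoint are illustrative): a host whose request fails is recorded with a `DeadHostState` and skipped while `shallBeRetried()` returns false; it is retried only after its deadline passes, or earlier as a last resort when every configured host is blacklisted.

[source,java]
---------------------------------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RestClientFailoverSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200),
                new HttpHost("localhost", 9201)).build()) {
            // If :9200 is down, the request is retried on :9201 and :9200 is
            // blacklisted; it is only tried again once its retry deadline passes.
            Response response = client.performRequest("GET", "/");
            System.out.println(response.getStatusLine());
        }
    }
}
---------------------------------------------------------------------------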


@ -0,0 +1,118 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import java.util.concurrent.TimeUnit;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;
public class DeadHostStateTests extends RestClientTestCase {
private static long[] EXPECTED_TIMEOUTS_SECONDS = new long[]{60, 84, 120, 169, 240, 339, 480, 678, 960, 1357, 1800};
public void testInitialDeadHostStateDefaultTimeSupplier() {
DeadHostState deadHostState = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT);
long currentTime = System.nanoTime();
assertThat(deadHostState.getDeadUntilNanos(), greaterThan(currentTime));
assertThat(deadHostState.getFailedAttempts(), equalTo(1));
}
public void testDeadHostStateFromPreviousDefaultTimeSupplier() {
DeadHostState previous = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT);
int iters = randomIntBetween(5, 30);
for (int i = 0; i < iters; i++) {
DeadHostState deadHostState = new DeadHostState(previous, DeadHostState.TimeSupplier.DEFAULT);
assertThat(deadHostState.getDeadUntilNanos(), greaterThan(previous.getDeadUntilNanos()));
assertThat(deadHostState.getFailedAttempts(), equalTo(previous.getFailedAttempts() + 1));
previous = deadHostState;
}
}
public void testCompareToDefaultTimeSupplier() {
int numObjects = randomIntBetween(EXPECTED_TIMEOUTS_SECONDS.length, 30);
DeadHostState[] deadHostStates = new DeadHostState[numObjects];
for (int i = 0; i < numObjects; i++) {
if (i == 0) {
deadHostStates[i] = new DeadHostState(DeadHostState.TimeSupplier.DEFAULT);
} else {
deadHostStates[i] = new DeadHostState(deadHostStates[i - 1], DeadHostState.TimeSupplier.DEFAULT);
}
}
for (int k = 1; k < deadHostStates.length; k++) {
assertThat(deadHostStates[k - 1].getDeadUntilNanos(), lessThan(deadHostStates[k].getDeadUntilNanos()));
assertThat(deadHostStates[k - 1], lessThan(deadHostStates[k]));
}
}
public void testShallBeRetried() {
ConfigurableTimeSupplier timeSupplier = new ConfigurableTimeSupplier();
DeadHostState deadHostState = null;
for (int i = 0; i < EXPECTED_TIMEOUTS_SECONDS.length; i++) {
long expectedTimeoutSecond = EXPECTED_TIMEOUTS_SECONDS[i];
timeSupplier.nanoTime = 0;
if (i == 0) {
deadHostState = new DeadHostState(timeSupplier);
} else {
deadHostState = new DeadHostState(deadHostState, timeSupplier);
}
for (int j = 0; j < expectedTimeoutSecond; j++) {
timeSupplier.nanoTime += TimeUnit.SECONDS.toNanos(1);
assertThat(deadHostState.shallBeRetried(), is(false));
}
int iters = randomIntBetween(5, 30);
for (int j = 0; j < iters; j++) {
timeSupplier.nanoTime += TimeUnit.SECONDS.toNanos(1);
assertThat(deadHostState.shallBeRetried(), is(true));
}
}
}
public void testDeadHostStateTimeouts() {
ConfigurableTimeSupplier zeroTimeSupplier = new ConfigurableTimeSupplier();
zeroTimeSupplier.nanoTime = 0L;
DeadHostState previous = new DeadHostState(zeroTimeSupplier);
for (long expectedTimeoutsSecond : EXPECTED_TIMEOUTS_SECONDS) {
assertThat(TimeUnit.NANOSECONDS.toSeconds(previous.getDeadUntilNanos()), equalTo(expectedTimeoutsSecond));
previous = new DeadHostState(previous, zeroTimeSupplier);
}
//check that from here on the timeout does not increase
int iters = randomIntBetween(5, 30);
for (int i = 0; i < iters; i++) {
DeadHostState deadHostState = new DeadHostState(previous, zeroTimeSupplier);
assertThat(TimeUnit.NANOSECONDS.toSeconds(deadHostState.getDeadUntilNanos()),
equalTo(EXPECTED_TIMEOUTS_SECONDS[EXPECTED_TIMEOUTS_SECONDS.length - 1]));
previous = deadHostState;
}
}
private static class ConfigurableTimeSupplier implements DeadHostState.TimeSupplier {
long nanoTime;
@Override
public long nanoTime() {
return nanoTime;
}
}
}


@ -101,7 +101,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
@Before
@SuppressWarnings("unchecked")
public void createRestClient() throws IOException {
public void createRestClient() {
httpClient = mock(CloseableHttpAsyncClient.class);
when(httpClient.<HttpResponse>execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class),
any(HttpClientContext.class), any(FutureCallback.class))).thenAnswer(new Answer<Future<HttpResponse>>() {
@ -160,17 +160,6 @@ public class RestClientSingleHostTests extends RestClientTestCase {
exec.shutdown();
}
public void testNullPath() throws IOException {
for (String method : getHttpMethods()) {
try {
restClient.performRequest(method, null);
fail("path set to null should fail!");
} catch (NullPointerException e) {
assertEquals("path must not be null", e.getMessage());
}
}
}
/**
* Verifies the content of the {@link HttpRequest} that's internally created and passed through to the http client
*/
@ -196,33 +185,6 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
}
public void testSetHosts() throws IOException {
try {
restClient.setHosts((HttpHost[]) null);
fail("setHosts should have failed");
} catch (IllegalArgumentException e) {
assertEquals("hosts must not be null nor empty", e.getMessage());
}
try {
restClient.setHosts();
fail("setHosts should have failed");
} catch (IllegalArgumentException e) {
assertEquals("hosts must not be null nor empty", e.getMessage());
}
try {
restClient.setHosts((HttpHost) null);
fail("setHosts should have failed");
} catch (NullPointerException e) {
assertEquals("host cannot be null", e.getMessage());
}
try {
restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201));
fail("setHosts should have failed");
} catch (NullPointerException e) {
assertEquals("host cannot be null", e.getMessage());
}
}
/**
* End to end test for ok status codes
*/
@ -289,7 +251,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
}
public void testIOExceptions() throws IOException {
public void testIOExceptions() {
for (String method : getHttpMethods()) {
//IOExceptions should be let bubble up
try {


@ -28,6 +28,7 @@ import java.net.URI;
import java.util.Collections;
import java.util.concurrent.CountDownLatch;
import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
import static org.hamcrest.Matchers.instanceOf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
@ -147,8 +148,48 @@ public class RestClientTests extends RestClientTestCase {
}
}
public void testSetHostsWrongArguments() throws IOException {
try (RestClient restClient = createRestClient()) {
restClient.setHosts((HttpHost[]) null);
fail("setHosts should have failed");
} catch (IllegalArgumentException e) {
assertEquals("hosts must not be null nor empty", e.getMessage());
}
try (RestClient restClient = createRestClient()) {
restClient.setHosts();
fail("setHosts should have failed");
} catch (IllegalArgumentException e) {
assertEquals("hosts must not be null nor empty", e.getMessage());
}
try (RestClient restClient = createRestClient()) {
restClient.setHosts((HttpHost) null);
fail("setHosts should have failed");
} catch (NullPointerException e) {
assertEquals("host cannot be null", e.getMessage());
}
try (RestClient restClient = createRestClient()) {
restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201));
fail("setHosts should have failed");
} catch (NullPointerException e) {
assertEquals("host cannot be null", e.getMessage());
}
}
public void testNullPath() throws IOException {
try (RestClient restClient = createRestClient()) {
for (String method : getHttpMethods()) {
try {
restClient.performRequest(method, null);
fail("path set to null should fail!");
} catch (NullPointerException e) {
assertEquals("path must not be null", e.getMessage());
}
}
}
}
private static RestClient createRestClient() {
HttpHost[] hosts = new HttpHost[]{new HttpHost("localhost", 9200)};
return new RestClient(mock(CloseableHttpAsyncClient.class), randomLongBetween(1_000, 30_000), new Header[]{}, hosts, null, null);
return new RestClient(mock(CloseableHttpAsyncClient.class), randomIntBetween(1_000, 30_000), new Header[]{}, hosts, null, null);
}
}


@ -545,88 +545,3 @@ GET /_search
}
--------------------------------------------------
// TESTRESPONSE[s/\.\.\.//]
==== Index sorting
By default this aggregation runs on every document that match the query.
Though if the index sort matches the composite sort this aggregation can optimize
the execution and can skip documents that contain composite buckets that would not
be part of the response.
For instance the following aggregations:
[source,js]
--------------------------------------------------
GET /_search
{
"aggs" : {
"my_buckets": {
"composite" : {
"size": 2,
"sources" : [
{ "date": { "date_histogram": { "field": "timestamp", "interval": "1d", "order": "asc" } } },
{ "product": { "terms": { "field": "product", "order": "asc" } } }
]
}
}
}
}
--------------------------------------------------
// CONSOLE
\... is much faster on an index that uses the following sort:
[source,js]
--------------------------------------------------
PUT twitter
{
"settings" : {
"index" : {
"sort.field" : ["timestamp", "product"],
"sort.order" : ["asc", "asc"]
}
},
"mappings": {
"sales": {
"properties": {
"timestamp": {
"type": "date"
},
"product": {
"type": "keyword"
}
}
}
}
}
--------------------------------------------------
// CONSOLE
WARNING: The optimization takes effect only if the fields used for sorting are single-valued and follow
the same order as the aggregation (`desc` or `asc`).
If only the aggregation results are needed it is also better to set the size of the query to 0
and `track_total_hits` to false in order to remove other slowing factors:
[source,js]
--------------------------------------------------
GET /_search
{
"size": 0,
"track_total_hits": false,
"aggs" : {
"my_buckets": {
"composite" : {
"size": 2,
"sources" : [
{ "date": { "date_histogram": { "field": "timestamp", "interval": "1d" } } },
{ "product": { "terms": { "field": "product" } } }
]
}
}
}
}
--------------------------------------------------
// CONSOLE
See <<index-modules-index-sorting, index sorting>> for more details.


@ -136,7 +136,7 @@ POST _reindex
// TEST[setup:twitter]
You can limit the documents by adding a type to the `source` or by adding a
query. This will only copy ++tweet++&apos;s made by `kimchy` into `new_twitter`:
query. This will only copy tweets made by `kimchy` into `new_twitter`:
[source,js]
--------------------------------------------------
@ -161,11 +161,13 @@ POST _reindex
`index` and `type` in `source` can both be lists, allowing you to copy from
lots of sources in one request. This will copy documents from the `_doc` and
`post` types in the `twitter` and `blog` index. It'd include the `post` type in
the `twitter` index and the `_doc` type in the `blog` index. If you want to be
more specific you'll need to use the `query`. It also makes no effort to handle
ID collisions. The target index will remain valid but it's not easy to predict
which document will survive because the iteration order isn't well defined.
`post` types in the `twitter` and `blog` index. The copied documents would include the
`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more
specific parameters, you can use `query`.
The Reindex API makes no effort to handle ID collisions. For such issues, the target index
will remain valid, but it's not easy to predict which document will survive because
the iteration order isn't well defined.
[source,js]
--------------------------------------------------
@ -203,8 +205,8 @@ POST _reindex
// CONSOLE
// TEST[setup:twitter]
If you want a particular set of documents from the twitter index you'll
need to sort. Sorting makes the scroll less efficient but in some contexts
If you want a particular set of documents from the `twitter` index you'll
need to use `sort`. Sorting makes the scroll less efficient but in some contexts
it's worth it. If possible, prefer a more selective query to `size` and `sort`.
This will copy 10000 documents from `twitter` into `new_twitter`:
@ -226,8 +228,8 @@ POST _reindex
// TEST[setup:twitter]
The `source` section supports all the elements that are supported in a
<<search-request-body,search request>>. For instance only a subset of the
fields from the original documents can be reindexed using source filtering
<<search-request-body,search request>>. For instance, only a subset of the
fields from the original documents can be reindexed using `source` filtering
as follows:
[source,js]
@ -286,10 +288,10 @@ Set `ctx.op = "delete"` if your script decides that the document must be
deleted from the destination index. The deletion will be reported in the
`deleted` counter in the <<docs-reindex-response-body, response body>>.
Setting `ctx.op` to anything else is an error. Setting any
other field in `ctx` is an error.
Setting `ctx.op` to anything else will return an error, as will setting any
other field in `ctx`.
Think of the possibilities! Just be careful! With great power.... You can
Think of the possibilities! Just be careful; you are able to
change:
* `_id`
@ -299,7 +301,7 @@ change:
* `_routing`
Setting `_version` to `null` or clearing it from the `ctx` map is just like not
sending the version in an indexing request. It will cause that document to be
sending the version in an indexing request; it will cause the document to be
overwritten in the target index regardless of the version on the target or the
version type you use in the `_reindex` request.
@ -310,11 +312,11 @@ preserved unless it's changed by the script. You can set `routing` on the
`keep`::
Sets the routing on the bulk request sent for each match to the routing on
the match. The default.
the match. This is the default value.
`discard`::
Sets the routing on the bulk request sent for each match to null.
Sets the routing on the bulk request sent for each match to `null`.
`=<some text>`::
@ -422,7 +424,7 @@ POST _reindex
The `host` parameter must contain a scheme, host, and port (e.g.
`https://otherhost:9200`). The `username` and `password` parameters are
optional and when they are present reindex will connect to the remote
optional, and when they are present `_reindex` will connect to the remote
Elasticsearch node using basic auth. Be sure to use `https` when using
basic auth or the password will be sent in plain text.
@ -446,7 +448,7 @@ NOTE: Reindexing from remote clusters does not support
Reindexing from a remote server uses an on-heap buffer that defaults to a
maximum size of 100mb. If the remote index includes very large documents you'll
need to use a smaller batch size. The example below sets the batch size `10`
need to use a smaller batch size. The example below sets the batch size to `10`
which is very, very small.
[source,js]
@ -477,8 +479,8 @@ POST _reindex
It is also possible to set the socket read timeout on the remote connection
with the `socket_timeout` field and the connection timeout with the
`connect_timeout` field. Both default to thirty seconds. This example
sets the socket read timeout to one minute and the connection timeout to ten
`connect_timeout` field. Both default to 30 seconds. This example
sets the socket read timeout to one minute and the connection timeout to 10
seconds:
[source,js]
@ -533,14 +535,14 @@ for details. `timeout` controls how long each write request waits for unavailabl
shards to become available. Both work exactly how they work in the
<<docs-bulk,Bulk API>>. As `_reindex` uses scroll search, you can also specify
the `scroll` parameter to control how long it keeps the "search context" alive,
eg `?scroll=10m`, by default it's 5 minutes.
(e.g. `?scroll=10m`). The default value is 5 minutes.
`requests_per_second` can be set to any positive decimal number (`1.4`, `6`,
`1000`, etc) and throttles rate at which reindex issues batches of index
`1000`, etc) and throttles the rate at which `_reindex` issues batches of index
operations by padding each batch with a wait time. The throttling can be
disabled by setting `requests_per_second` to `-1`.
The throttling is done by waiting between batches so that scroll that reindex
The throttling is done by waiting between batches so that the `scroll` which `_reindex`
uses internally can be given a timeout that takes into account the padding.
The padding time is the difference between the batch size divided by the
`requests_per_second` and the time spent writing. By default the batch size is
@ -552,9 +554,9 @@ target_time = 1000 / 500 per second = 2 seconds
wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds
--------------------------------------------------
Since the batch is issued as a single `_bulk` request large batch sizes will
Since the batch is issued as a single `_bulk` request, large batch sizes will
cause Elasticsearch to create many requests and then wait for a while before
starting the next set. This is "bursty" instead of "smooth". The default is `-1`.
starting the next set. This is "bursty" instead of "smooth". The default value is `-1`.
[float]
[[docs-reindex-response-body]]
@ -606,12 +608,12 @@ The JSON response looks like this:
`took`::
The number of milliseconds from start to end of the whole operation.
The total milliseconds the entire operation took.
`timed_out`::
This flag is set to `true` if any of the requests executed during the
reindex has timed out.
reindex timed out.
`total`::
@ -657,7 +659,7 @@ The number of requests per second effectively executed during the reindex.
`throttled_until_millis`::
This field should always be equal to zero in a delete by query response. It only
This field should always be equal to zero in a `_delete_by_query` response. It only
has meaning when using the <<docs-reindex-task-api, Task API>>, where it
indicates the next time (in milliseconds since epoch) a throttled request will be
executed again in order to conform to `requests_per_second`.
@ -681,7 +683,7 @@ GET _tasks?detailed=true&actions=*reindex
--------------------------------------------------
// CONSOLE
The responses looks like:
The response looks like:
[source,js]
--------------------------------------------------
@ -726,9 +728,9 @@ The responses looks like:
// NOTCONSOLE
// We can't test tasks output
<1> this object contains the actual status. It is just like the response json
with the important addition of the `total` field. `total` is the total number
of operations that the reindex expects to perform. You can estimate the
<1> this object contains the actual status. It is identical to the response JSON
except for the important addition of the `total` field. `total` is the total number
of operations that the `_reindex` expects to perform. You can estimate the
progress by adding the `updated`, `created`, and `deleted` fields. The request
will finish when their sum is equal to the `total` field.
@ -743,7 +745,7 @@ GET /_tasks/taskId:1
The advantage of this API is that it integrates with `wait_for_completion=false`
to transparently return the status of completed tasks. If the task is completed
and `wait_for_completion=false` was set on it them it'll come back with a
and `wait_for_completion=false` was set, it will return a
`results` or an `error` field. The cost of this feature is the document that
`wait_for_completion=false` creates at `.tasks/task/${taskId}`. It is up to
you to delete that document.
@ -761,10 +763,10 @@ POST _tasks/task_id:1/_cancel
--------------------------------------------------
// CONSOLE
The `task_id` can be found using the tasks API above.
The `task_id` can be found using the Tasks API.
Cancelation should happen quickly but might take a few seconds. The task status
API above will continue to list the task until it is wakes to cancel itself.
Cancelation should happen quickly but might take a few seconds. The Tasks
API will continue to list the task until it wakes to cancel itself.
[float]
@ -780,9 +782,9 @@ POST _reindex/task_id:1/_rethrottle?requests_per_second=-1
--------------------------------------------------
// CONSOLE
The `task_id` can be found using the tasks API above.
The `task_id` can be found using the Tasks API above.
Just like when setting it on the `_reindex` API `requests_per_second`
Just like when setting it on the Reindex API, `requests_per_second`
can be either `-1` to disable throttling or any decimal number
like `1.7` or `12` to throttle to that level. Rethrottling that speeds up the
query takes effect immediately but rethrottling that slows down the query will
@ -806,7 +808,7 @@ POST test/_doc/1?refresh
--------------------------------------------------
// CONSOLE
But you don't like the name `flag` and want to replace it with `tag`.
but you don't like the name `flag` and want to replace it with `tag`.
`_reindex` can create the other index for you:
[source,js]
@ -836,7 +838,7 @@ GET test2/_doc/1
// CONSOLE
// TEST[continued]
and it'll look like:
which will return:
[source,js]
--------------------------------------------------
@ -854,8 +856,6 @@ and it'll look like:
--------------------------------------------------
// TESTRESPONSE
Or you can search by `tag` or whatever you want.
[float]
[[docs-reindex-slice]]
=== Slicing
@ -902,7 +902,7 @@ POST _reindex
// CONSOLE
// TEST[setup:big_twitter]
Which you can verify works with:
You can verify this works by:
[source,js]
----------------------------------------------------------------
@ -912,7 +912,7 @@ POST new_twitter/_search?size=0&filter_path=hits.total
// CONSOLE
// TEST[continued]
Which results in a sensible `total` like this one:
which results in a sensible `total` like this one:
[source,js]
----------------------------------------------------------------
@ -928,7 +928,7 @@ Which results in a sensible `total` like this one:
[[docs-reindex-automatic-slice]]
==== Automatic slicing
You can also let reindex automatically parallelize using <<sliced-scroll>> to
You can also let `_reindex` automatically parallelize using <<sliced-scroll>> to
slice on `_uid`. Use `slices` to specify the number of slices to use:
[source,js]
@ -946,7 +946,7 @@ POST _reindex?slices=5&refresh
// CONSOLE
// TEST[setup:big_twitter]
Which you also can verify works with:
You can also verify this works by:
[source,js]
----------------------------------------------------------------
@ -955,7 +955,7 @@ POST new_twitter/_search?size=0&filter_path=hits.total
// CONSOLE
// TEST[continued]
Which results in a sensible `total` like this one:
which results in a sensible `total` like this one:
[source,js]
----------------------------------------------------------------
@ -979,7 +979,7 @@ section above, creating sub-requests which means it has some quirks:
sub-requests are "child" tasks of the task for the request with `slices`.
* Fetching the status of the task for the request with `slices` only contains
the status of completed slices.
* These sub-requests are individually addressable for things like cancellation
* These sub-requests are individually addressable for things like cancelation
and rethrottling.
* Rethrottling the request with `slices` will rethrottle the unfinished
sub-request proportionally.
@ -992,7 +992,7 @@ are distributed proportionally to each sub-request. Combine that with the point
above about distribution being uneven and you should conclude that using
`size` with `slices` might not result in exactly `size` documents being
`_reindex`ed.
* Each sub-requests gets a slightly different snapshot of the source index
* Each sub-request gets a slightly different snapshot of the source index,
though these are all taken at approximately the same time.
[float]
@ -1000,12 +1000,12 @@ though these are all taken at approximately the same time.
===== Picking the number of slices
If slicing automatically, setting `slices` to `auto` will choose a reasonable
number for most indices. If you're slicing manually or otherwise tuning
number for most indices. If slicing manually or otherwise tuning
automatic slicing, use these guidelines.
Query performance is most efficient when the number of `slices` is equal to the
number of shards in the index. If that number is large, (for example,
500) choose a lower number as too many `slices` will hurt performance. Setting
number of shards in the index. If that number is large (e.g. 500),
choose a lower number as too many `slices` will hurt performance. Setting
`slices` higher than the number of shards generally does not improve efficiency
and adds overhead.
@ -1018,10 +1018,10 @@ documents being reindexed and cluster resources.
[float]
=== Reindex daily indices
You can use `_reindex` in combination with <<modules-scripting-painless, Painless>>
to reindex daily indices to apply a new template to the existing documents.
You can use `_reindex` in combination with <<modules-scripting-painless, Painless>>
to reindex daily indices to apply a new template to the existing documents.
Assuming you have indices consisting of documents as following:
Assuming you have indices consisting of documents as follows:
[source,js]
----------------------------------------------------------------
@ -1032,12 +1032,12 @@ PUT metricbeat-2016.05.31/_doc/1?refresh
----------------------------------------------------------------
// CONSOLE
The new template for the `metricbeat-*` indices is already loaded into Elasticsearch
The new template for the `metricbeat-*` indices is already loaded into Elasticsearch,
but it applies only to the newly created indices. Painless can be used to reindex
the existing documents and apply the new template.
The script below extracts the date from the index name and creates a new index
with `-1` appended. All data from `metricbeat-2016.05.31` will be reindex
with `-1` appended. All data from `metricbeat-2016.05.31` will be reindexed
into `metricbeat-2016.05.31-1`.
[source,js]
@ -1059,7 +1059,7 @@ POST _reindex
// CONSOLE
// TEST[continued]
All documents from the previous metricbeat indices now can be found in the `*-1` indices.
All documents from the previous metricbeat indices can now be found in the `*-1` indices.
[source,js]
----------------------------------------------------------------
@ -1069,13 +1069,13 @@ GET metricbeat-2016.05.31-1/_doc/1
// CONSOLE
// TEST[continued]
The previous method can also be used in combination with <<docs-reindex-change-name, change the name of a field>>
to only load the existing data into the new index, but also rename fields if needed.
The previous method can also be used in conjunction with <<docs-reindex-change-name, change the name of a field>>
to load only the existing data into the new index and rename any fields if needed.
[float]
=== Extracting a random subset of an index
Reindex can be used to extract a random subset of an index for testing:
`_reindex` can be used to extract a random subset of an index for testing:
[source,js]
----------------------------------------------------------------
@ -1100,5 +1100,5 @@ POST _reindex
// CONSOLE
// TEST[setup:big_twitter]
<1> Reindex defaults to sorting by `_doc` so `random_score` won't have any
<1> `_reindex` defaults to sorting by `_doc` so `random_score` will not have any
effect unless you override the sort to `_score`.

View File

@ -93,7 +93,7 @@ which returns something similar to:
{
"commit" : {
"id" : "3M3zkw2GHMo2Y4h4/KFKCg==",
"generation" : 3,
"generation" : 4,
"user_data" : {
"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA",
"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ",

View File

@ -167,6 +167,7 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
public HasChildQueryBuilder innerHit(InnerHitBuilder innerHit) {
this.innerHitBuilder = innerHit;
innerHitBuilder.setIgnoreUnmapped(ignoreUnmapped);
return this;
}
@ -212,6 +213,9 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
*/
public HasChildQueryBuilder ignoreUnmapped(boolean ignoreUnmapped) {
this.ignoreUnmapped = ignoreUnmapped;
if (innerHitBuilder != null) {
innerHitBuilder.setIgnoreUnmapped(ignoreUnmapped);
}
return this;
}
@ -291,7 +295,6 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
hasChildQueryBuilder.ignoreUnmapped(ignoreUnmapped);
if (innerHitBuilder != null) {
hasChildQueryBuilder.innerHit(innerHitBuilder);
hasChildQueryBuilder.ignoreUnmapped(ignoreUnmapped);
}
return hasChildQueryBuilder;
}
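
Taken together, the two setters above now keep the inner hit's `ignore_unmapped` flag in sync regardless of the order in which callers invoke them. A minimal sketch of the behaviour this guarantees (the `"comment"` relation name is illustrative, not taken from this change):

[source,java]
----------------------------------------------------------------
import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.join.query.HasChildQueryBuilder;

public class IgnoreUnmappedPropagationSketch {
    public static void main(String[] args) {
        HasChildQueryBuilder query =
            new HasChildQueryBuilder("comment", new MatchAllQueryBuilder(), ScoreMode.None);
        query.innerHit(new InnerHitBuilder());  // inner hit picks up the builder's current ignore_unmapped value
        query.ignoreUnmapped(true);             // now also propagated to the already-configured inner hit
        assert query.innerHit().isIgnoreUnmapped();
    }
}
----------------------------------------------------------------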

View File

@ -145,6 +145,7 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
public HasParentQueryBuilder innerHit(InnerHitBuilder innerHit) {
this.innerHitBuilder = innerHit;
innerHitBuilder.setIgnoreUnmapped(ignoreUnmapped);
return this;
}
@ -155,6 +156,9 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
*/
public HasParentQueryBuilder ignoreUnmapped(boolean ignoreUnmapped) {
this.ignoreUnmapped = ignoreUnmapped;
if (innerHitBuilder != null) {
innerHitBuilder.setIgnoreUnmapped(ignoreUnmapped);
}
return this;
}

View File

@ -158,8 +158,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQue
hqb.innerHit(new InnerHitBuilder()
.setName(randomAlphaOfLengthBetween(1, 10))
.setSize(randomIntBetween(0, 100))
.addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC))
.setIgnoreUnmapped(hqb.ignoreUnmapped()));
.addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC)));
}
return hqb;
}
@ -345,13 +344,19 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQue
public void testIgnoreUnmapped() throws IOException {
final HasChildQueryBuilder queryBuilder = new HasChildQueryBuilder("unmapped", new MatchAllQueryBuilder(), ScoreMode.None);
queryBuilder.innerHit(new InnerHitBuilder());
assertFalse(queryBuilder.innerHit().isIgnoreUnmapped());
queryBuilder.ignoreUnmapped(true);
assertTrue(queryBuilder.innerHit().isIgnoreUnmapped());
Query query = queryBuilder.toQuery(createShardContext());
assertThat(query, notNullValue());
assertThat(query, instanceOf(MatchNoDocsQuery.class));
final HasChildQueryBuilder failingQueryBuilder = new HasChildQueryBuilder("unmapped", new MatchAllQueryBuilder(), ScoreMode.None);
failingQueryBuilder.innerHit(new InnerHitBuilder());
assertFalse(failingQueryBuilder.innerHit().isIgnoreUnmapped());
failingQueryBuilder.ignoreUnmapped(false);
assertFalse(failingQueryBuilder.innerHit().isIgnoreUnmapped());
QueryShardException e = expectThrows(QueryShardException.class, () -> failingQueryBuilder.toQuery(createShardContext()));
assertThat(e.getMessage(), containsString("[" + HasChildQueryBuilder.NAME +
"] join field [join_field] doesn't hold [unmapped] as a child"));

View File

@ -132,8 +132,7 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ
hqb.innerHit(new InnerHitBuilder()
.setName(randomAlphaOfLengthBetween(1, 10))
.setSize(randomIntBetween(0, 100))
.addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC))
.setIgnoreUnmapped(hqb.ignoreUnmapped()));
.addSort(new FieldSortBuilder(STRING_FIELD_NAME_2).order(SortOrder.ASC)));
}
return hqb;
}
@ -245,13 +244,19 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ
public void testIgnoreUnmapped() throws IOException {
final HasParentQueryBuilder queryBuilder = new HasParentQueryBuilder("unmapped", new MatchAllQueryBuilder(), false);
queryBuilder.innerHit(new InnerHitBuilder());
assertFalse(queryBuilder.innerHit().isIgnoreUnmapped());
queryBuilder.ignoreUnmapped(true);
assertTrue(queryBuilder.innerHit().isIgnoreUnmapped());
Query query = queryBuilder.toQuery(createShardContext());
assertThat(query, notNullValue());
assertThat(query, instanceOf(MatchNoDocsQuery.class));
final HasParentQueryBuilder failingQueryBuilder = new HasParentQueryBuilder("unmapped", new MatchAllQueryBuilder(), false);
failingQueryBuilder.innerHit(new InnerHitBuilder());
assertFalse(failingQueryBuilder.innerHit().isIgnoreUnmapped());
failingQueryBuilder.ignoreUnmapped(false);
assertFalse(failingQueryBuilder.innerHit().isIgnoreUnmapped());
QueryShardException e = expectThrows(QueryShardException.class, () -> failingQueryBuilder.toQuery(createShardContext()));
assertThat(e.getMessage(),
containsString("[has_parent] join field [join_field] doesn't hold [unmapped] as a parent"));

View File

@ -25,14 +25,14 @@ for (Project subproj : project.rootProject.subprojects) {
if (subproj.path.startsWith(':plugins:') || subproj.path.equals(':example-plugins:custom-settings')) {
// add plugin as a dep
dependencies {
bats project(path: "${subproj.path}", configuration: 'zip')
packaging project(path: "${subproj.path}", configuration: 'zip')
}
plugins.add(subproj.name)
}
}
plugins = plugins.toSorted()
setupBats {
setupPackagingTest {
doFirst {
File expectedPlugins = file('build/plugins/expected')
expectedPlugins.parentFile.mkdirs()

View File

@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.status;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
@ -141,8 +142,8 @@ public class SnapshotStats implements Streamable, ToXContentFragment {
builder.startObject(Fields.STATS);
builder.field(Fields.NUMBER_OF_FILES, getNumberOfFiles());
builder.field(Fields.PROCESSED_FILES, getProcessedFiles());
builder.byteSizeField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, getTotalSize());
builder.byteSizeField(Fields.PROCESSED_SIZE_IN_BYTES, Fields.PROCESSED_SIZE, getProcessedSize());
builder.humanReadableField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, new ByteSizeValue(getTotalSize()));
builder.humanReadableField(Fields.PROCESSED_SIZE_IN_BYTES, Fields.PROCESSED_SIZE, new ByteSizeValue(getProcessedSize()));
builder.field(Fields.START_TIME_IN_MILLIS, getStartTime());
builder.humanReadableField(Fields.TIME_IN_MILLIS, Fields.TIME, new TimeValue(getTime()));
builder.endObject();
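
The same replacement of `byteSizeField` with `humanReadableField` recurs throughout this commit, so a compact sketch of the pattern may help; the field names and wrapper class below are illustrative, and the `ByteSizeValue` transformer it relies on is registered by the Elasticsearch x-content extension shown later in this diff:

[source,java]
----------------------------------------------------------------
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

import java.io.IOException;

public class HumanReadableFieldSketch {
    static XContentBuilder sizeAsXContent(long sizeInBytes) throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.humanReadable(true);  // without this flag only the raw *_in_bytes field is emitted
        builder.startObject();
        // was: builder.byteSizeField("size_in_bytes", "size", sizeInBytes);
        builder.humanReadableField("size_in_bytes", "size", new ByteSizeValue(sizeInBytes));
        builder.endObject();
        return builder;
    }
}
----------------------------------------------------------------

Because `ByteSizeValue` (and `TimeValue`) carry their own human-readable rendering, the builder itself no longer needs to know anything about byte sizes or durations.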

View File

@ -501,8 +501,8 @@ public class ClusterStatsNodes implements ToXContentFragment {
}
builder.endArray();
builder.startObject(Fields.MEM);
builder.byteSizeField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, heapUsed);
builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, heapMax);
builder.humanReadableField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, getHeapUsed());
builder.humanReadableField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, getHeapMax());
builder.endObject();
builder.field(Fields.THREADS, threads);

View File

@ -132,8 +132,8 @@ public class IndicesSegmentResponse extends BroadcastResponse {
builder.field(Fields.GENERATION, segment.getGeneration());
builder.field(Fields.NUM_DOCS, segment.getNumDocs());
builder.field(Fields.DELETED_DOCS, segment.getDeletedDocs());
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSizeInBytes());
builder.byteSizeField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, segment.getMemoryInBytes());
builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, segment.getSize());
builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, new ByteSizeValue(segment.getMemoryInBytes()));
builder.field(Fields.COMMITTED, segment.isCommitted());
builder.field(Fields.SEARCH, segment.isSearch());
if (segment.getVersion() != null) {

View File

@ -233,7 +233,7 @@ public class CommonStats implements Writeable, ToXContentFragment {
store = in.readOptionalStreamable(StoreStats::new);
indexing = in.readOptionalStreamable(IndexingStats::new);
get = in.readOptionalStreamable(GetStats::new);
search = in.readOptionalStreamable(SearchStats::new);
search = in.readOptionalWriteable(SearchStats::new);
merge = in.readOptionalStreamable(MergeStats::new);
refresh = in.readOptionalStreamable(RefreshStats::new);
flush = in.readOptionalStreamable(FlushStats::new);
@ -253,7 +253,7 @@ public class CommonStats implements Writeable, ToXContentFragment {
out.writeOptionalStreamable(store);
out.writeOptionalStreamable(indexing);
out.writeOptionalStreamable(get);
out.writeOptionalStreamable(search);
out.writeOptionalWriteable(search);
out.writeOptionalStreamable(merge);
out.writeOptionalStreamable(refresh);
out.writeOptionalStreamable(flush);

View File

@ -23,6 +23,7 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
@ -116,9 +117,10 @@ public class UpgradeStatusResponse extends BroadcastResponse {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, getTotalBytes());
builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, getToUpgradeBytes());
builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, getToUpgradeBytesAncient());
builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getTotalBytes()));
builder.humanReadableField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, new ByteSizeValue(getToUpgradeBytes()));
builder.humanReadableField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT,
new ByteSizeValue(getToUpgradeBytesAncient()));
String level = params.param("level", "indices");
boolean outputShards = "shards".equals(level);
@ -128,9 +130,11 @@ public class UpgradeStatusResponse extends BroadcastResponse {
for (IndexUpgradeStatus indexUpgradeStatus : getIndices().values()) {
builder.startObject(indexUpgradeStatus.getIndex());
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, indexUpgradeStatus.getTotalBytes());
builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, indexUpgradeStatus.getToUpgradeBytes());
builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, indexUpgradeStatus.getToUpgradeBytesAncient());
builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(indexUpgradeStatus.getTotalBytes()));
builder.humanReadableField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE,
new ByteSizeValue(indexUpgradeStatus.getToUpgradeBytes()));
builder.humanReadableField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT,
new ByteSizeValue(indexUpgradeStatus.getToUpgradeBytesAncient()));
if (outputShards) {
builder.startObject(Fields.SHARDS);
for (IndexShardUpgradeStatus indexShardUpgradeStatus : indexUpgradeStatus) {
@ -138,9 +142,11 @@ public class UpgradeStatusResponse extends BroadcastResponse {
for (ShardUpgradeStatus shardUpgradeStatus : indexShardUpgradeStatus) {
builder.startObject();
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, getTotalBytes());
builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, getToUpgradeBytes());
builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, getToUpgradeBytesAncient());
builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(getTotalBytes()));
builder.humanReadableField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE,
new ByteSizeValue(getToUpgradeBytes()));
builder.humanReadableField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT,
new ByteSizeValue(getToUpgradeBytesAncient()));
builder.startObject(Fields.ROUTING);
builder.field(Fields.STATE, shardUpgradeStatus.getShardRouting().state());

View File

@ -26,6 +26,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -133,7 +134,7 @@ public class ClusterInfo implements ToXContentFragment, Writeable {
builder.endObject(); // end "nodes"
builder.startObject("shard_sizes"); {
for (ObjectObjectCursor<String, Long> c : this.shardSizes) {
builder.byteSizeField(c.key + "_bytes", c.key, c.value);
builder.humanReadableField(c.key + "_bytes", c.key, new ByteSizeValue(c.value));
}
}
builder.endObject(); // end "shard_sizes"

View File

@ -75,9 +75,9 @@ public class DiskUsage implements ToXContentFragment, Writeable {
XContentBuilder toShortXContent(XContentBuilder builder) throws IOException {
builder.field("path", this.path);
builder.byteSizeField("total_bytes", "total", this.totalBytes);
builder.byteSizeField("used_bytes", "used", this.getUsedBytes());
builder.byteSizeField("free_bytes", "free", this.freeBytes);
builder.humanReadableField("total_bytes", "total", new ByteSizeValue(this.totalBytes));
builder.humanReadableField("used_bytes", "used", new ByteSizeValue(this.getUsedBytes()));
builder.humanReadableField("free_bytes", "free", new ByteSizeValue(this.freeBytes));
builder.field("free_disk_percent", truncatePercent(this.getFreeDiskAsPercentage()));
builder.field("used_disk_percent", truncatePercent(this.getUsedDiskAsPercentage()));
return builder;

View File

@ -434,7 +434,7 @@ public final class IndexGraveyard implements MetaData.Custom {
builder.startObject();
builder.field(INDEX_KEY);
index.toXContent(builder, params);
builder.dateField(DELETE_DATE_IN_MILLIS_KEY, DELETE_DATE_KEY, deleteDateInMillis);
builder.timeField(DELETE_DATE_IN_MILLIS_KEY, DELETE_DATE_KEY, deleteDateInMillis);
return builder.endObject();
}

View File

@ -43,7 +43,7 @@ import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.UnknownNamedObjectException;
import org.elasticsearch.common.xcontent.NamedObjectNotFoundException;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -1173,7 +1173,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
try {
Custom custom = parser.namedObject(Custom.class, currentFieldName, null);
builder.putCustom(custom.getWriteableName(), custom);
} catch (UnknownNamedObjectException ex) {
} catch (NamedObjectNotFoundException ex) {
logger.warn("Skipping unknown custom object with type {}", currentFieldName);
parser.skipChildren();
}

View File

@ -27,6 +27,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -289,7 +290,7 @@ public class NodeAllocationResult implements ToXContentObject, Writeable, Compar
if (hasMatchingSyncId()) {
builder.field("matching_sync_id", true);
} else {
builder.byteSizeField("matching_size_in_bytes", "matching_size", matchingBytes);
builder.humanReadableField("matching_size_in_bytes", "matching_size", new ByteSizeValue(matchingBytes));
}
}
if (storeException != null) {

View File

@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectLongCursor;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
@ -85,7 +86,7 @@ public final class FieldMemoryStats implements Writeable, Iterable<ObjectLongCur
builder.startObject(key);
for (ObjectLongCursor<String> entry : stats) {
builder.startObject(entry.key);
builder.byteSizeField(rawKey, readableKey, entry.value);
builder.humanReadableField(rawKey, readableKey, new ByteSizeValue(entry.value));
builder.endObject();
}
builder.endObject();

View File

@ -17,20 +17,19 @@
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.composite;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;
import java.util.Arrays;
import java.util.List;
public class CompositeAggregationPlugin extends Plugin implements SearchPlugin {
@Override
public List<AggregationSpec> getAggregations() {
return Arrays.asList(
new AggregationSpec(CompositeAggregationBuilder.NAME, CompositeAggregationBuilder::new, CompositeAggregationBuilder::parse)
.addResultReader(InternalComposite::new)
);
package org.elasticsearch.common.xcontent;
/**
* Thrown when {@link NamedXContentRegistry} cannot locate a named object to
* parse for a particular name
*/
public class NamedObjectNotFoundException extends XContentParseException {
public NamedObjectNotFoundException(String message) {
this(null, message);
}
public NamedObjectNotFoundException(XContentLocation location, String message) {
super(location, message);
}
}

View File

@ -19,10 +19,8 @@
package org.elasticsearch.common.xcontent;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import java.io.IOException;
import java.util.ArrayList;
@ -114,28 +112,31 @@ public class NamedXContentRegistry {
}
/**
* Parse a named object, throwing an exception if the parser isn't found. Throws an {@link ElasticsearchException} if the
* {@code categoryClass} isn't registered because this is almost always a bug. Throws a {@link UnknownNamedObjectException} if the
* Parse a named object, throwing an exception if the parser isn't found. Throws a {@link NamedObjectNotFoundException} if the
* {@code categoryClass} isn't registered because this is almost always a bug. Throws a {@link NamedObjectNotFoundException} if the
* {@code categoryClass} is registered but the {@code name} isn't.
*
* @throws NamedObjectNotFoundException if the categoryClass or name is not registered
*/
public <T, C> T parseNamedObject(Class<T> categoryClass, String name, XContentParser parser, C context) throws IOException {
Map<String, Entry> parsers = registry.get(categoryClass);
if (parsers == null) {
if (registry.isEmpty()) {
// The "empty" registry will never work so we throw a better exception as a hint.
throw new ElasticsearchException("namedObject is not supported for this parser");
throw new NamedObjectNotFoundException("named objects are not supported for this parser");
}
throw new ElasticsearchException("Unknown namedObject category [" + categoryClass.getName() + "]");
throw new NamedObjectNotFoundException("unknown named object category [" + categoryClass.getName() + "]");
}
Entry entry = parsers.get(name);
if (entry == null) {
throw new UnknownNamedObjectException(parser.getTokenLocation(), categoryClass, name);
throw new NamedObjectNotFoundException(parser.getTokenLocation(), "unable to parse " + categoryClass.getSimpleName() +
" with name [" + name + "]: parser not found");
}
if (false == entry.name.match(name, parser.getDeprecationHandler())) {
/* Note that this shouldn't happen because we already looked up the entry using the names but we need to call `match` anyway
* because it is responsible for logging deprecation warnings. */
throw new ParsingException(parser.getTokenLocation(),
"Unknown " + categoryClass.getSimpleName() + " [" + name + "]: Parser didn't match");
throw new NamedObjectNotFoundException(parser.getTokenLocation(),
"unable to parse " + categoryClass.getSimpleName() + " with name [" + name + "]: parser didn't match");
}
return categoryClass.cast(entry.parser.parse(parser, context));
}

View File

@ -19,12 +19,7 @@
package org.elasticsearch.common.xcontent;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.CollectionUtils;
import org.joda.time.DateTimeZone;
import org.joda.time.ReadableInstant;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.ISODateTimeFormat;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
@ -38,12 +33,14 @@ import java.util.Arrays;
import java.util.Calendar;
import java.util.Collections;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.ServiceLoader;
import java.util.Set;
import java.util.function.Function;
/**
* A utility to build XContent (ie json).
@ -81,16 +78,15 @@ public final class XContentBuilder implements Closeable, Flushable {
return new XContentBuilder(xContent, new ByteArrayOutputStream(), includes, excludes);
}
public static final DateTimeFormatter DEFAULT_DATE_PRINTER = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC);
private static final Map<Class<?>, Writer> WRITERS;
private static final Map<Class<?>, HumanReadableTransformer> HUMAN_READABLE_TRANSFORMERS;
private static final Map<Class<?>, Function<Object, Object>> DATE_TRANSFORMERS;
static {
Map<Class<?>, Writer> writers = new HashMap<>();
writers.put(Boolean.class, (b, v) -> b.value((Boolean) v));
writers.put(Byte.class, (b, v) -> b.value((Byte) v));
writers.put(byte[].class, (b, v) -> b.value((byte[]) v));
writers.put(Date.class, (b, v) -> b.value((Date) v));
writers.put(Date.class, XContentBuilder::timeValue);
writers.put(Double.class, (b, v) -> b.value((Double) v));
writers.put(double[].class, (b, v) -> b.values((double[]) v));
writers.put(Float.class, (b, v) -> b.value((Float) v));
@ -106,26 +102,37 @@ public final class XContentBuilder implements Closeable, Flushable {
writers.put(Locale.class, (b, v) -> b.value(v.toString()));
writers.put(Class.class, (b, v) -> b.value(v.toString()));
writers.put(ZonedDateTime.class, (b, v) -> b.value(v.toString()));
writers.put(Calendar.class, XContentBuilder::timeValue);
writers.put(GregorianCalendar.class, XContentBuilder::timeValue);
Map<Class<?>, HumanReadableTransformer> humanReadableTransformer = new HashMap<>();
Map<Class<?>, Function<Object, Object>> dateTransformers = new HashMap<>();
// treat strings as already converted
dateTransformers.put(String.class, Function.identity());
// Load pluggable extensions
for (XContentBuilderExtension service : ServiceLoader.load(XContentBuilderExtension.class)) {
Map<Class<?>, Writer> addlWriters = service.getXContentWriters();
Map<Class<?>, HumanReadableTransformer> addlTransformers = service.getXContentHumanReadableTransformers();
Map<Class<?>, Function<Object, Object>> addlDateTransformers = service.getDateTransformers();
addlWriters.forEach((key, value) -> Objects.requireNonNull(value,
"invalid null xcontent writer for class " + key));
addlTransformers.forEach((key, value) -> Objects.requireNonNull(value,
"invalid null xcontent transformer for human readable class " + key));
dateTransformers.forEach((key, value) -> Objects.requireNonNull(value,
"invalid null xcontent date transformer for class " + key));
writers.putAll(addlWriters);
humanReadableTransformer.putAll(addlTransformers);
dateTransformers.putAll(addlDateTransformers);
}
WRITERS = Collections.unmodifiableMap(writers);
HUMAN_READABLE_TRANSFORMERS = Collections.unmodifiableMap(humanReadableTransformer);
DATE_TRANSFORMERS = Collections.unmodifiableMap(dateTransformers);
}
@FunctionalInterface
@ -610,15 +617,6 @@ public final class XContentBuilder implements Closeable, Flushable {
return this;
}
/**
* Writes the binary content of the given byte array as UTF-8 bytes.
*
* Use {@link XContentParser#charBuffer()} to read the value back
*/
public XContentBuilder utf8Field(String name, byte[] bytes, int offset, int length) throws IOException {
return field(name).utf8Value(bytes, offset, length);
}
/**
* Writes the binary content of the given byte array as UTF-8 bytes.
*
@ -634,63 +632,49 @@ public final class XContentBuilder implements Closeable, Flushable {
// Date
//////////////////////////////////
public XContentBuilder field(String name, ReadableInstant value) throws IOException {
return field(name).value(value);
/**
* Write a time-based field and value; if the passed timeValue is null, a
* null value is written, otherwise a date transformer lookup is performed.
* @throws IllegalArgumentException if there is no transformer for the type of object
*/
public XContentBuilder timeField(String name, Object timeValue) throws IOException {
return field(name).timeValue(timeValue);
}
public XContentBuilder field(String name, ReadableInstant value, DateTimeFormatter formatter) throws IOException {
return field(name).value(value, formatter);
}
public XContentBuilder value(ReadableInstant value) throws IOException {
return value(value, DEFAULT_DATE_PRINTER);
}
public XContentBuilder value(ReadableInstant value, DateTimeFormatter formatter) throws IOException {
if (value == null) {
return nullValue();
}
ensureFormatterNotNull(formatter);
return value(formatter.print(value));
}
public XContentBuilder field(String name, Date value) throws IOException {
return field(name).value(value);
}
public XContentBuilder field(String name, Date value, DateTimeFormatter formatter) throws IOException {
return field(name).value(value, formatter);
}
public XContentBuilder value(Date value) throws IOException {
return value(value, DEFAULT_DATE_PRINTER);
}
public XContentBuilder value(Date value, DateTimeFormatter formatter) throws IOException {
if (value == null) {
return nullValue();
}
return value(formatter, value.getTime());
}
public XContentBuilder dateField(String name, String readableName, long value) throws IOException {
/**
* If the {@code humanReadable} flag is set, writes both a formatted and
* unformatted version of the time value using the date transformer for the
* {@link Long} class.
*/
public XContentBuilder timeField(String name, String readableName, long value) throws IOException {
if (humanReadable) {
field(readableName).value(DEFAULT_DATE_PRINTER, value);
Function<Object, Object> longTransformer = DATE_TRANSFORMERS.get(Long.class);
if (longTransformer == null) {
throw new IllegalArgumentException("cannot write time value xcontent for unknown value of type Long");
}
field(readableName).value(longTransformer.apply(value));
}
field(name, value);
return this;
}
XContentBuilder value(Calendar value) throws IOException {
if (value == null) {
return nullValue();
}
return value(DEFAULT_DATE_PRINTER, value.getTimeInMillis());
}
XContentBuilder value(DateTimeFormatter formatter, long value) throws IOException {
ensureFormatterNotNull(formatter);
return value(formatter.print(value));
/**
* Write a time-based value; if the value is null, a null value is written,
* otherwise a date transformer lookup is performed.
* @throws IllegalArgumentException if there is no transformer for the type of object
*/
public XContentBuilder timeValue(Object timeValue) throws IOException {
if (timeValue == null) {
return nullValue();
} else {
Function<Object, Object> transformer = DATE_TRANSFORMERS.get(timeValue.getClass());
if (transformer == null) {
throw new IllegalArgumentException("cannot write time value xcontent for unknown value of type " + timeValue.getClass());
}
return value(transformer.apply(timeValue));
}
}
////////////////////////////////////////////////////////////////////////////
@ -761,10 +745,6 @@ public final class XContentBuilder implements Closeable, Flushable {
value((Iterable<?>) value, ensureNoSelfReferences);
} else if (value instanceof Object[]) {
values((Object[]) value, ensureNoSelfReferences);
} else if (value instanceof Calendar) {
value((Calendar) value);
} else if (value instanceof ReadableInstant) {
value((ReadableInstant) value);
} else if (value instanceof ToXContent) {
value((ToXContent) value);
} else if (value instanceof Enum<?>) {
@ -895,14 +875,6 @@ public final class XContentBuilder implements Closeable, Flushable {
return this;
}
public XContentBuilder byteSizeField(String rawFieldName, String readableFieldName, long rawSize) throws IOException {
if (humanReadable) {
field(readableFieldName, new ByteSizeValue(rawSize).toString());
}
field(rawFieldName, rawSize);
return this;
}
////////////////////////////////////////////////////////////////////////////
// Raw fields
//////////////////////////////////
@ -960,10 +932,6 @@ public final class XContentBuilder implements Closeable, Flushable {
ensureNotNull(name, "Field name cannot be null");
}
static void ensureFormatterNotNull(DateTimeFormatter formatter) {
ensureNotNull(formatter, "DateTimeFormatter cannot be null");
}
static void ensureNotNull(Object value, String message) {
if (value == null) {
throw new IllegalArgumentException(message);
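
With the joda-specific `value(...)` overloads gone, date-like values funnel through `timeField`/`timeValue` and the pluggable date transformers. A short usage sketch under those assumptions (the field names are invented, and both calls rely on the Elasticsearch extension shown later in this diff, which registers the `Date` and `Long` transformers):

[source,java]
----------------------------------------------------------------
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

import java.io.IOException;
import java.util.Date;

public class TimeFieldSketch {
    static XContentBuilder timestampsAsXContent(long millis) throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.humanReadable(true);
        builder.startObject();
        builder.timeField("created", new Date(millis));            // uses the transformer registered for Date
        builder.timeField("updated_in_millis", "updated", millis); // readable form only when humanReadable is set
        builder.endObject();
        return builder;
    }
}
----------------------------------------------------------------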

View File

@ -20,6 +20,7 @@
package org.elasticsearch.common.xcontent;
import java.util.Map;
import java.util.function.Function;
/**
* This interface provides a way for non-JDK classes to plug in a way to serialize to xcontent.
@ -61,4 +62,20 @@ public interface XContentBuilderExtension {
* @return a map of class name to transformer used to retrieve raw value
*/
Map<Class<?>, XContentBuilder.HumanReadableTransformer> getXContentHumanReadableTransformers();
/**
* Used for plugging a transformer for a date or time type object into a String (or other
* encodable object).
*
* For example:
*
* <pre>
* {@code
* final DateTimeFormatter datePrinter = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC);
* Map<Class<?>, Function<Object, Object>> transformers = new HashMap<>();
* transformers.put(Date.class, d -> datePrinter.print(((Date) d).getTime()));
* }
* </pre>
*/
Map<Class<?>, Function<Object, Object>> getDateTransformers();
}
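
For a plugin that wants to emit its own date-like type, the new hook boils down to returning a map from class to conversion function. A hypothetical sketch follows; `java.time.LocalDate` support is invented for illustration and is not something this change registers:

[source,java]
----------------------------------------------------------------
import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class LocalDateTransformerSketch {
    // shape matches XContentBuilderExtension#getDateTransformers(); the transformer converts the
    // value into something the builder already knows how to write (here, an ISO-8601 string)
    static Map<Class<?>, Function<Object, Object>> dateTransformers() {
        Map<Class<?>, Function<Object, Object>> transformers = new HashMap<>();
        transformers.put(LocalDate.class, d -> ((LocalDate) d).format(DateTimeFormatter.ISO_LOCAL_DATE));
        return transformers;
    }
}
----------------------------------------------------------------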

View File

@ -23,13 +23,23 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Instant;
import org.joda.time.MutableDateTime;
import org.joda.time.ReadableInstant;
import org.joda.time.format.DateTimeFormatter;
import org.joda.time.format.ISODateTimeFormat;
import org.joda.time.tz.CachedDateTimeZone;
import org.joda.time.tz.FixedDateTimeZone;
import java.util.Calendar;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
/**
* SPI extensions for Elasticsearch-specific classes (like the Lucene or Joda
@ -38,6 +48,8 @@ import java.util.Objects;
*/
public class XContentElasticsearchExtension implements XContentBuilderExtension {
public static final DateTimeFormatter DEFAULT_DATE_PRINTER = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC);
@Override
public Map<Class<?>, XContentBuilder.Writer> getXContentWriters() {
Map<Class<?>, XContentBuilder.Writer> writers = new HashMap<>();
@ -47,6 +59,8 @@ public class XContentElasticsearchExtension implements XContentBuilderExtension
writers.put(DateTimeZone.class, (b, v) -> b.value(Objects.toString(v)));
writers.put(CachedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v)));
writers.put(FixedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v)));
writers.put(MutableDateTime.class, XContentBuilder::timeValue);
writers.put(DateTime.class, XContentBuilder::timeValue);
writers.put(BytesReference.class, (b, v) -> {
if (v == null) {
@ -75,4 +89,18 @@ public class XContentElasticsearchExtension implements XContentBuilderExtension
transformers.put(ByteSizeValue.class, v -> ((ByteSizeValue) v).getBytes());
return transformers;
}
@Override
public Map<Class<?>, Function<Object, Object>> getDateTransformers() {
Map<Class<?>, Function<Object, Object>> transformers = new HashMap<>();
transformers.put(Date.class, d -> DEFAULT_DATE_PRINTER.print(((Date) d).getTime()));
transformers.put(DateTime.class, d -> DEFAULT_DATE_PRINTER.print((DateTime) d));
transformers.put(MutableDateTime.class, d -> DEFAULT_DATE_PRINTER.print((MutableDateTime) d));
transformers.put(ReadableInstant.class, d -> DEFAULT_DATE_PRINTER.print((ReadableInstant) d));
transformers.put(Long.class, d -> DEFAULT_DATE_PRINTER.print((long) d));
transformers.put(Calendar.class, d -> DEFAULT_DATE_PRINTER.print(((Calendar) d).getTimeInMillis()));
transformers.put(GregorianCalendar.class, d -> DEFAULT_DATE_PRINTER.print(((Calendar) d).getTimeInMillis()));
transformers.put(Instant.class, d -> DEFAULT_DATE_PRINTER.print((Instant) d));
return transformers;
}
}

View File

@ -0,0 +1,52 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.xcontent;
import java.util.Optional;
/**
* Thrown when one of the XContent parsers cannot parse something.
*/
public class XContentParseException extends IllegalArgumentException {
private final Optional<XContentLocation> location;
public XContentParseException(String message) {
this(null, message);
}
public XContentParseException(XContentLocation location, String message) {
super(message);
this.location = Optional.ofNullable(location);
}
public int getLineNumber() {
return location.map(l -> l.lineNumber).orElse(-1);
}
public int getColumnNumber() {
return location.map(l -> l.columnNumber).orElse(-1);
}
@Override
public String getMessage() {
return location.map(l -> "[" + l.toString() + "] ").orElse("") + super.getMessage();
}
}
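
Since the location is optional, callers get `-1` line and column values rather than an empty `Optional` to unwrap. A small sketch of surfacing the position (the helper itself is made up):

[source,java]
----------------------------------------------------------------
import org.elasticsearch.common.xcontent.XContentParseException;

public class ParseErrorReportingSketch {
    static String describe(XContentParseException e) {
        // getMessage() already prefixes "[line:col] " when a location is known
        return "x-content parse failure at line " + e.getLineNumber()
            + ", column " + e.getColumnNumber() + ": " + e.getMessage();
    }
}
----------------------------------------------------------------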

View File

@ -235,7 +235,7 @@ public interface XContentParser extends Closeable {
* as well as via their <code>String</code> variants of the separated value methods.
* Note: Do not use this method to read values written with:
* <ul>
* <li>{@link XContentBuilder#utf8Field(String, byte[], int, int)}</li>
* <li>{@link XContentBuilder#utf8Value(byte[], int, int)}</li>
* </ul>
*
* these methods write UTF-8 encoded strings and must be read through:

View File

@ -63,7 +63,7 @@ public class HttpInfo implements Writeable, ToXContentFragment {
builder.startObject(Fields.HTTP);
builder.array(Fields.BOUND_ADDRESS, (Object[]) address.boundAddresses());
builder.field(Fields.PUBLISH_ADDRESS, address.publishAddress().toString());
builder.byteSizeField(Fields.MAX_CONTENT_LENGTH_IN_BYTES, Fields.MAX_CONTENT_LENGTH, maxContentLength);
builder.humanReadableField(Fields.MAX_CONTENT_LENGTH_IN_BYTES, Fields.MAX_CONTENT_LENGTH, maxContentLength());
builder.endObject();
return builder;
}

View File

@ -24,7 +24,6 @@ import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Sort;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.util.Accountable;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -40,6 +39,7 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.env.ShardLockObtainFailedException;

View File

@ -128,7 +128,7 @@ public class QueryCacheStats implements Streamable, ToXContentFragment {
@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject(Fields.QUERY_CACHE);
builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, ramBytesUsed);
builder.humanReadableField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, getMemorySize());
builder.field(Fields.TOTAL_COUNT, getTotalCount());
builder.field(Fields.HIT_COUNT, getHitCount());
builder.field(Fields.MISS_COUNT, getMissCount());

View File

@ -92,7 +92,7 @@ public class RequestCacheStats implements Streamable, ToXContentFragment {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.REQUEST_CACHE_STATS);
builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize);
builder.humanReadableField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, getMemorySize());
builder.field(Fields.EVICTIONS, getEvictions());
builder.field(Fields.HIT_COUNT, getHitCount());
builder.field(Fields.MISS_COUNT, getMissCount());

View File

@ -1,144 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.engine;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.store.Directory;
import org.elasticsearch.Assertions;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
import java.io.IOException;
import java.nio.file.Path;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* This class contains utility methods for mutating the shard lucene index and translog as a preparation to be opened.
*/
public abstract class EngineDiskUtils {
/**
* creates an empty lucene index and a corresponding empty translog. Any existing data will be deleted.
*/
public static void createEmpty(final Directory dir, final Path translogPath, final ShardId shardId) throws IOException {
try (IndexWriter writer = newIndexWriter(true, dir)) {
final String translogUuid = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId);
final Map<String, String> map = new HashMap<>();
map.put(Translog.TRANSLOG_GENERATION_KEY, "1");
map.put(Translog.TRANSLOG_UUID_KEY, translogUuid);
map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID());
map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED));
map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(SequenceNumbers.NO_OPS_PERFORMED));
map.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, "-1");
updateCommitData(writer, map);
}
}
/**
* Converts an existing lucene index and marks it with a new history uuid. Also creates a new empty translog file.
* This is used to make sure no existing shard will recovery from this index using ops based recovery.
*/
public static void bootstrapNewHistoryFromLuceneIndex(final Directory dir, final Path translogPath, final ShardId shardId)
throws IOException {
try (IndexWriter writer = newIndexWriter(false, dir)) {
final Map<String, String> userData = getUserData(writer);
final long maxSeqNo = Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO));
final String translogUuid = Translog.createEmptyTranslog(translogPath, maxSeqNo, shardId);
final Map<String, String> map = new HashMap<>();
map.put(Translog.TRANSLOG_GENERATION_KEY, "1");
map.put(Translog.TRANSLOG_UUID_KEY, translogUuid);
map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID());
map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo));
updateCommitData(writer, map);
}
}
/**
* Creates a new empty translog and associates it with an existing lucene index.
*/
public static void createNewTranslog(final Directory dir, final Path translogPath, long initialGlobalCheckpoint, final ShardId shardId)
throws IOException {
if (Assertions.ENABLED) {
final List<IndexCommit> existingCommits = DirectoryReader.listCommits(dir);
assert existingCommits.size() == 1 : "creating a translog translog should have one commit, commits[" + existingCommits + "]";
SequenceNumbers.CommitInfo commitInfo = Store.loadSeqNoInfo(existingCommits.get(0));
assert commitInfo.localCheckpoint >= initialGlobalCheckpoint :
"trying to create a shard whose local checkpoint [" + commitInfo.localCheckpoint + "] is < global checkpoint ["
+ initialGlobalCheckpoint + "]";
}
try (IndexWriter writer = newIndexWriter(false, dir)) {
final String translogUuid = Translog.createEmptyTranslog(translogPath, initialGlobalCheckpoint, shardId);
final Map<String, String> map = new HashMap<>();
map.put(Translog.TRANSLOG_GENERATION_KEY, "1");
map.put(Translog.TRANSLOG_UUID_KEY, translogUuid);
updateCommitData(writer, map);
}
}
/**
* Checks that the Lucene index contains a history uuid marker. If not, a new one is generated and committed.
*/
public static void ensureIndexHasHistoryUUID(final Directory dir) throws IOException {
try (IndexWriter writer = newIndexWriter(false, dir)) {
final Map<String, String> userData = getUserData(writer);
if (userData.containsKey(Engine.HISTORY_UUID_KEY) == false) {
updateCommitData(writer, Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()));
}
}
}
private static void updateCommitData(IndexWriter writer, Map<String, String> keysToUpdate) throws IOException {
final Map<String, String> userData = getUserData(writer);
userData.putAll(keysToUpdate);
writer.setLiveCommitData(userData.entrySet());
writer.commit();
}
private static Map<String, String> getUserData(IndexWriter writer) {
final Map<String, String> userData = new HashMap<>();
writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue()));
return userData;
}
private static IndexWriter newIndexWriter(final boolean create, final Directory dir) throws IOException {
IndexWriterConfig iwc = new IndexWriterConfig(null)
.setCommitOnClose(false)
// we don't want merges to happen here - we call maybe merge on the engine
// later once we stared it up otherwise we would need to wait for it here
// we also don't specify a codec here and merges should use the engines for this index
.setMergePolicy(NoMergePolicy.INSTANCE)
.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND);
return new IndexWriter(dir, iwc);
}
}

View File

@ -81,6 +81,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -136,6 +137,7 @@ public class InternalEngine extends Engine {
private final AtomicBoolean pendingTranslogRecovery = new AtomicBoolean(false);
public static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID = "max_unsafe_auto_id_timestamp";
private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1);
private final AtomicLong maxSeqNoOfNonAppendOnlyOperations = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
private final CounterMetric numVersionLookups = new CounterMetric();
private final CounterMetric numIndexVersionsLookups = new CounterMetric();
/**
@ -186,7 +188,7 @@ public class InternalEngine extends Engine {
this.combinedDeletionPolicy = new CombinedDeletionPolicy(logger, translogDeletionPolicy,
translog::getLastSyncedGlobalCheckpoint, startingCommit);
writer = createWriter(startingCommit);
updateMaxUnsafeAutoIdTimestampFromWriter(writer);
bootstrapAppendOnlyInfoFromWriter(writer);
historyUUID = loadOrGenerateHistoryUUID(writer);
Objects.requireNonNull(historyUUID, "history uuid should not be null");
indexWriter = writer;
@ -345,15 +347,20 @@ public class InternalEngine extends Engine {
}
}
private void updateMaxUnsafeAutoIdTimestampFromWriter(IndexWriter writer) {
long commitMaxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
private void bootstrapAppendOnlyInfoFromWriter(IndexWriter writer) {
for (Map.Entry<String, String> entry : writer.getLiveCommitData()) {
if (entry.getKey().equals(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID)) {
commitMaxUnsafeAutoIdTimestamp = Long.parseLong(entry.getValue());
break;
final String key = entry.getKey();
if (key.equals(MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID)) {
assert maxUnsafeAutoIdTimestamp.get() == -1 :
"max unsafe timestamp was assigned already [" + maxUnsafeAutoIdTimestamp.get() + "]";
maxUnsafeAutoIdTimestamp.set(Long.parseLong(entry.getValue()));
}
if (key.equals(SequenceNumbers.MAX_SEQ_NO)) {
assert maxSeqNoOfNonAppendOnlyOperations.get() == -1 :
"max unsafe append-only seq# was assigned already [" + maxSeqNoOfNonAppendOnlyOperations.get() + "]";
maxSeqNoOfNonAppendOnlyOperations.set(Long.parseLong(entry.getValue()));
}
}
maxUnsafeAutoIdTimestamp.set(Math.max(maxUnsafeAutoIdTimestamp.get(), commitMaxUnsafeAutoIdTimestamp));
}
@Override
@ -802,11 +809,24 @@ public class InternalEngine extends Engine {
protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException {
assertNonPrimaryOrigin(index);
final IndexingStrategy plan;
if (canOptimizeAddDocument(index) && mayHaveBeenIndexedBefore(index) == false) {
// no need to deal with out of order delivery - we never saw this one
final boolean appendOnlyRequest = canOptimizeAddDocument(index);
if (appendOnlyRequest && mayHaveBeenIndexedBefore(index) == false && index.seqNo() > maxSeqNoOfNonAppendOnlyOperations.get()) {
/*
* As soon as an append-only request is indexed into the primary, it can be exposed to a search and users can then issue
* a follow-up operation on it. In rare cases, the follow-up operation can arrive and be processed on a replica before
* the original append-only. In this case we can't simply proceed with the append-only without consulting the version map.
* If a replica has seen a non-append-only operation with a higher seqno than the seqno of an append-only, it may have seen
* the document of that append-only request. However, if the seqno of an append-only is higher than the seqno of any non-append-only
* request, we can assert that the replica has not seen the document of that append-only request, thus we can apply the optimization.
*/
assert index.version() == 1L : "can optimize on replicas but incoming version is [" + index.version() + "]";
plan = IndexingStrategy.optimizedAppendOnly(index.seqNo());
} else {
if (appendOnlyRequest == false) {
maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(index.seqNo(), curr));
assert maxSeqNoOfNonAppendOnlyOperations.get() >= index.seqNo() : "max_seqno of non-append-only was not updated;" +
"max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of index [" + index.seqNo() + "]";
}
versionMap.enforceSafeAccess();
// drop out of order operations
assert index.versionType().versionTypeForReplicationAndRecovery() == index.versionType() :
@ -950,6 +970,11 @@ public class InternalEngine extends Engine {
return mayHaveBeenIndexBefore;
}
// for testing
long getMaxSeqNoOfNonAppendOnlyOperations() {
return maxSeqNoOfNonAppendOnlyOperations.get();
}
private static void index(final List<ParseContext.Document> docs, final IndexWriter indexWriter) throws IOException {
if (docs.size() > 1) {
indexWriter.addDocuments(docs);
@ -1109,6 +1134,9 @@ public class InternalEngine extends Engine {
assert delete.versionType().versionTypeForReplicationAndRecovery() == delete.versionType() :
"resolving out of order delivery based on versioning but version type isn't fit for it. got ["
+ delete.versionType() + "]";
maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(delete.seqNo(), curr));
assert maxSeqNoOfNonAppendOnlyOperations.get() >= delete.seqNo() : "max_seqno of non-append-only was not updated;" +
"max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of delete [" + delete.seqNo() + "]";
// unlike the primary, replicas don't really care to about found status of documents
// this allows to ignore the case where a document was found in the live version maps in
// a delete state and return true for the found flag in favor of code simplicity
@ -1558,15 +1586,41 @@ public class InternalEngine extends Engine {
}
private void pruneDeletedTombstones() {
/*
* We need to deploy two different trimming strategies for GC deletes on primary and replicas. Delete operations on primary
* are remembered for at least one GC delete cycle and trimmed periodically. This is, at the moment, the best we can do on
* primary for user facing APIs but this arbitrary time limit is problematic for replicas. On replicas however we should
* trim only deletes whose seqno is at most the local checkpoint. This requirement is explained as follows.
*
* Suppose o1 and o2 are two operations on the same document with seq#(o1) < seq#(o2), and o2 arrives before o1 on the replica.
* o2 is processed normally since it arrives first; when o1 arrives it should be discarded:
* - If seq#(o1) <= LCP, then it will not be added to Lucene, as it was already previously added.
* - If seq#(o1) > LCP, then it depends on the nature of o2:
* *) If o2 is a delete then its seq# is recorded in the VersionMap, since seq#(o2) > seq#(o1) > LCP,
* so a lookup can find it and determine that o1 is stale.
* *) If o2 is an indexing then its seq# is either in Lucene (if refreshed) or the VersionMap (if not refreshed yet),
* so a real-time lookup can find it and determine that o1 is stale.
*
* Here we prefer to deploy a single trimming strategy, which satisfies two constraints, on both primary and replicas because:
* - It's simpler - no need to distinguish if an engine is running at primary mode or replica mode or being promoted.
* - If a replica subsequently is promoted, user experience is maintained as that replica remembers deletes for the last GC cycle.
*
* However, the version map may consume less memory if we deploy two different trimming strategies for primary and replicas.
*/
final long timeMSec = engineConfig.getThreadPool().relativeTimeInMillis();
versionMap.pruneTombstones(timeMSec, engineConfig.getIndexSettings().getGcDeletesInMillis());
final long maxTimestampToPrune = timeMSec - engineConfig.getIndexSettings().getGcDeletesInMillis();
versionMap.pruneTombstones(maxTimestampToPrune, localCheckpointTracker.getCheckpoint());
lastDeleteVersionPruneTimeMSec = timeMSec;
}
// testing
void clearDeletedTombstones() {
// clean with current time Long.MAX_VALUE and interval 0 since we use a greater than relationship here.
versionMap.pruneTombstones(Long.MAX_VALUE, 0);
versionMap.pruneTombstones(Long.MAX_VALUE, localCheckpointTracker.getMaxSeqNo());
}
// for testing
final Collection<DeleteVersionValue> getDeletedTombstones() {
return versionMap.getAllTombstones().values();
}
@Override
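
The long comment above reduces to one rule on the replica path: an append-only operation may skip the version map only while its seq# is above every non-append-only seq# seen so far. A condensed sketch of that rule, using simplified stand-in types rather than the engine's real classes:

[source,java]
----------------------------------------------------------------
import java.util.concurrent.atomic.AtomicLong;

public class AppendOnlyOptimizationSketch {
    private final AtomicLong maxSeqNoOfNonAppendOnlyOps = new AtomicLong(-1);

    boolean canOptimizeOnReplica(long seqNo, boolean appendOnly, boolean mayHaveBeenIndexedBefore) {
        if (appendOnly == false) {
            // remember the highest non-append-only seq# so later append-only ops can be checked against it
            maxSeqNoOfNonAppendOnlyOps.updateAndGet(curr -> Math.max(seqNo, curr));
            return false;
        }
        // optimize only if this doc cannot have been touched by an earlier (lower seq#) follow-up operation
        return mayHaveBeenIndexedBefore == false && seqNo > maxSeqNoOfNonAppendOnlyOps.get();
    }
}
----------------------------------------------------------------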

View File

@ -375,21 +375,25 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta
}
}
private boolean canRemoveTombstone(long currentTime, long pruneInterval, DeleteVersionValue versionValue) {
// check if the value is old enough to be removed
final boolean isTooOld = currentTime - versionValue.time > pruneInterval;
private boolean canRemoveTombstone(long maxTimestampToPrune, long maxSeqNoToPrune, DeleteVersionValue versionValue) {
// check if the value is old enough and safe to be removed
final boolean isTooOld = versionValue.time < maxTimestampToPrune;
final boolean isSafeToPrune = versionValue.seqNo <= maxSeqNoToPrune;
// the version value can't be removed if it's not yet flushed to lucene,
// i.e. it's still part of the current maps object
final boolean isNotTrackedByCurrentMaps = versionValue.time < maps.getMinDeleteTimestamp();
return isTooOld && isNotTrackedByCurrentMaps;
return isTooOld && isSafeToPrune && isNotTrackedByCurrentMaps;
}
void pruneTombstones(long currentTime, long pruneInterval) {
/**
* Try to prune tombstones whose timestamp is less than maxTimestampToPrune and whose seqno is at most maxSeqNoToPrune.
*/
void pruneTombstones(long maxTimestampToPrune, long maxSeqNoToPrune) {
for (Map.Entry<BytesRef, DeleteVersionValue> entry : tombstones.entrySet()) {
// we do check before we actually lock the key - this way we don't need to acquire the lock for tombstones that are not
// prune-able. If the tombstone changes concurrently we will re-read and step out below; if we can't collect it now,
// we won't collect the tombstone below since it must be newer than this one.
if (canRemoveTombstone(currentTime, pruneInterval, entry.getValue())) {
if (canRemoveTombstone(maxTimestampToPrune, maxSeqNoToPrune, entry.getValue())) {
final BytesRef uid = entry.getKey();
try (Releasable lock = keyedLock.tryAcquire(uid)) {
// we use tryAcquire here since this is a best effort and we try to be least disruptive
@ -399,7 +403,7 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta
// Must re-get it here, vs using entry.getValue(), in case the uid was indexed/deleted since we pulled the iterator:
final DeleteVersionValue versionValue = tombstones.get(uid);
if (versionValue != null) {
if (canRemoveTombstone(currentTime, pruneInterval, versionValue)) {
if (canRemoveTombstone(maxTimestampToPrune, maxSeqNoToPrune, versionValue)) {
removeTombstoneUnderLock(uid);
}
}
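
Put differently, a tombstone now has to clear three independent gates before it is pruned. A compact restatement of the predicate with plain parameters, mirroring the fields above but not part of LiveVersionMap itself:

[source,java]
----------------------------------------------------------------
public class TombstonePruningSketch {
    // a delete tombstone may be pruned only when all three gates pass
    static boolean canPrune(long tombstoneTime, long tombstoneSeqNo,
                            long maxTimestampToPrune, long maxSeqNoToPrune,
                            long minDeleteTimestampOfCurrentMaps) {
        final boolean isTooOld = tombstoneTime < maxTimestampToPrune;            // past the GC-deletes window
        final boolean isSafeToPrune = tombstoneSeqNo <= maxSeqNoToPrune;         // at or below the local checkpoint
        final boolean isNotTrackedByCurrentMaps =
            tombstoneTime < minDeleteTimestampOfCurrentMaps;                     // already flushed out of the live maps
        return isTooOld && isSafeToPrune && isNotTrackedByCurrentMaps;
    }
}
----------------------------------------------------------------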

View File

@ -291,22 +291,22 @@ public class SegmentsStats implements Streamable, ToXContentFragment {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.SEGMENTS);
builder.field(Fields.COUNT, count);
builder.byteSizeField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, memoryInBytes);
builder.byteSizeField(Fields.TERMS_MEMORY_IN_BYTES, Fields.TERMS_MEMORY, termsMemoryInBytes);
builder.byteSizeField(Fields.STORED_FIELDS_MEMORY_IN_BYTES, Fields.STORED_FIELDS_MEMORY, storedFieldsMemoryInBytes);
builder.byteSizeField(Fields.TERM_VECTORS_MEMORY_IN_BYTES, Fields.TERM_VECTORS_MEMORY, termVectorsMemoryInBytes);
builder.byteSizeField(Fields.NORMS_MEMORY_IN_BYTES, Fields.NORMS_MEMORY, normsMemoryInBytes);
builder.byteSizeField(Fields.POINTS_MEMORY_IN_BYTES, Fields.POINTS_MEMORY, pointsMemoryInBytes);
builder.byteSizeField(Fields.DOC_VALUES_MEMORY_IN_BYTES, Fields.DOC_VALUES_MEMORY, docValuesMemoryInBytes);
builder.byteSizeField(Fields.INDEX_WRITER_MEMORY_IN_BYTES, Fields.INDEX_WRITER_MEMORY, indexWriterMemoryInBytes);
builder.byteSizeField(Fields.VERSION_MAP_MEMORY_IN_BYTES, Fields.VERSION_MAP_MEMORY, versionMapMemoryInBytes);
builder.byteSizeField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, bitsetMemoryInBytes);
builder.humanReadableField(Fields.MEMORY_IN_BYTES, Fields.MEMORY, getMemory());
builder.humanReadableField(Fields.TERMS_MEMORY_IN_BYTES, Fields.TERMS_MEMORY, getTermsMemory());
builder.humanReadableField(Fields.STORED_FIELDS_MEMORY_IN_BYTES, Fields.STORED_FIELDS_MEMORY, getStoredFieldsMemory());
builder.humanReadableField(Fields.TERM_VECTORS_MEMORY_IN_BYTES, Fields.TERM_VECTORS_MEMORY, getTermVectorsMemory());
builder.humanReadableField(Fields.NORMS_MEMORY_IN_BYTES, Fields.NORMS_MEMORY, getNormsMemory());
builder.humanReadableField(Fields.POINTS_MEMORY_IN_BYTES, Fields.POINTS_MEMORY, getPointsMemory());
builder.humanReadableField(Fields.DOC_VALUES_MEMORY_IN_BYTES, Fields.DOC_VALUES_MEMORY, getDocValuesMemory());
builder.humanReadableField(Fields.INDEX_WRITER_MEMORY_IN_BYTES, Fields.INDEX_WRITER_MEMORY, getIndexWriterMemory());
builder.humanReadableField(Fields.VERSION_MAP_MEMORY_IN_BYTES, Fields.VERSION_MAP_MEMORY, getVersionMapMemory());
builder.humanReadableField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, getBitsetMemory());
builder.field(Fields.MAX_UNSAFE_AUTO_ID_TIMESTAMP, maxUnsafeAutoIdTimestamp);
builder.startObject(Fields.FILE_SIZES);
for (Iterator<ObjectObjectCursor<String, Long>> it = fileSizes.iterator(); it.hasNext();) {
ObjectObjectCursor<String, Long> entry = it.next();
builder.startObject(entry.key);
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, entry.value);
builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(entry.value));
builder.field(Fields.DESCRIPTION, fileDescriptions.getOrDefault(entry.key, "Others"));
builder.endObject();
}

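The byteSizeField-to-humanReadableField migration above repeats across the stats classes touched by this commit. A minimal sketch of the pattern, using only calls visible in this diff; the "size_in_bytes"/"size" field pair and the sketch class are hypothetical, not part of the commit:

import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import java.io.IOException;

class HumanReadableFieldSketch {
    static XContentBuilder render(long sizeInBytes, boolean humanReadable) throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder().humanReadable(humanReadable);
        builder.startObject();
        // before: builder.byteSizeField("size_in_bytes", "size", sizeInBytes);
        builder.humanReadableField("size_in_bytes", "size", new ByteSizeValue(sizeInBytes));
        builder.endObject();
        // always writes the raw "size_in_bytes" value; adds a human-readable "size" field when humanReadable is true
        return builder;
    }
}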
View File

@ -99,7 +99,7 @@ public class FieldDataStats implements Streamable, ToXContentFragment {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(FIELDDATA);
builder.byteSizeField(MEMORY_SIZE_IN_BYTES, MEMORY_SIZE, memorySize);
builder.humanReadableField(MEMORY_SIZE_IN_BYTES, MEMORY_SIZE, getMemorySize());
builder.field(EVICTIONS, getEvictions());
if (fields != null) {
fields.toXContent(builder, FIELDS, MEMORY_SIZE_IN_BYTES, MEMORY_SIZE);

View File

View File

@ -187,14 +187,17 @@ public class MergeStats implements Streamable, ToXContentFragment {
builder.startObject(Fields.MERGES);
builder.field(Fields.CURRENT, current);
builder.field(Fields.CURRENT_DOCS, currentNumDocs);
builder.byteSizeField(Fields.CURRENT_SIZE_IN_BYTES, Fields.CURRENT_SIZE, currentSizeInBytes);
builder.humanReadableField(Fields.CURRENT_SIZE_IN_BYTES, Fields.CURRENT_SIZE, getCurrentSize());
builder.field(Fields.TOTAL, total);
builder.humanReadableField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, getTotalTime());
builder.field(Fields.TOTAL_DOCS, totalNumDocs);
builder.byteSizeField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, totalSizeInBytes);
builder.humanReadableField(Fields.TOTAL_SIZE_IN_BYTES, Fields.TOTAL_SIZE, getTotalSize());
builder.humanReadableField(Fields.TOTAL_STOPPED_TIME_IN_MILLIS, Fields.TOTAL_STOPPED_TIME, getTotalStoppedTime());
builder.humanReadableField(Fields.TOTAL_THROTTLED_TIME_IN_MILLIS, Fields.TOTAL_THROTTLED_TIME, getTotalThrottledTime());
builder.byteSizeField(Fields.TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES, Fields.TOTAL_THROTTLE_BYTES_PER_SEC, totalBytesPerSecAutoThrottle);
if (builder.humanReadable() && totalBytesPerSecAutoThrottle != -1) {
builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC).value(new ByteSizeValue(totalBytesPerSecAutoThrottle).toString());
}
builder.field(Fields.TOTAL_THROTTLE_BYTES_PER_SEC_IN_BYTES, totalBytesPerSecAutoThrottle);
builder.endObject();
return builder;
}

View File

@ -31,7 +31,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.xcontent.AbstractObjectParser;
import org.elasticsearch.common.xcontent.UnknownNamedObjectException;
import org.elasticsearch.common.xcontent.NamedObjectNotFoundException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParser;
@ -316,11 +316,11 @@ public abstract class AbstractQueryBuilder<QB extends AbstractQueryBuilder<QB>>
QueryBuilder result;
try {
result = parser.namedObject(QueryBuilder.class, queryName, null);
} catch (UnknownNamedObjectException e) {
} catch (NamedObjectNotFoundException e) {
// Preserve the error message from 5.0 until we have a compellingly better message so we don't break BWC.
// This intentionally doesn't include the causing exception because that'd change the "root_cause" of any unknown query errors
throw new ParsingException(new XContentLocation(e.getLineNumber(), e.getColumnNumber()),
"no [query] registered for [" + e.getName() + "]");
"no [query] registered for [" + queryName + "]");
}
//end_object of the specific query (e.g. match, multi_match etc.) element
if (parser.currentToken() != XContentParser.Token.END_OBJECT) {

View File

@ -132,6 +132,7 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
public NestedQueryBuilder innerHit(InnerHitBuilder innerHitBuilder) {
this.innerHitBuilder = innerHitBuilder;
innerHitBuilder.setIgnoreUnmapped(ignoreUnmapped);
return this;
}
@ -149,6 +150,9 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
*/
public NestedQueryBuilder ignoreUnmapped(boolean ignoreUnmapped) {
this.ignoreUnmapped = ignoreUnmapped;
if (innerHitBuilder != null) {
innerHitBuilder.setIgnoreUnmapped(ignoreUnmapped);
}
return this;
}

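A minimal usage sketch of the behavior this change targets; the "comments" nested path and the sketch class are hypothetical. With the propagation above, ignore_unmapped reaches the inner hits regardless of whether it is set before or after innerHit():

import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.index.query.InnerHitBuilder;
import org.elasticsearch.index.query.NestedQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

class NestedIgnoreUnmappedSketch {
    static NestedQueryBuilder build() {
        return new NestedQueryBuilder("comments", QueryBuilders.matchAllQuery(), ScoreMode.Avg)
            .innerHit(new InnerHitBuilder())  // inner hits now inherit ignore_unmapped ...
            .ignoreUnmapped(true);            // ... even when it is set after innerHit()
    }
}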
View File

@ -23,19 +23,20 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
public class SearchStats implements Streamable, ToXContentFragment {
public class SearchStats implements Writeable, ToXContentFragment {
public static class Stats implements Streamable, ToXContentFragment {
public static class Stats implements Writeable, ToXContentFragment {
private long queryCount;
private long queryTimeInMillis;
@ -53,8 +54,8 @@ public class SearchStats implements Streamable, ToXContentFragment {
private long suggestTimeInMillis;
private long suggestCurrent;
Stats() {
private Stats() {
// for internal use, initializes all counts to 0
}
public Stats(
@ -78,16 +79,24 @@ public class SearchStats implements Streamable, ToXContentFragment {
this.suggestCount = suggestCount;
this.suggestTimeInMillis = suggestTimeInMillis;
this.suggestCurrent = suggestCurrent;
}
public Stats(Stats stats) {
this(
stats.queryCount, stats.queryTimeInMillis, stats.queryCurrent,
stats.fetchCount, stats.fetchTimeInMillis, stats.fetchCurrent,
stats.scrollCount, stats.scrollTimeInMillis, stats.scrollCurrent,
stats.suggestCount, stats.suggestTimeInMillis, stats.suggestCurrent
);
private Stats(StreamInput in) throws IOException {
queryCount = in.readVLong();
queryTimeInMillis = in.readVLong();
queryCurrent = in.readVLong();
fetchCount = in.readVLong();
fetchTimeInMillis = in.readVLong();
fetchCurrent = in.readVLong();
scrollCount = in.readVLong();
scrollTimeInMillis = in.readVLong();
scrollCurrent = in.readVLong();
suggestCount = in.readVLong();
suggestTimeInMillis = in.readVLong();
suggestCurrent = in.readVLong();
}
public void add(Stats stats) {
@ -173,28 +182,7 @@ public class SearchStats implements Streamable, ToXContentFragment {
}
public static Stats readStats(StreamInput in) throws IOException {
Stats stats = new Stats();
stats.readFrom(in);
return stats;
}
@Override
public void readFrom(StreamInput in) throws IOException {
queryCount = in.readVLong();
queryTimeInMillis = in.readVLong();
queryCurrent = in.readVLong();
fetchCount = in.readVLong();
fetchTimeInMillis = in.readVLong();
fetchCurrent = in.readVLong();
scrollCount = in.readVLong();
scrollTimeInMillis = in.readVLong();
scrollCurrent = in.readVLong();
suggestCount = in.readVLong();
suggestTimeInMillis = in.readVLong();
suggestCurrent = in.readVLong();
return new Stats(in);
}
@Override
@ -238,11 +226,11 @@ public class SearchStats implements Streamable, ToXContentFragment {
}
}
Stats totalStats;
long openContexts;
private final Stats totalStats;
private long openContexts;
@Nullable
Map<String, Stats> groupStats;
private Map<String, Stats> groupStats;
public SearchStats() {
totalStats = new Stats();
@ -254,27 +242,27 @@ public class SearchStats implements Streamable, ToXContentFragment {
this.groupStats = groupStats;
}
public void add(SearchStats searchStats) {
add(searchStats, true);
public SearchStats(StreamInput in) throws IOException {
totalStats = Stats.readStats(in);
openContexts = in.readVLong();
if (in.readBoolean()) {
groupStats = in.readMap(StreamInput::readString, Stats::readStats);
}
}
public void add(SearchStats searchStats, boolean includeTypes) {
public void add(SearchStats searchStats) {
if (searchStats == null) {
return;
}
addTotals(searchStats);
openContexts += searchStats.openContexts;
if (includeTypes && searchStats.groupStats != null && !searchStats.groupStats.isEmpty()) {
if (searchStats.groupStats != null && !searchStats.groupStats.isEmpty()) {
if (groupStats == null) {
groupStats = new HashMap<>(searchStats.groupStats.size());
}
for (Map.Entry<String, Stats> entry : searchStats.groupStats.entrySet()) {
Stats stats = groupStats.get(entry.getKey());
if (stats == null) {
groupStats.put(entry.getKey(), new Stats(entry.getValue()));
} else {
stats.add(entry.getValue());
}
groupStats.putIfAbsent(entry.getKey(), new Stats());
groupStats.get(entry.getKey()).add(entry.getValue());
}
}
}
@ -296,7 +284,7 @@ public class SearchStats implements Streamable, ToXContentFragment {
@Nullable
public Map<String, Stats> getGroupStats() {
return this.groupStats;
return this.groupStats != null ? Collections.unmodifiableMap(this.groupStats) : null;
}
@Override
@ -344,15 +332,6 @@ public class SearchStats implements Streamable, ToXContentFragment {
static final String SUGGEST_CURRENT = "suggest_current";
}
@Override
public void readFrom(StreamInput in) throws IOException {
totalStats = Stats.readStats(in);
openContexts = in.readVLong();
if (in.readBoolean()) {
groupStats = in.readMap(StreamInput::readString, Stats::readStats);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
totalStats.writeTo(out);

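The Streamable-to-Writeable conversion above follows the standard pattern: state is read in a StreamInput constructor instead of a mutable readFrom, so fields can become final. A stripped-down sketch of that pattern with a hypothetical ExampleStats class, not part of the commit:

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;

final class ExampleStats implements Writeable {
    private final long queryCount;   // can be final: set exactly once in either constructor

    ExampleStats(long queryCount) {
        this.queryCount = queryCount;
    }

    ExampleStats(StreamInput in) throws IOException {
        queryCount = in.readVLong();  // replaces the mutable readFrom(StreamInput)
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(queryCount);
    }
}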
View File

@ -40,13 +40,13 @@ import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.engine.EngineDiskUtils;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.engine.InternalEngine;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.snapshots.IndexShardRestoreFailedException;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.Repository;
@ -390,7 +390,11 @@ final class StoreRecovery {
recoveryState.getIndex().updateVersion(version);
if (recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) {
assert indexShouldExists;
EngineDiskUtils.bootstrapNewHistoryFromLuceneIndex(store.directory(), indexShard.shardPath().resolveTranslog(), shardId);
store.bootstrapNewHistory();
final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO));
final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId);
store.associateIndexWithNewTranslog(translogUUID);
} else if (indexShouldExists) {
// since we recover from local, just fill the files and size
try {
@ -402,7 +406,10 @@ final class StoreRecovery {
logger.debug("failed to list file details", e);
}
} else {
EngineDiskUtils.createEmpty(store.directory(), indexShard.shardPath().resolveTranslog(), shardId);
store.createEmpty();
final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(),
SequenceNumbers.NO_OPS_PERFORMED, shardId);
store.associateIndexWithNewTranslog(translogUUID);
}
indexShard.openEngineAndRecoverFromTranslog();
indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm());
@ -445,8 +452,12 @@ final class StoreRecovery {
}
final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName);
repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState());
EngineDiskUtils.bootstrapNewHistoryFromLuceneIndex(indexShard.store().directory(), indexShard.shardPath().resolveTranslog(),
shardId);
final Store store = indexShard.store();
store.bootstrapNewHistory();
final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO));
final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId);
store.associateIndexWithNewTranslog(translogUUID);
assert indexShard.shardRouting.primary() : "only primary shards can recover from store";
indexShard.openEngineAndRecoverFromTranslog();
indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm());

View File

@ -30,6 +30,8 @@ import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.index.IndexNotFoundException;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.SegmentCommitInfo;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.AlreadyClosedException;
@ -46,7 +48,6 @@ import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.core.internal.io.IOUtils;
import org.apache.lucene.util.Version;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
@ -69,11 +70,13 @@ import org.elasticsearch.common.util.SingleObjectCache;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.common.util.concurrent.RefCounted;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.env.ShardLockObtainFailedException;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.InternalEngine;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.IndexShard;
@ -155,7 +158,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
this(shardId, indexSettings, directoryService, shardLock, OnClose.EMPTY);
}
public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock, OnClose onClose) throws IOException {
public Store(ShardId shardId, IndexSettings indexSettings, DirectoryService directoryService, ShardLock shardLock,
OnClose onClose) throws IOException {
super(shardId, indexSettings);
final Settings settings = indexSettings.getSettings();
this.directory = new StoreDirectory(directoryService.newDirectory(), Loggers.getLogger("index.store.deletes", settings, shardId));
@ -1454,4 +1458,100 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
}
}
/**
* Creates an empty Lucene index and a corresponding empty translog. Any existing data will be deleted.
*/
public void createEmpty() throws IOException {
metadataLock.writeLock().lock();
try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.CREATE, directory)) {
final Map<String, String> map = new HashMap<>();
map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID());
map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(SequenceNumbers.NO_OPS_PERFORMED));
map.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(SequenceNumbers.NO_OPS_PERFORMED));
map.put(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID, "-1");
updateCommitData(writer, map);
} finally {
metadataLock.writeLock().unlock();
}
}
/**
* Marks an existing Lucene index with a new history uuid.
* This is used to make sure no existing shard will recover from this index using ops-based recovery.
*/
public void bootstrapNewHistory() throws IOException {
metadataLock.writeLock().lock();
try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory)) {
final Map<String, String> userData = getUserData(writer);
final long maxSeqNo = Long.parseLong(userData.get(SequenceNumbers.MAX_SEQ_NO));
final Map<String, String> map = new HashMap<>();
map.put(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID());
map.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, Long.toString(maxSeqNo));
updateCommitData(writer, map);
} finally {
metadataLock.writeLock().unlock();
}
}
/**
* Force-bakes the given translog generation as recovery information into the Lucene index. This is
* used when recovering from a snapshot or a peer file-based recovery, where a new empty translog is
* created and the existing Lucene index should be changed to use it.
*/
public void associateIndexWithNewTranslog(final String translogUUID) throws IOException {
metadataLock.writeLock().lock();
try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory)) {
if (translogUUID.equals(getUserData(writer).get(Translog.TRANSLOG_UUID_KEY))) {
throw new IllegalArgumentException("a new translog uuid can't be equal to existing one. got [" + translogUUID + "]");
}
final Map<String, String> map = new HashMap<>();
map.put(Translog.TRANSLOG_GENERATION_KEY, "1");
map.put(Translog.TRANSLOG_UUID_KEY, translogUUID);
updateCommitData(writer, map);
} finally {
metadataLock.writeLock().unlock();
}
}
/**
* Checks that the Lucene index contains a history uuid marker. If not, a new one is generated and committed.
*/
public void ensureIndexHasHistoryUUID() throws IOException {
metadataLock.writeLock().lock();
try (IndexWriter writer = newIndexWriter(IndexWriterConfig.OpenMode.APPEND, directory)) {
final Map<String, String> userData = getUserData(writer);
if (userData.containsKey(Engine.HISTORY_UUID_KEY) == false) {
updateCommitData(writer, Collections.singletonMap(Engine.HISTORY_UUID_KEY, UUIDs.randomBase64UUID()));
}
} finally {
metadataLock.writeLock().unlock();
}
}
private void updateCommitData(IndexWriter writer, Map<String, String> keysToUpdate) throws IOException {
final Map<String, String> userData = getUserData(writer);
userData.putAll(keysToUpdate);
writer.setLiveCommitData(userData.entrySet());
writer.commit();
}
private Map<String, String> getUserData(IndexWriter writer) {
final Map<String, String> userData = new HashMap<>();
writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue()));
return userData;
}
private IndexWriter newIndexWriter(IndexWriterConfig.OpenMode openMode, final Directory dir) throws IOException {
IndexWriterConfig iwc = new IndexWriterConfig(null)
.setCommitOnClose(false)
// we don't want merges to happen here - we call maybe merge on the engine
// later once we have started it up; otherwise we would need to wait for it here
// we also don't specify a codec here; merges should use the engine's codec for this index
.setMergePolicy(NoMergePolicy.INSTANCE)
.setOpenMode(openMode);
return new IndexWriter(dir, iwc);
}
}

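For context, the recovery paths earlier in this commit combine these new Store primitives roughly as follows. A condensed sketch whose class, method, and parameter names are illustrative; the calls themselves mirror the StoreRecovery changes above:

import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.Translog;
import java.io.IOException;
import java.nio.file.Path;

class EmptyShardBootstrapSketch {
    static void bootstrap(Store store, Path translogPath, ShardId shardId) throws IOException {
        store.createEmpty();                                              // fresh Lucene index with a new history uuid
        final String translogUUID = Translog.createEmptyTranslog(
            translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId);     // empty translog to pair with it
        store.associateIndexWithNewTranslog(translogUUID);                // bake the translog uuid into the Lucene commit
    }
}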
View File

@ -85,7 +85,7 @@ public class StoreStats implements Streamable, ToXContentFragment {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.STORE);
builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, sizeInBytes);
builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, size());
builder.endObject();
return builder;
}

View File

@ -23,6 +23,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -100,9 +101,9 @@ public class TranslogStats implements Streamable, ToXContentFragment {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("translog");
builder.field("operations", numberOfOperations);
builder.byteSizeField("size_in_bytes", "size", translogSizeInBytes);
builder.humanReadableField("size_in_bytes", "size", new ByteSizeValue(translogSizeInBytes));
builder.field("uncommitted_operations", uncommittedOperations);
builder.byteSizeField("uncommitted_size_in_bytes", "uncommitted_size", uncommittedSizeInBytes);
builder.humanReadableField("uncommitted_size_in_bytes", "uncommitted_size", new ByteSizeValue(uncommittedSizeInBytes));
builder.field("earliest_last_modified_age", earliestLastModifiedAge);
builder.endObject();
return builder;

View File

@ -27,6 +27,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.ToXContentObject;
@ -262,9 +263,9 @@ public class RecoveryState implements ToXContentFragment, Streamable {
builder.field(Fields.TYPE, recoverySource.getType());
builder.field(Fields.STAGE, stage.toString());
builder.field(Fields.PRIMARY, primary);
builder.dateField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, timer.startTime);
builder.timeField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, timer.startTime);
if (timer.stopTime > 0) {
builder.dateField(Fields.STOP_TIME_IN_MILLIS, Fields.STOP_TIME, timer.stopTime);
builder.timeField(Fields.STOP_TIME_IN_MILLIS, Fields.STOP_TIME, timer.stopTime);
}
builder.humanReadableField(Fields.TOTAL_TIME_IN_MILLIS, Fields.TOTAL_TIME, new TimeValue(timer.time()));
@ -634,9 +635,9 @@ public class RecoveryState implements ToXContentFragment, Streamable {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Fields.NAME, name);
builder.byteSizeField(Fields.LENGTH_IN_BYTES, Fields.LENGTH, length);
builder.humanReadableField(Fields.LENGTH_IN_BYTES, Fields.LENGTH, new ByteSizeValue(length));
builder.field(Fields.REUSED, reused);
builder.byteSizeField(Fields.RECOVERED_IN_BYTES, Fields.RECOVERED, recovered);
builder.humanReadableField(Fields.RECOVERED_IN_BYTES, Fields.RECOVERED, new ByteSizeValue(recovered));
builder.endObject();
return builder;
}
@ -905,9 +906,9 @@ public class RecoveryState implements ToXContentFragment, Streamable {
public synchronized XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
// stream size first, as it matters more and the files section can be long
builder.startObject(Fields.SIZE);
builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, totalBytes());
builder.byteSizeField(Fields.REUSED_IN_BYTES, Fields.REUSED, reusedBytes());
builder.byteSizeField(Fields.RECOVERED_IN_BYTES, Fields.RECOVERED, recoveredBytes());
builder.humanReadableField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, new ByteSizeValue(totalBytes()));
builder.humanReadableField(Fields.REUSED_IN_BYTES, Fields.REUSED, new ByteSizeValue(reusedBytes()));
builder.humanReadableField(Fields.RECOVERED_IN_BYTES, Fields.RECOVERED, new ByteSizeValue(recoveredBytes()));
builder.field(Fields.PERCENT, String.format(Locale.ROOT, "%1.1f%%", recoveredBytesPercent()));
builder.endObject();

View File

@ -40,7 +40,6 @@ import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineDiskUtils;
import org.elasticsearch.index.mapper.MapperException;
import org.elasticsearch.index.seqno.ReplicationTracker;
import org.elasticsearch.index.seqno.SequenceNumbers;
@ -439,11 +438,12 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
try {
store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
if (indexShard.indexSettings().getIndexVersionCreated().before(Version.V_6_0_0_rc1)) {
EngineDiskUtils.ensureIndexHasHistoryUUID(store.directory());
store.ensureIndexHasHistoryUUID();
}
// TODO: Assign the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2
EngineDiskUtils.createNewTranslog(store.directory(), indexShard.shardPath().resolveTranslog(),
SequenceNumbers.UNASSIGNED_SEQ_NO, shardId);
final String translogUUID =
Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId);
store.associateIndexWithNewTranslog(translogUUID);
} catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
// this is a fatal exception at this stage.
// this means we transferred files from the remote that have not be checksummed and they are

View File

@ -165,13 +165,13 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContentFragm
}
if (total != -1) {
builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, total);
builder.humanReadableField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, getTotal());
}
if (free != -1) {
builder.byteSizeField(Fields.FREE_IN_BYTES, Fields.FREE, free);
builder.humanReadableField(Fields.FREE_IN_BYTES, Fields.FREE, getFree());
}
if (available != -1) {
builder.byteSizeField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, available);
builder.humanReadableField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, getAvailable());
}
builder.endObject();
@ -530,8 +530,9 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContentFragm
builder.startObject(Fields.LEAST_ESTIMATE);
{
builder.field(Fields.PATH, leastDiskEstimate.getPath());
builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, leastDiskEstimate.getTotalBytes());
builder.byteSizeField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, leastDiskEstimate.getFreeBytes());
builder.humanReadableField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, new ByteSizeValue(leastDiskEstimate.getTotalBytes()));
builder.humanReadableField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE,
new ByteSizeValue(leastDiskEstimate.getFreeBytes()));
builder.field(Fields.USAGE_PERCENTAGE, leastDiskEstimate.getUsedDiskAsPercentage());
}
builder.endObject();
@ -541,8 +542,8 @@ public class FsInfo implements Iterable<FsInfo.Path>, Writeable, ToXContentFragm
builder.startObject(Fields.MOST_ESTIMATE);
{
builder.field(Fields.PATH, mostDiskEstimate.getPath());
builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, mostDiskEstimate.getTotalBytes());
builder.byteSizeField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, mostDiskEstimate.getFreeBytes());
builder.humanReadableField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, new ByteSizeValue(mostDiskEstimate.getTotalBytes()));
builder.humanReadableField(Fields.AVAILABLE_IN_BYTES, Fields.AVAILABLE, new ByteSizeValue(mostDiskEstimate.getFreeBytes()));
builder.field(Fields.USAGE_PERCENTAGE, mostDiskEstimate.getUsedDiskAsPercentage());
}
builder.endObject();

View File

@ -436,14 +436,14 @@ public class JvmInfo implements Writeable, ToXContentFragment {
builder.field(Fields.VM_NAME, vmName);
builder.field(Fields.VM_VERSION, vmVersion);
builder.field(Fields.VM_VENDOR, vmVendor);
builder.dateField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, startTime);
builder.timeField(Fields.START_TIME_IN_MILLIS, Fields.START_TIME, startTime);
builder.startObject(Fields.MEM);
builder.byteSizeField(Fields.HEAP_INIT_IN_BYTES, Fields.HEAP_INIT, mem.heapInit);
builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, mem.heapMax);
builder.byteSizeField(Fields.NON_HEAP_INIT_IN_BYTES, Fields.NON_HEAP_INIT, mem.nonHeapInit);
builder.byteSizeField(Fields.NON_HEAP_MAX_IN_BYTES, Fields.NON_HEAP_MAX, mem.nonHeapMax);
builder.byteSizeField(Fields.DIRECT_MAX_IN_BYTES, Fields.DIRECT_MAX, mem.directMemoryMax);
builder.humanReadableField(Fields.HEAP_INIT_IN_BYTES, Fields.HEAP_INIT, new ByteSizeValue(mem.heapInit));
builder.humanReadableField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, new ByteSizeValue(mem.heapMax));
builder.humanReadableField(Fields.NON_HEAP_INIT_IN_BYTES, Fields.NON_HEAP_INIT, new ByteSizeValue(mem.nonHeapInit));
builder.humanReadableField(Fields.NON_HEAP_MAX_IN_BYTES, Fields.NON_HEAP_MAX, new ByteSizeValue(mem.nonHeapMax));
builder.humanReadableField(Fields.DIRECT_MAX_IN_BYTES, Fields.DIRECT_MAX, new ByteSizeValue(mem.directMemoryMax));
builder.endObject();
builder.array(Fields.GC_COLLECTORS, gcCollectors);

View File

@ -194,23 +194,23 @@ public class JvmStats implements Writeable, ToXContentFragment {
builder.startObject(Fields.MEM);
builder.byteSizeField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, mem.heapUsed);
builder.humanReadableField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, new ByteSizeValue(mem.heapUsed));
if (mem.getHeapUsedPercent() >= 0) {
builder.field(Fields.HEAP_USED_PERCENT, mem.getHeapUsedPercent());
}
builder.byteSizeField(Fields.HEAP_COMMITTED_IN_BYTES, Fields.HEAP_COMMITTED, mem.heapCommitted);
builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, mem.heapMax);
builder.byteSizeField(Fields.NON_HEAP_USED_IN_BYTES, Fields.NON_HEAP_USED, mem.nonHeapUsed);
builder.byteSizeField(Fields.NON_HEAP_COMMITTED_IN_BYTES, Fields.NON_HEAP_COMMITTED, mem.nonHeapCommitted);
builder.humanReadableField(Fields.HEAP_COMMITTED_IN_BYTES, Fields.HEAP_COMMITTED, new ByteSizeValue(mem.heapCommitted));
builder.humanReadableField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, new ByteSizeValue(mem.heapMax));
builder.humanReadableField(Fields.NON_HEAP_USED_IN_BYTES, Fields.NON_HEAP_USED, new ByteSizeValue(mem.nonHeapUsed));
builder.humanReadableField(Fields.NON_HEAP_COMMITTED_IN_BYTES, Fields.NON_HEAP_COMMITTED, new ByteSizeValue(mem.nonHeapCommitted));
builder.startObject(Fields.POOLS);
for (MemoryPool pool : mem) {
builder.startObject(pool.getName());
builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, pool.used);
builder.byteSizeField(Fields.MAX_IN_BYTES, Fields.MAX, pool.max);
builder.humanReadableField(Fields.USED_IN_BYTES, Fields.USED, new ByteSizeValue(pool.used));
builder.humanReadableField(Fields.MAX_IN_BYTES, Fields.MAX, new ByteSizeValue(pool.max));
builder.byteSizeField(Fields.PEAK_USED_IN_BYTES, Fields.PEAK_USED, pool.peakUsed);
builder.byteSizeField(Fields.PEAK_MAX_IN_BYTES, Fields.PEAK_MAX, pool.peakMax);
builder.humanReadableField(Fields.PEAK_USED_IN_BYTES, Fields.PEAK_USED, new ByteSizeValue(pool.peakUsed));
builder.humanReadableField(Fields.PEAK_MAX_IN_BYTES, Fields.PEAK_MAX, new ByteSizeValue(pool.peakMax));
builder.endObject();
}
@ -241,8 +241,9 @@ public class JvmStats implements Writeable, ToXContentFragment {
for (BufferPool bufferPool : bufferPools) {
builder.startObject(bufferPool.getName());
builder.field(Fields.COUNT, bufferPool.getCount());
builder.byteSizeField(Fields.USED_IN_BYTES, Fields.USED, bufferPool.used);
builder.byteSizeField(Fields.TOTAL_CAPACITY_IN_BYTES, Fields.TOTAL_CAPACITY, bufferPool.totalCapacity);
builder.humanReadableField(Fields.USED_IN_BYTES, Fields.USED, new ByteSizeValue(bufferPool.used));
builder.humanReadableField(Fields.TOTAL_CAPACITY_IN_BYTES, Fields.TOTAL_CAPACITY,
new ByteSizeValue(bufferPool.totalCapacity));
builder.endObject();
}
builder.endObject();

View File

@ -113,7 +113,7 @@ public class ProcessStats implements Writeable, ToXContentFragment {
}
if (mem != null) {
builder.startObject(Fields.MEM);
builder.byteSizeField(Fields.TOTAL_VIRTUAL_IN_BYTES, Fields.TOTAL_VIRTUAL, mem.totalVirtual);
builder.humanReadableField(Fields.TOTAL_VIRTUAL_IN_BYTES, Fields.TOTAL_VIRTUAL, new ByteSizeValue(mem.totalVirtual));
builder.endObject();
}
builder.endObject();

View File

@ -0,0 +1,131 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import java.io.IOException;
/**
* A {@link SingleDimensionValuesSource} for binary source ({@link BytesRef}).
*/
class BinaryValuesSource extends SingleDimensionValuesSource<BytesRef> {
private final CheckedFunction<LeafReaderContext, SortedBinaryDocValues, IOException> docValuesFunc;
private final BytesRef[] values;
private BytesRef currentValue;
BinaryValuesSource(MappedFieldType fieldType, CheckedFunction<LeafReaderContext, SortedBinaryDocValues, IOException> docValuesFunc,
int size, int reverseMul) {
super(fieldType, size, reverseMul);
this.docValuesFunc = docValuesFunc;
this.values = new BytesRef[size];
}
@Override
public void copyCurrent(int slot) {
values[slot] = BytesRef.deepCopyOf(currentValue);
}
@Override
public int compare(int from, int to) {
return compareValues(values[from], values[to]);
}
@Override
int compareCurrent(int slot) {
return compareValues(currentValue, values[slot]);
}
@Override
int compareCurrentWithAfter() {
return compareValues(currentValue, afterValue);
}
int compareValues(BytesRef v1, BytesRef v2) {
return v1.compareTo(v2) * reverseMul;
}
@Override
void setAfter(Comparable<?> value) {
if (value.getClass() == BytesRef.class) {
afterValue = (BytesRef) value;
} else if (value.getClass() == String.class) {
afterValue = new BytesRef((String) value);
} else {
throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName());
}
}
@Override
BytesRef toComparable(int slot) {
return values[slot];
}
@Override
LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector next) throws IOException {
final SortedBinaryDocValues dvs = docValuesFunc.apply(context);
return new LeafBucketCollector() {
@Override
public void collect(int doc, long bucket) throws IOException {
if (dvs.advanceExact(doc)) {
int num = dvs.docValueCount();
for (int i = 0; i < num; i++) {
currentValue = dvs.nextValue();
next.collect(doc, bucket);
}
}
}
};
}
@Override
LeafBucketCollector getLeafCollector(Comparable<?> value, LeafReaderContext context, LeafBucketCollector next) {
if (value.getClass() != BytesRef.class) {
throw new IllegalArgumentException("Expected BytesRef, got " + value.getClass());
}
currentValue = (BytesRef) value;
return new LeafBucketCollector() {
@Override
public void collect(int doc, long bucket) throws IOException {
next.collect(doc, bucket);
}
};
}
@Override
SortedDocsProducer createSortedDocsProducerOrNull(IndexReader reader, Query query) {
if (checkIfSortedDocsIsApplicable(reader, fieldType) == false ||
(query != null && query.getClass() != MatchAllDocsQuery.class)) {
return null;
}
return new TermsSortedDocsProducer(fieldType.name());
}
@Override
public void close() {}
}

View File

@ -19,16 +19,12 @@
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.IndexSortConfig;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.AggregatorFactories;
@ -154,16 +150,9 @@ public class CompositeAggregationBuilder extends AbstractAggregationBuilder<Comp
if (parent != null) {
throw new IllegalArgumentException("[composite] aggregation cannot be used with a parent aggregation");
}
final QueryShardContext shardContext = context.getQueryShardContext();
CompositeValuesSourceConfig[] configs = new CompositeValuesSourceConfig[sources.size()];
SortField[] sortFields = new SortField[configs.length];
IndexSortConfig indexSortConfig = shardContext.getIndexSettings().getIndexSortConfig();
if (indexSortConfig.hasIndexSort()) {
Sort sort = indexSortConfig.buildIndexSort(shardContext::fieldMapper, shardContext::getForField);
System.arraycopy(sort.getSort(), 0, sortFields, 0, sortFields.length);
}
for (int i = 0; i < configs.length; i++) {
configs[i] = sources.get(i).build(context, i, configs.length, sortFields[i]);
configs[i] = sources.get(i).build(context);
if (configs[i].valuesSource().needsScores()) {
throw new IllegalArgumentException("[sources] cannot access _score");
}

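The early-termination wiring removed here moves into the aggregator's sorted-docs producer, shown in the next file. For reference, a minimal request-side sketch that builds such an aggregation; the "my_buckets" aggregation name and "my_keyword" field are hypothetical:

import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.composite.TermsValuesSourceBuilder;
import java.util.Collections;

class CompositeAggSketch {
    static CompositeAggregationBuilder build() {
        return new CompositeAggregationBuilder("my_buckets",
            Collections.singletonList(new TermsValuesSourceBuilder("keyword").field("my_keyword")))
            .size(10);
    }
}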
View File

@ -19,22 +19,29 @@
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.RoaringDocIdSet;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.BucketCollector;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
@ -43,97 +50,74 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
final class CompositeAggregator extends BucketsAggregator {
private final int size;
private final CompositeValuesSourceConfig[] sources;
private final SortedDocsProducer sortedDocsProducer;
private final List<String> sourceNames;
private final int[] reverseMuls;
private final List<DocValueFormat> formats;
private final boolean canEarlyTerminate;
private final TreeMap<Integer, Integer> keys;
private final CompositeValuesComparator array;
private final CompositeValuesCollectorQueue queue;
private final List<LeafContext> contexts = new ArrayList<>();
private LeafContext leaf;
private RoaringDocIdSet.Builder builder;
private final List<Entry> entries;
private LeafReaderContext currentLeaf;
private RoaringDocIdSet.Builder docIdSetBuilder;
private BucketCollector deferredCollectors;
CompositeAggregator(String name, AggregatorFactories factories, SearchContext context, Aggregator parent,
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
int size, CompositeValuesSourceConfig[] sources, CompositeKey rawAfterKey) throws IOException {
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData,
int size, CompositeValuesSourceConfig[] sourceConfigs, CompositeKey rawAfterKey) throws IOException {
super(name, factories, context, parent, pipelineAggregators, metaData);
this.size = size;
this.sources = sources;
this.sourceNames = Arrays.stream(sources).map(CompositeValuesSourceConfig::name).collect(Collectors.toList());
this.formats = Arrays.stream(sources).map(CompositeValuesSourceConfig::format).collect(Collectors.toList());
// we use slot 0 to fill the current document (size+1).
this.array = new CompositeValuesComparator(context.searcher().getIndexReader(), sources, size+1);
this.sourceNames = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::name).collect(Collectors.toList());
this.reverseMuls = Arrays.stream(sourceConfigs).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray();
this.formats = Arrays.stream(sourceConfigs).map(CompositeValuesSourceConfig::format).collect(Collectors.toList());
final SingleDimensionValuesSource<?>[] sources =
createValuesSources(context.bigArrays(), context.searcher().getIndexReader(), context.query(), sourceConfigs, size);
this.queue = new CompositeValuesCollectorQueue(sources, size);
this.sortedDocsProducer = sources[0].createSortedDocsProducerOrNull(context.searcher().getIndexReader(), context.query());
if (rawAfterKey != null) {
array.setTop(rawAfterKey.values());
queue.setAfter(rawAfterKey.values());
}
this.keys = new TreeMap<>(array::compare);
this.canEarlyTerminate = Arrays.stream(sources)
.allMatch(CompositeValuesSourceConfig::canEarlyTerminate);
this.entries = new ArrayList<>();
}
boolean canEarlyTerminate() {
return canEarlyTerminate;
@Override
protected void doClose() {
Releasables.close(queue);
}
private int[] getReverseMuls() {
return Arrays.stream(sources).mapToInt(CompositeValuesSourceConfig::reverseMul).toArray();
@Override
protected void doPreCollection() throws IOException {
List<BucketCollector> collectors = Arrays.asList(subAggregators);
deferredCollectors = BucketCollector.wrap(collectors);
collectableSubAggregators = BucketCollector.NO_OP_COLLECTOR;
}
@Override
protected void doPostCollection() throws IOException {
finishLeaf();
}
@Override
public InternalAggregation buildAggregation(long zeroBucket) throws IOException {
assert zeroBucket == 0L;
consumeBucketsAndMaybeBreak(keys.size());
consumeBucketsAndMaybeBreak(queue.size());
// Replay all documents that contain at least one top bucket (collected during the first pass).
grow(keys.size()+1);
final boolean needsScores = needsScores();
Weight weight = null;
if (needsScores) {
Query query = context.query();
weight = context.searcher().createNormalizedWeight(query, true);
}
for (LeafContext context : contexts) {
DocIdSetIterator docIdSetIterator = context.docIdSet.iterator();
if (docIdSetIterator == null) {
continue;
}
final CompositeValuesSource.Collector collector =
array.getLeafCollector(context.ctx, getSecondPassCollector(context.subCollector));
int docID;
DocIdSetIterator scorerIt = null;
if (needsScores) {
Scorer scorer = weight.scorer(context.ctx);
// We don't need to check if the scorer is null
// since we are sure that there are documents to replay (docIdSetIterator it not empty).
scorerIt = scorer.iterator();
context.subCollector.setScorer(scorer);
}
while ((docID = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (needsScores) {
assert scorerIt.docID() < docID;
scorerIt.advance(docID);
// aggregations should only be replayed on matching documents
assert scorerIt.docID() == docID;
}
collector.collect(docID);
}
if (deferredCollectors != NO_OP_COLLECTOR) {
// Replay all documents that contain at least one top bucket (collected during the first pass).
runDeferredCollections();
}
int num = Math.min(size, keys.size());
int num = Math.min(size, queue.size());
final InternalComposite.InternalBucket[] buckets = new InternalComposite.InternalBucket[num];
final int[] reverseMuls = getReverseMuls();
int pos = 0;
for (int slot : keys.keySet()) {
CompositeKey key = array.toCompositeKey(slot);
for (int slot : queue.getSortedSlot()) {
CompositeKey key = queue.toCompositeKey(slot);
InternalAggregations aggs = bucketAggregations(slot);
int docCount = bucketDocCount(slot);
int docCount = queue.getDocCount(slot);
buckets[pos++] = new InternalComposite.InternalBucket(sourceNames, formats, key, reverseMuls, docCount, aggs);
}
CompositeKey lastBucket = num > 0 ? buckets[num-1].getRawKey() : null;
@ -143,125 +127,179 @@ final class CompositeAggregator extends BucketsAggregator {
@Override
public InternalAggregation buildEmptyAggregation() {
final int[] reverseMuls = getReverseMuls();
return new InternalComposite(name, size, sourceNames, formats, Collections.emptyList(), null, reverseMuls,
pipelineAggregators(), metaData());
}
@Override
protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
if (leaf != null) {
leaf.docIdSet = builder.build();
contexts.add(leaf);
private void finishLeaf() {
if (currentLeaf != null) {
DocIdSet docIdSet = docIdSetBuilder.build();
entries.add(new Entry(currentLeaf, docIdSet));
currentLeaf = null;
docIdSetBuilder = null;
}
leaf = new LeafContext(ctx, sub);
builder = new RoaringDocIdSet.Builder(ctx.reader().maxDoc());
final CompositeValuesSource.Collector inner = array.getLeafCollector(ctx, getFirstPassCollector());
return new LeafBucketCollector() {
@Override
public void collect(int doc, long zeroBucket) throws IOException {
assert zeroBucket == 0L;
inner.collect(doc);
}
};
}
@Override
protected void doPostCollection() throws IOException {
if (leaf != null) {
leaf.docIdSet = builder.build();
contexts.add(leaf);
protected LeafBucketCollector getLeafCollector(LeafReaderContext ctx, LeafBucketCollector sub) throws IOException {
finishLeaf();
boolean fillDocIdSet = deferredCollectors != NO_OP_COLLECTOR;
if (sortedDocsProducer != null) {
/**
* The producer will visit documents sorted by the leading source of the composite definition
* and terminate when the leading source value is guaranteed to be greater than the lowest
* composite bucket in the queue.
*/
DocIdSet docIdSet = sortedDocsProducer.processLeaf(context.query(), queue, ctx, fillDocIdSet);
if (fillDocIdSet) {
entries.add(new Entry(ctx, docIdSet));
}
/**
* We can bypass search entirely for this segment since all the processing has been done in the previous call.
* Throwing this exception will terminate the execution of the search for this root aggregation,
* see {@link MultiCollector} for more details on how we handle early termination in aggregations.
*/
throw new CollectionTerminatedException();
} else {
if (fillDocIdSet) {
currentLeaf = ctx;
docIdSetBuilder = new RoaringDocIdSet.Builder(ctx.reader().maxDoc());
}
final LeafBucketCollector inner = queue.getLeafCollector(ctx, getFirstPassCollector(docIdSetBuilder));
return new LeafBucketCollector() {
@Override
public void collect(int doc, long zeroBucket) throws IOException {
assert zeroBucket == 0L;
inner.collect(doc);
}
};
}
}
/**
* The first pass selects the top N composite buckets from all matching documents.
* It also records all doc ids that contain a top N composite bucket in a {@link RoaringDocIdSet} in order to be
* able to replay the collection filtered on the best buckets only.
* The first pass selects the top composite buckets from all matching documents.
*/
private CompositeValuesSource.Collector getFirstPassCollector() {
return new CompositeValuesSource.Collector() {
private LeafBucketCollector getFirstPassCollector(RoaringDocIdSet.Builder builder) {
return new LeafBucketCollector() {
int lastDoc = -1;
@Override
public void collect(int doc) throws IOException {
// Checks if the candidate key in slot 0 is competitive.
if (keys.containsKey(0)) {
// This key is already in the top N, skip it for now.
if (doc != lastDoc) {
public void collect(int doc, long bucket) throws IOException {
int slot = queue.addIfCompetitive();
if (slot != -1) {
if (builder != null && lastDoc != doc) {
builder.add(doc);
lastDoc = doc;
}
return;
}
if (array.hasTop() && array.compareTop(0) <= 0) {
// This key is greater than the top value collected in the previous round.
if (canEarlyTerminate) {
// The index sort matches the composite sort, we can early terminate this segment.
throw new CollectionTerminatedException();
}
// just skip this key for now
return;
}
if (keys.size() >= size) {
// The tree map is full, check if the candidate key should be kept.
if (array.compare(0, keys.lastKey()) > 0) {
// The candidate key is not competitive
if (canEarlyTerminate) {
// The index sort matches the composite sort, we can early terminate this segment.
throw new CollectionTerminatedException();
}
// just skip this key
return;
}
}
// The candidate key is competitive
final int newSlot;
if (keys.size() >= size) {
// the tree map is full, we replace the last key with this candidate.
int slot = keys.pollLastEntry().getKey();
// and we recycle the deleted slot
newSlot = slot;
} else {
newSlot = keys.size() + 1;
}
// move the candidate key to its new slot.
array.move(0, newSlot);
keys.put(newSlot, newSlot);
if (doc != lastDoc) {
builder.add(doc);
lastDoc = doc;
}
}
};
}
/**
* The second pass delegates the collection to sub-aggregations but only if the collected composite bucket is a top bucket (selected
* in the first pass).
* Replay the documents that might contain a top bucket and pass top buckets to
* the {@link this#deferredCollectors}.
*/
private CompositeValuesSource.Collector getSecondPassCollector(LeafBucketCollector subCollector) throws IOException {
return doc -> {
Integer bucket = keys.get(0);
if (bucket != null) {
// The candidate key in slot 0 is a top bucket.
// We can defer the collection of this document/bucket to the sub collector
collectExistingBucket(subCollector, doc, bucket);
private void runDeferredCollections() throws IOException {
final boolean needsScores = needsScores();
Weight weight = null;
if (needsScores) {
Query query = context.query();
weight = context.searcher().createNormalizedWeight(query, true);
}
deferredCollectors.preCollection();
for (Entry entry : entries) {
DocIdSetIterator docIdSetIterator = entry.docIdSet.iterator();
if (docIdSetIterator == null) {
continue;
}
final LeafBucketCollector subCollector = deferredCollectors.getLeafCollector(entry.context);
final LeafBucketCollector collector = queue.getLeafCollector(entry.context, getSecondPassCollector(subCollector));
DocIdSetIterator scorerIt = null;
if (needsScores) {
Scorer scorer = weight.scorer(entry.context);
if (scorer != null) {
scorerIt = scorer.iterator();
subCollector.setScorer(scorer);
}
}
int docID;
while ((docID = docIdSetIterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (needsScores) {
assert scorerIt != null && scorerIt.docID() < docID;
scorerIt.advance(docID);
// aggregations should only be replayed on matching documents
assert scorerIt.docID() == docID;
}
collector.collect(docID);
}
}
deferredCollectors.postCollection();
}
/**
* Replay the top buckets from the matching documents.
*/
private LeafBucketCollector getSecondPassCollector(LeafBucketCollector subCollector) {
return new LeafBucketCollector() {
@Override
public void collect(int doc, long zeroBucket) throws IOException {
assert zeroBucket == 0;
Integer slot = queue.compareCurrent();
if (slot != null) {
// The candidate key is a top bucket.
// We can defer the collection of this document/bucket to the sub collector
subCollector.collect(doc, slot);
}
}
};
}
static class LeafContext {
final LeafReaderContext ctx;
final LeafBucketCollector subCollector;
DocIdSet docIdSet;
private static SingleDimensionValuesSource<?>[] createValuesSources(BigArrays bigArrays, IndexReader reader, Query query,
CompositeValuesSourceConfig[] configs, int size) {
final SingleDimensionValuesSource<?>[] sources = new SingleDimensionValuesSource[configs.length];
for (int i = 0; i < sources.length; i++) {
final int reverseMul = configs[i].reverseMul();
if (configs[i].valuesSource() instanceof ValuesSource.Bytes.WithOrdinals && reader instanceof DirectoryReader) {
ValuesSource.Bytes.WithOrdinals vs = (ValuesSource.Bytes.WithOrdinals) configs[i].valuesSource();
sources[i] = new GlobalOrdinalValuesSource(bigArrays, configs[i].fieldType(), vs::globalOrdinalsValues, size, reverseMul);
if (i == 0 && sources[i].createSortedDocsProducerOrNull(reader, query) != null) {
// this is the leading source and we can optimize it with the sorted docs producer, but
// we don't want to use global ordinals because the number of visited documents
// should be low and global ordinals need one lookup per visited term.
Releasables.close(sources[i]);
sources[i] = new BinaryValuesSource(configs[i].fieldType(), vs::bytesValues, size, reverseMul);
}
} else if (configs[i].valuesSource() instanceof ValuesSource.Bytes) {
ValuesSource.Bytes vs = (ValuesSource.Bytes) configs[i].valuesSource();
sources[i] = new BinaryValuesSource(configs[i].fieldType(), vs::bytesValues, size, reverseMul);
} else if (configs[i].valuesSource() instanceof ValuesSource.Numeric) {
final ValuesSource.Numeric vs = (ValuesSource.Numeric) configs[i].valuesSource();
if (vs.isFloatingPoint()) {
sources[i] = new DoubleValuesSource(bigArrays, configs[i].fieldType(), vs::doubleValues, size, reverseMul);
} else {
if (vs instanceof RoundingValuesSource) {
sources[i] = new LongValuesSource(bigArrays, configs[i].fieldType(), vs::longValues,
((RoundingValuesSource) vs)::round, configs[i].format(), size, reverseMul);
} else {
sources[i] = new LongValuesSource(bigArrays, configs[i].fieldType(), vs::longValues,
(value) -> value, configs[i].format(), size, reverseMul);
}
}
}
}
return sources;
}
LeafContext(LeafReaderContext ctx, LeafBucketCollector subCollector) {
this.ctx = ctx;
this.subCollector = subCollector;
private static class Entry {
final LeafReaderContext context;
final DocIdSet docIdSet;
Entry(LeafReaderContext context, DocIdSet docIdSet) {
this.context = context;
this.docIdSet = docIdSet;
}
}
}

View File

@ -0,0 +1,247 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import java.io.IOException;
import java.util.Arrays;
import java.util.Set;
import java.util.TreeMap;
/**
* A specialized queue implementation for composite buckets
*/
final class CompositeValuesCollectorQueue implements Releasable {
// the slot for the current candidate
private static final int CANDIDATE_SLOT = Integer.MAX_VALUE;
private final int maxSize;
private final TreeMap<Integer, Integer> keys;
private final SingleDimensionValuesSource<?>[] arrays;
private final int[] docCounts;
private boolean afterValueSet = false;
/**
* Constructs a composite queue with the specified size and sources.
*
* @param sources The list of {@link CompositeValuesSourceConfig} to build the composite buckets.
* @param size The number of composite buckets to keep.
*/
CompositeValuesCollectorQueue(SingleDimensionValuesSource<?>[] sources, int size) {
this.maxSize = size;
this.arrays = sources;
this.docCounts = new int[size];
this.keys = new TreeMap<>(this::compare);
}
void clear() {
keys.clear();
Arrays.fill(docCounts, 0);
afterValueSet = false;
}
/**
* The current size of the queue.
*/
int size() {
return keys.size();
}
/**
* Whether the queue is full or not.
*/
boolean isFull() {
return keys.size() == maxSize;
}
/**
* Returns a sorted {@link Set} view of the slots contained in this queue.
*/
Set<Integer> getSortedSlot() {
return keys.keySet();
}
/**
* Compares the current candidate with the values in the queue and returns
* the slot if the candidate is already in the queue or null if the candidate is not present.
*/
Integer compareCurrent() {
return keys.get(CANDIDATE_SLOT);
}
/**
* Returns the lowest value (exclusive) of the leading source.
*/
Comparable<?> getLowerValueLeadSource() {
return afterValueSet ? arrays[0].getAfter() : null;
}
/**
* Returns the upper value (inclusive) of the leading source.
*/
Comparable<?> getUpperValueLeadSource() throws IOException {
return size() >= maxSize ? arrays[0].toComparable(keys.lastKey()) : null;
}
/**
* Returns the document count in <code>slot</code>.
*/
int getDocCount(int slot) {
return docCounts[slot];
}
/**
* Copies the current value in <code>slot</code>.
*/
private void copyCurrent(int slot) {
for (int i = 0; i < arrays.length; i++) {
arrays[i].copyCurrent(slot);
}
docCounts[slot] = 1;
}
/**
* Compares the values in <code>slot1</code> with <code>slot2</code>.
*/
int compare(int slot1, int slot2) {
for (int i = 0; i < arrays.length; i++) {
int cmp = (slot1 == CANDIDATE_SLOT) ? arrays[i].compareCurrent(slot2) :
arrays[i].compare(slot1, slot2);
if (cmp != 0) {
return cmp;
}
}
return 0;
}
/**
* Sets the after values for this queue.
*/
void setAfter(Comparable<?>[] values) {
assert values.length == arrays.length;
afterValueSet = true;
for (int i = 0; i < arrays.length; i++) {
arrays[i].setAfter(values[i]);
}
}
/**
* Compares the current candidate with the after values set on this queue.
*/
private int compareCurrentWithAfter() {
for (int i = 0; i < arrays.length; i++) {
int cmp = arrays[i].compareCurrentWithAfter();
if (cmp != 0) {
return cmp;
}
}
return 0;
}
/**
* Builds the {@link CompositeKey} for <code>slot</code>.
*/
CompositeKey toCompositeKey(int slot) throws IOException {
assert slot < maxSize;
Comparable<?>[] values = new Comparable<?>[arrays.length];
for (int i = 0; i < values.length; i++) {
values[i] = arrays[i].toComparable(slot);
}
return new CompositeKey(values);
}
/**
* Creates the collector that will visit the composite buckets of the matching documents.
* The provided collector <code>in</code> is called on each composite bucket.
*/
LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector in) throws IOException {
return getLeafCollector(null, context, in);
}
/**
* Creates the collector that will visit the composite buckets of the matching documents.
* If <code>forceLeadSourceValue</code> is not null, the leading source will use this value
* for each document.
* The provided collector <code>in</code> is called on each composite bucket.
*/
LeafBucketCollector getLeafCollector(Comparable<?> forceLeadSourceValue,
LeafReaderContext context, LeafBucketCollector in) throws IOException {
int last = arrays.length - 1;
LeafBucketCollector collector = in;
while (last > 0) {
collector = arrays[last--].getLeafCollector(context, collector);
}
if (forceLeadSourceValue != null) {
collector = arrays[last].getLeafCollector(forceLeadSourceValue, context, collector);
} else {
collector = arrays[last].getLeafCollector(context, collector);
}
return collector;
}
/**
* Check if the current candidate should be added in the queue.
* @return The target slot of the candidate or -1 if the candidate is not competitive.
*/
int addIfCompetitive() {
// checks if the candidate key is competitive
Integer topSlot = compareCurrent();
if (topSlot != null) {
// this key is already in the top N, increment its doc count and return its slot
docCounts[topSlot] += 1;
return topSlot;
}
if (afterValueSet && compareCurrentWithAfter() <= 0) {
// this key is not greater than the after key of the previous round, skip it
return -1;
}
if (keys.size() >= maxSize) {
// the tree map is full, check if the candidate key should be kept
if (compare(CANDIDATE_SLOT, keys.lastKey()) > 0) {
// the candidate key is not competitive, skip it
return -1;
}
}
// the candidate key is competitive
final int newSlot;
if (keys.size() >= maxSize) {
// the tree map is full, we replace the last key with this candidate
int slot = keys.pollLastEntry().getKey();
// and we recycle the deleted slot
newSlot = slot;
} else {
newSlot = keys.size();
assert newSlot < maxSize;
}
// move the candidate key to its new slot
copyCurrent(newSlot);
keys.put(newSlot, newSlot);
return newSlot;
}
@Override
public void close() {
Releasables.close(arrays);
}
}
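
A standalone sketch of the bounded top-N technique that addIfCompetitive() implements above, using plain String keys and a TreeMap instead of slots and per-source value arrays; the names are illustrative only, not the Elasticsearch API:

import java.util.ArrayList;
import java.util.List;
import java.util.TreeMap;

// Sketch of a bounded "top N smallest keys" collector: the TreeMap keeps at most maxSize
// entries, and once full it only accepts a candidate that compares lower than its current
// last (largest) key, evicting that key to recycle the slot.
final class TopNSketch {
    static List<String> topN(List<String> candidates, int maxSize) {
        TreeMap<String, Integer> keys = new TreeMap<>(); // value = doc count
        for (String candidate : candidates) {
            Integer count = keys.get(candidate);
            if (count != null) {                 // already a top bucket, just count it
                keys.put(candidate, count + 1);
                continue;
            }
            if (keys.size() >= maxSize && candidate.compareTo(keys.lastKey()) > 0) {
                continue;                        // not competitive
            }
            if (keys.size() >= maxSize) {
                keys.pollLastEntry();            // evict the largest key, reuse its place
            }
            keys.put(candidate, 1);
        }
        return new ArrayList<>(keys.keySet());   // sorted, like getSortedSlot()
    }

    public static void main(String[] args) {
        System.out.println(topN(List.of("d", "b", "a", "c", "b"), 2)); // prints [a, b]
    }
}
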

@@ -1,144 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import java.io.IOException;
import static org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
import static org.elasticsearch.search.aggregations.support.ValuesSource.Bytes;
import static org.elasticsearch.search.aggregations.support.ValuesSource.Bytes.WithOrdinals;
final class CompositeValuesComparator {
private final int size;
private final CompositeValuesSource<?, ?>[] arrays;
private boolean topValueSet = false;
/**
*
* @param sources The list of {@link CompositeValuesSourceConfig} to build the composite buckets.
* @param size The number of composite buckets to keep.
*/
CompositeValuesComparator(IndexReader reader, CompositeValuesSourceConfig[] sources, int size) {
this.size = size;
this.arrays = new CompositeValuesSource<?, ?>[sources.length];
for (int i = 0; i < sources.length; i++) {
final int reverseMul = sources[i].reverseMul();
if (sources[i].valuesSource() instanceof WithOrdinals && reader instanceof DirectoryReader) {
WithOrdinals vs = (WithOrdinals) sources[i].valuesSource();
arrays[i] = CompositeValuesSource.wrapGlobalOrdinals(vs, size, reverseMul);
} else if (sources[i].valuesSource() instanceof Bytes) {
Bytes vs = (Bytes) sources[i].valuesSource();
arrays[i] = CompositeValuesSource.wrapBinary(vs, size, reverseMul);
} else if (sources[i].valuesSource() instanceof Numeric) {
final Numeric vs = (Numeric) sources[i].valuesSource();
if (vs.isFloatingPoint()) {
arrays[i] = CompositeValuesSource.wrapDouble(vs, size, reverseMul);
} else {
arrays[i] = CompositeValuesSource.wrapLong(vs, sources[i].format(), size, reverseMul);
}
}
}
}
/**
* Moves the values in <code>slot1</code> to <code>slot2</code>.
*/
void move(int slot1, int slot2) {
assert slot1 < size && slot2 < size;
for (int i = 0; i < arrays.length; i++) {
arrays[i].move(slot1, slot2);
}
}
/**
* Compares the values in <code>slot1</code> with <code>slot2</code>.
*/
int compare(int slot1, int slot2) {
assert slot1 < size && slot2 < size;
for (int i = 0; i < arrays.length; i++) {
int cmp = arrays[i].compare(slot1, slot2);
if (cmp != 0) {
return cmp;
}
}
return 0;
}
/**
* Returns true if a top value has been set for this comparator.
*/
boolean hasTop() {
return topValueSet;
}
/**
* Sets the top values for this comparator.
*/
void setTop(Comparable<?>[] values) {
assert values.length == arrays.length;
topValueSet = true;
for (int i = 0; i < arrays.length; i++) {
arrays[i].setTop(values[i]);
}
}
/**
* Compares the top values with the values in <code>slot</code>.
*/
int compareTop(int slot) {
assert slot < size;
for (int i = 0; i < arrays.length; i++) {
int cmp = arrays[i].compareTop(slot);
if (cmp != 0) {
return cmp;
}
}
return 0;
}
/**
* Builds the {@link CompositeKey} for <code>slot</code>.
*/
CompositeKey toCompositeKey(int slot) throws IOException {
assert slot < size;
Comparable<?>[] values = new Comparable<?>[arrays.length];
for (int i = 0; i < values.length; i++) {
values[i] = arrays[i].toComparable(slot);
}
return new CompositeKey(values);
}
/**
* Gets the {@link LeafBucketCollector} that will record the composite buckets of the visited documents.
*/
CompositeValuesSource.Collector getLeafCollector(LeafReaderContext context, CompositeValuesSource.Collector in) throws IOException {
int last = arrays.length - 1;
CompositeValuesSource.Collector next = arrays[last].getLeafCollector(context, in);
for (int i = last - 1; i >= 0; i--) {
next = arrays[i].getLeafCollector(context, next);
}
return next;
}
}
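
The per-source comparison with reverseMul used by this (now removed) comparator, and by its replacement above, boils down to a lexicographic compare in which each source flips its result for descending order. A minimal standalone sketch, with a hypothetical long[][] layout standing in for the per-source slot arrays:

// Lexicographic, per-source ordering: each source compares its own slot values and
// multiplies the result by reverseMul (-1 when that source is sorted descending);
// the first non-zero comparison wins.
final class CompositeCompareSketch {
    static int compare(long[][] valuesPerSource, int[] reverseMul, int slot1, int slot2) {
        for (int i = 0; i < valuesPerSource.length; i++) {
            int cmp = Long.compare(valuesPerSource[i][slot1], valuesPerSource[i][slot2]) * reverseMul[i];
            if (cmp != 0) {
                return cmp;
            }
        }
        return 0;
    }

    public static void main(String[] args) {
        long[][] values = { {5, 5}, {1, 2} };                          // source 0 ties, source 1 decides
        System.out.println(compare(values, new int[]{1, -1}, 0, 1));   // prints 1 (source 1 is descending)
    }
}
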

@@ -1,400 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.sort.SortOrder;
import java.io.IOException;
import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
/**
* A wrapper for {@link ValuesSource} that can record and compare values produced during a collection.
*/
abstract class CompositeValuesSource<VS extends ValuesSource, T extends Comparable<T>> {
interface Collector {
void collect(int doc) throws IOException;
}
protected final VS vs;
protected final int size;
protected final int reverseMul;
protected T topValue;
/**
*
* @param vs The original {@link ValuesSource}.
* @param size The number of values to record.
* @param reverseMul -1 if the natural order ({@link SortOrder#ASC}) should be reversed.
*/
CompositeValuesSource(VS vs, int size, int reverseMul) {
this.vs = vs;
this.size = size;
this.reverseMul = reverseMul;
}
/**
* The type of this source.
*/
abstract String type();
/**
* Moves the value in <code>from</code> to <code>to</code>.
* The value present in <code>to</code> is overwritten.
*/
abstract void move(int from, int to);
/**
* Compares the value in <code>from</code> with the value in <code>to</code>.
*/
abstract int compare(int from, int to);
/**
* Compares the value in <code>slot</code> with the top value in this source.
*/
abstract int compareTop(int slot);
/**
* Sets the top value for this source. Values that compare smaller should not be recorded.
*/
abstract void setTop(Comparable<?> value);
/**
* Transforms the value in <code>slot</code> to a {@link Comparable} object.
*/
abstract Comparable<T> toComparable(int slot) throws IOException;
/**
* Gets the {@link LeafCollector} that will record the values of the visited documents.
*/
abstract Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException;
/**
* Creates a {@link CompositeValuesSource} that generates long values.
*/
static CompositeValuesSource<ValuesSource.Numeric, Long> wrapLong(ValuesSource.Numeric vs, DocValueFormat format,
int size, int reverseMul) {
return new LongValuesSource(vs, format, size, reverseMul);
}
/**
* Creates a {@link CompositeValuesSource} that generates double values.
*/
static CompositeValuesSource<ValuesSource.Numeric, Double> wrapDouble(ValuesSource.Numeric vs, int size, int reverseMul) {
return new DoubleValuesSource(vs, size, reverseMul);
}
/**
* Creates a {@link CompositeValuesSource} that generates binary values.
*/
static CompositeValuesSource<ValuesSource.Bytes, BytesRef> wrapBinary(ValuesSource.Bytes vs, int size, int reverseMul) {
return new BinaryValuesSource(vs, size, reverseMul);
}
/**
* Creates a {@link CompositeValuesSource} that generates global ordinal values.
*/
static CompositeValuesSource<ValuesSource.Bytes.WithOrdinals, BytesRef> wrapGlobalOrdinals(ValuesSource.Bytes.WithOrdinals vs,
int size,
int reverseMul) {
return new GlobalOrdinalValuesSource(vs, size, reverseMul);
}
/**
* A {@link CompositeValuesSource} for global ordinals
*/
private static class GlobalOrdinalValuesSource extends CompositeValuesSource<ValuesSource.Bytes.WithOrdinals, BytesRef> {
private final long[] values;
private SortedSetDocValues lookup;
private Long topValueGlobalOrd;
private boolean isTopValueInsertionPoint;
GlobalOrdinalValuesSource(ValuesSource.Bytes.WithOrdinals vs, int size, int reverseMul) {
super(vs, size, reverseMul);
this.values = new long[size];
}
@Override
String type() {
return "global_ordinals";
}
@Override
void move(int from, int to) {
values[to] = values[from];
}
@Override
int compare(int from, int to) {
return Long.compare(values[from], values[to]) * reverseMul;
}
@Override
int compareTop(int slot) {
int cmp = Long.compare(values[slot], topValueGlobalOrd);
if (cmp == 0 && isTopValueInsertionPoint) {
// the top value is missing in this shard, the comparison is against
// the insertion point of the top value so equality means that the value
// is "after" the insertion point.
return reverseMul;
}
return cmp * reverseMul;
}
@Override
void setTop(Comparable<?> value) {
if (value instanceof BytesRef) {
topValue = (BytesRef) value;
} else if (value instanceof String) {
topValue = new BytesRef(value.toString());
} else {
throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName());
}
}
@Override
Comparable<BytesRef> toComparable(int slot) throws IOException {
return BytesRef.deepCopyOf(lookup.lookupOrd(values[slot]));
}
@Override
Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException {
final SortedSetDocValues dvs = vs.globalOrdinalsValues(context);
if (lookup == null) {
lookup = dvs;
if (topValue != null && topValueGlobalOrd == null) {
topValueGlobalOrd = lookup.lookupTerm(topValue);
if (topValueGlobalOrd < 0) {
// convert negative insert position
topValueGlobalOrd = -topValueGlobalOrd - 1;
isTopValueInsertionPoint = true;
}
}
}
return doc -> {
if (dvs.advanceExact(doc)) {
long ord;
while ((ord = dvs.nextOrd()) != NO_MORE_ORDS) {
values[0] = ord;
next.collect(doc);
}
}
};
}
}
/**
* A {@link CompositeValuesSource} for binary source ({@link BytesRef})
*/
private static class BinaryValuesSource extends CompositeValuesSource<ValuesSource.Bytes, BytesRef> {
private final BytesRef[] values;
BinaryValuesSource(ValuesSource.Bytes vs, int size, int reverseMul) {
super(vs, size, reverseMul);
this.values = new BytesRef[size];
}
@Override
String type() {
return "binary";
}
@Override
public void move(int from, int to) {
values[to] = BytesRef.deepCopyOf(values[from]);
}
@Override
public int compare(int from, int to) {
return values[from].compareTo(values[to]) * reverseMul;
}
@Override
int compareTop(int slot) {
return values[slot].compareTo(topValue) * reverseMul;
}
@Override
void setTop(Comparable<?> value) {
if (value.getClass() == BytesRef.class) {
topValue = (BytesRef) value;
} else if (value.getClass() == String.class) {
topValue = new BytesRef((String) value);
} else {
throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName());
}
}
@Override
Comparable<BytesRef> toComparable(int slot) {
return values[slot];
}
@Override
Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException {
final SortedBinaryDocValues dvs = vs.bytesValues(context);
return doc -> {
if (dvs.advanceExact(doc)) {
int num = dvs.docValueCount();
for (int i = 0; i < num; i++) {
values[0] = dvs.nextValue();
next.collect(doc);
}
}
};
}
}
/**
* A {@link CompositeValuesSource} for longs.
*/
private static class LongValuesSource extends CompositeValuesSource<ValuesSource.Numeric, Long> {
private final long[] values;
// handles "format" for date histogram source
private final DocValueFormat format;
LongValuesSource(ValuesSource.Numeric vs, DocValueFormat format, int size, int reverseMul) {
super(vs, size, reverseMul);
this.format = format;
this.values = new long[size];
}
@Override
String type() {
return "long";
}
@Override
void move(int from, int to) {
values[to] = values[from];
}
@Override
int compare(int from, int to) {
return Long.compare(values[from], values[to]) * reverseMul;
}
@Override
int compareTop(int slot) {
return Long.compare(values[slot], topValue) * reverseMul;
}
@Override
void setTop(Comparable<?> value) {
if (value instanceof Number) {
topValue = ((Number) value).longValue();
} else {
// for date histogram source with "format", the after value is formatted
// as a string so we need to retrieve the original value in milliseconds.
topValue = format.parseLong(value.toString(), false, () -> {
throw new IllegalArgumentException("now() is not supported in [after] key");
});
}
}
@Override
Comparable<Long> toComparable(int slot) {
return values[slot];
}
@Override
Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException {
final SortedNumericDocValues dvs = vs.longValues(context);
return doc -> {
if (dvs.advanceExact(doc)) {
int num = dvs.docValueCount();
for (int i = 0; i < num; i++) {
values[0] = dvs.nextValue();
next.collect(doc);
}
}
};
}
}
/**
* A {@link CompositeValuesSource} for doubles.
*/
private static class DoubleValuesSource extends CompositeValuesSource<ValuesSource.Numeric, Double> {
private final double[] values;
DoubleValuesSource(ValuesSource.Numeric vs, int size, int reverseMul) {
super(vs, size, reverseMul);
this.values = new double[size];
}
@Override
String type() {
return "long";
}
@Override
void move(int from, int to) {
values[to] = values[from];
}
@Override
int compare(int from, int to) {
return Double.compare(values[from], values[to]) * reverseMul;
}
@Override
int compareTop(int slot) {
return Double.compare(values[slot], topValue) * reverseMul;
}
@Override
void setTop(Comparable<?> value) {
if (value instanceof Number) {
topValue = ((Number) value).doubleValue();
} else {
topValue = Double.parseDouble(value.toString());
}
}
@Override
Comparable<Double> toComparable(int slot) {
return values[slot];
}
@Override
Collector getLeafCollector(LeafReaderContext context, Collector next) throws IOException {
final SortedNumericDoubleValues dvs = vs.doubleValues(context);
return doc -> {
if (dvs.advanceExact(doc)) {
int num = dvs.docValueCount();
for (int i = 0; i < num; i++) {
values[0] = dvs.nextValue();
next.collect(doc);
}
}
};
}
}
}
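
The setTop/compareTop handling of a missing term in this (now removed) global-ordinal source, like the equivalent after-value handling in the new GlobalOrdinalValuesSource, relies on the usual negative-insertion-point convention: a failed lookup returns -(insertionPoint) - 1, and every ordinal at or past that insertion point sorts strictly after the missing term. Arrays.binarySearch follows the same convention, so a tiny standalone sketch (with an assumed terms array) can show the decoding step:

import java.util.Arrays;

// Decoding a negative lookup result into an insertion point, as done for a missing
// top/after term: equality against the insertion point then means "after" the term.
final class InsertionPointSketch {
    public static void main(String[] args) {
        String[] sortedTerms = { "apple", "kiwi", "pear" };
        long ord = Arrays.binarySearch(sortedTerms, "mango");  // "mango" is missing
        boolean isInsertionPoint = false;
        if (ord < 0) {
            ord = -ord - 1;          // first ordinal that sorts after "mango"
            isInsertionPoint = true;
        }
        System.out.println("insertion point ordinal = " + ord + ", insertion point? " + isInsertionPoint);
    }
}
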

@@ -19,19 +19,13 @@
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.SortField;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexSortConfig;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
@@ -291,46 +285,18 @@ public abstract class CompositeValuesSourceBuilder<AB extends CompositeValuesSou
*
* @param context The search context for this source.
* @param config The {@link ValuesSourceConfig} for this source.
* @param pos The position of this source in the composite key.
* @param numPos The total number of positions in the composite key.
* @param sortField The {@link SortField} of the index sort at this position or null if not present.
*/
protected abstract CompositeValuesSourceConfig innerBuild(SearchContext context,
ValuesSourceConfig<?> config,
int pos,
int numPos,
SortField sortField) throws IOException;
protected abstract CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSourceConfig<?> config) throws IOException;
public final CompositeValuesSourceConfig build(SearchContext context, int pos, int numPos, SortField sortField) throws IOException {
public final CompositeValuesSourceConfig build(SearchContext context) throws IOException {
ValuesSourceConfig<?> config = ValuesSourceConfig.resolve(context.getQueryShardContext(),
valueType, field, script, missing, null, format);
return innerBuild(context, config, pos, numPos, sortField);
}
protected boolean checkCanEarlyTerminate(IndexReader reader,
String fieldName,
boolean reverse,
SortField sortField) throws IOException {
return sortField.getField().equals(fieldName) &&
sortField.getReverse() == reverse &&
isSingleValued(reader, sortField);
}
private static boolean isSingleValued(IndexReader reader, SortField field) throws IOException {
SortField.Type type = IndexSortConfig.getSortFieldType(field);
for (LeafReaderContext context : reader.leaves()) {
if (type == SortField.Type.STRING) {
final SortedSetDocValues values = DocValues.getSortedSet(context.reader(), field.getField());
if (values.cost() > 0 && DocValues.unwrapSingleton(values) == null) {
return false;
}
} else {
final SortedNumericDocValues values = DocValues.getSortedNumeric(context.reader(), field.getField());
if (values.cost() > 0 && DocValues.unwrapSingleton(values) == null) {
return false;
}
}
if (config.unmapped() && field != null && config.missing() == null) {
// this source cannot produce any values so we refuse to build
// since composite buckets are not created on null values
throw new QueryShardException(context.getQueryShardContext(),
"failed to find field [" + field + "] and [missing] is not provided");
}
return true;
return innerBuild(context, config);
}
}

@@ -19,22 +19,25 @@
package org.elasticsearch.search.aggregations.bucket.composite;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.sort.SortOrder;
class CompositeValuesSourceConfig {
private final String name;
@Nullable
private final MappedFieldType fieldType;
private final ValuesSource vs;
private final DocValueFormat format;
private final int reverseMul;
private final boolean canEarlyTerminate;
CompositeValuesSourceConfig(String name, ValuesSource vs, DocValueFormat format, SortOrder order, boolean canEarlyTerminate) {
CompositeValuesSourceConfig(String name, @Nullable MappedFieldType fieldType, ValuesSource vs, DocValueFormat format, SortOrder order) {
this.name = name;
this.fieldType = fieldType;
this.vs = vs;
this.format = format;
this.canEarlyTerminate = canEarlyTerminate;
this.reverseMul = order == SortOrder.ASC ? 1 : -1;
}
@@ -45,6 +48,13 @@ class CompositeValuesSourceConfig {
return name;
}
/**
* Returns the {@link MappedFieldType} for this config.
*/
MappedFieldType fieldType() {
return fieldType;
}
/**
* Returns the {@link ValuesSource} for this configuration.
*/
@@ -67,11 +77,4 @@ class CompositeValuesSourceConfig {
assert reverseMul == -1 || reverseMul == 1;
return reverseMul;
}
/**
* Returns whether this {@link ValuesSource} is used to sort the index.
*/
boolean canEarlyTerminate() {
return canEarlyTerminate;
}
}

@@ -19,7 +19,6 @@
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.search.SortField;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -29,9 +28,9 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.support.FieldContext;
@@ -39,7 +38,6 @@ import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.sort.SortOrder;
import org.joda.time.DateTimeZone;
import java.io.IOException;
@@ -217,11 +215,7 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild
}
@Override
protected CompositeValuesSourceConfig innerBuild(SearchContext context,
ValuesSourceConfig<?> config,
int pos,
int numPos,
SortField sortField) throws IOException {
protected CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSourceConfig<?> config) throws IOException {
Rounding rounding = createRounding();
ValuesSource orig = config.toValuesSource(context.getQueryShardContext());
if (orig == null) {
@@ -230,19 +224,10 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild
if (orig instanceof ValuesSource.Numeric) {
ValuesSource.Numeric numeric = (ValuesSource.Numeric) orig;
RoundingValuesSource vs = new RoundingValuesSource(numeric, rounding);
boolean canEarlyTerminate = false;
final FieldContext fieldContext = config.fieldContext();
if (sortField != null &&
pos == numPos-1 &&
fieldContext != null) {
canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(),
fieldContext.field(), order() == SortOrder.ASC ? false : true, sortField);
}
// dates are returned as timestamp in milliseconds-since-the-epoch unless a specific date format
// is specified in the builder.
final DocValueFormat docValueFormat = format() == null ? DocValueFormat.RAW : config.format();
return new CompositeValuesSourceConfig(name, vs, docValueFormat,
order(), canEarlyTerminate);
final MappedFieldType fieldType = config.fieldContext() != null ? config.fieldContext().fieldType() : null;
return new CompositeValuesSourceConfig(name, fieldType, vs, docValueFormat, order());
} else {
throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName());
}

@@ -0,0 +1,129 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.DoubleArray;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import java.io.IOException;
/**
* A {@link SingleDimensionValuesSource} for doubles.
*/
class DoubleValuesSource extends SingleDimensionValuesSource<Double> {
private final CheckedFunction<LeafReaderContext, SortedNumericDoubleValues, IOException> docValuesFunc;
private final DoubleArray values;
private double currentValue;
DoubleValuesSource(BigArrays bigArrays, MappedFieldType fieldType,
CheckedFunction<LeafReaderContext, SortedNumericDoubleValues, IOException> docValuesFunc,
int size, int reverseMul) {
super(fieldType, size, reverseMul);
this.docValuesFunc = docValuesFunc;
this.values = bigArrays.newDoubleArray(size, false);
}
@Override
void copyCurrent(int slot) {
values.set(slot, currentValue);
}
@Override
int compare(int from, int to) {
return compareValues(values.get(from), values.get(to));
}
@Override
int compareCurrent(int slot) {
return compareValues(currentValue, values.get(slot));
}
@Override
int compareCurrentWithAfter() {
return compareValues(currentValue, afterValue);
}
private int compareValues(double v1, double v2) {
return Double.compare(v1, v2) * reverseMul;
}
@Override
void setAfter(Comparable<?> value) {
if (value instanceof Number) {
afterValue = ((Number) value).doubleValue();
} else {
afterValue = Double.parseDouble(value.toString());
}
}
@Override
Double toComparable(int slot) {
return values.get(slot);
}
@Override
LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector next) throws IOException {
final SortedNumericDoubleValues dvs = docValuesFunc.apply(context);
return new LeafBucketCollector() {
@Override
public void collect(int doc, long bucket) throws IOException {
if (dvs.advanceExact(doc)) {
int num = dvs.docValueCount();
for (int i = 0; i < num; i++) {
currentValue = dvs.nextValue();
next.collect(doc, bucket);
}
}
}
};
}
@Override
LeafBucketCollector getLeafCollector(Comparable<?> value, LeafReaderContext context, LeafBucketCollector next) {
if (value.getClass() != Double.class) {
throw new IllegalArgumentException("Expected Double, got " + value.getClass());
}
currentValue = (Double) value;
return new LeafBucketCollector() {
@Override
public void collect(int doc, long bucket) throws IOException {
next.collect(doc, bucket);
}
};
}
@Override
SortedDocsProducer createSortedDocsProducerOrNull(IndexReader reader, Query query) {
return null;
}
@Override
public void close() {
Releasables.close(values);
}
}

@@ -0,0 +1,189 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import java.io.IOException;
import static org.apache.lucene.index.SortedSetDocValues.NO_MORE_ORDS;
/**
* A {@link SingleDimensionValuesSource} for global ordinals.
*/
class GlobalOrdinalValuesSource extends SingleDimensionValuesSource<BytesRef> {
private final CheckedFunction<LeafReaderContext, SortedSetDocValues, IOException> docValuesFunc;
private final LongArray values;
private SortedSetDocValues lookup;
private long currentValue;
private Long afterValueGlobalOrd;
private boolean isTopValueInsertionPoint;
private long lastLookupOrd = -1;
private BytesRef lastLookupValue;
GlobalOrdinalValuesSource(BigArrays bigArrays,
MappedFieldType type, CheckedFunction<LeafReaderContext, SortedSetDocValues, IOException> docValuesFunc,
int size, int reverseMul) {
super(type, size, reverseMul);
this.docValuesFunc = docValuesFunc;
this.values = bigArrays.newLongArray(size, false);
}
@Override
void copyCurrent(int slot) {
values.set(slot, currentValue);
}
@Override
int compare(int from, int to) {
return Long.compare(values.get(from), values.get(to)) * reverseMul;
}
@Override
int compareCurrent(int slot) {
return Long.compare(currentValue, values.get(slot)) * reverseMul;
}
@Override
int compareCurrentWithAfter() {
int cmp = Long.compare(currentValue, afterValueGlobalOrd);
if (cmp == 0 && isTopValueInsertionPoint) {
// the top value is missing in this shard, the comparison is against
// the insertion point of the top value so equality means that the value
// is "after" the insertion point.
return reverseMul;
}
return cmp * reverseMul;
}
@Override
void setAfter(Comparable<?> value) {
if (value instanceof BytesRef) {
afterValue = (BytesRef) value;
} else if (value instanceof String) {
afterValue = new BytesRef(value.toString());
} else {
throw new IllegalArgumentException("invalid value, expected string, got " + value.getClass().getSimpleName());
}
}
@Override
BytesRef toComparable(int slot) throws IOException {
long globalOrd = values.get(slot);
if (globalOrd == lastLookupOrd) {
return lastLookupValue;
} else {
lastLookupOrd = globalOrd;
lastLookupValue = BytesRef.deepCopyOf(lookup.lookupOrd(values.get(slot)));
return lastLookupValue;
}
}
@Override
LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector next) throws IOException {
final SortedSetDocValues dvs = docValuesFunc.apply(context);
if (lookup == null) {
initLookup(dvs);
}
return new LeafBucketCollector() {
@Override
public void collect(int doc, long bucket) throws IOException {
if (dvs.advanceExact(doc)) {
long ord;
while ((ord = dvs.nextOrd()) != NO_MORE_ORDS) {
currentValue = ord;
next.collect(doc, bucket);
}
}
}
};
}
@Override
LeafBucketCollector getLeafCollector(Comparable<?> value, LeafReaderContext context, LeafBucketCollector next) throws IOException {
if (value.getClass() != BytesRef.class) {
throw new IllegalArgumentException("Expected BytesRef, got " + value.getClass());
}
BytesRef term = (BytesRef) value;
final SortedSetDocValues dvs = docValuesFunc.apply(context);
if (lookup == null) {
initLookup(dvs);
}
return new LeafBucketCollector() {
boolean currentValueIsSet = false;
@Override
public void collect(int doc, long bucket) throws IOException {
if (!currentValueIsSet) {
if (dvs.advanceExact(doc)) {
long ord;
while ((ord = dvs.nextOrd()) != NO_MORE_ORDS) {
if (term.equals(lookup.lookupOrd(ord))) {
currentValueIsSet = true;
currentValue = ord;
break;
}
}
}
}
assert currentValueIsSet;
next.collect(doc, bucket);
}
};
}
@Override
SortedDocsProducer createSortedDocsProducerOrNull(IndexReader reader, Query query) {
if (checkIfSortedDocsIsApplicable(reader, fieldType) == false ||
(query != null && query.getClass() != MatchAllDocsQuery.class)) {
return null;
}
return new TermsSortedDocsProducer(fieldType.name());
}
@Override
public void close() {
Releasables.close(values);
}
private void initLookup(SortedSetDocValues dvs) throws IOException {
lookup = dvs;
if (afterValue != null && afterValueGlobalOrd == null) {
afterValueGlobalOrd = lookup.lookupTerm(afterValue);
if (afterValueGlobalOrd < 0) {
// convert negative insert position
afterValueGlobalOrd = -afterValueGlobalOrd - 1;
isTopValueInsertionPoint = true;
}
}
}
}
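
toComparable() above memoizes the last resolved ordinal so that repeated lookups of the same global ordinal do not hit the terms dictionary again. A standalone sketch of that single-entry cache, with a hypothetical resolver in place of SortedSetDocValues.lookupOrd:

import java.util.function.LongFunction;

// Single-entry cache: consecutive lookups of the same ordinal reuse the previously
// resolved term instead of resolving it again.
final class OrdinalLookupCacheSketch {
    private long lastLookupOrd = -1;
    private String lastLookupValue;

    String lookup(long globalOrd, LongFunction<String> resolver) {
        if (globalOrd == lastLookupOrd) {
            return lastLookupValue;            // cache hit, no lookup
        }
        lastLookupOrd = globalOrd;
        lastLookupValue = resolver.apply(globalOrd);
        return lastLookupValue;
    }

    public static void main(String[] args) {
        OrdinalLookupCacheSketch cache = new OrdinalLookupCacheSketch();
        LongFunction<String> resolver = ord -> { System.out.println("resolving " + ord); return "term-" + ord; };
        cache.lookup(3, resolver);   // resolves
        cache.lookup(3, resolver);   // cached, no "resolving" output
        cache.lookup(5, resolver);   // resolves
    }
}
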

@@ -19,19 +19,17 @@
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.search.SortField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
import org.elasticsearch.search.aggregations.support.FieldContext;
import org.elasticsearch.search.aggregations.support.ValueType;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.sort.SortOrder;
import java.io.IOException;
import java.util.Objects;
@@ -108,27 +106,16 @@ public class HistogramValuesSourceBuilder extends CompositeValuesSourceBuilder<H
}
@Override
protected CompositeValuesSourceConfig innerBuild(SearchContext context,
ValuesSourceConfig<?> config,
int pos,
int numPos,
SortField sortField) throws IOException {
protected CompositeValuesSourceConfig innerBuild(SearchContext context, ValuesSourceConfig<?> config) throws IOException {
ValuesSource orig = config.toValuesSource(context.getQueryShardContext());
if (orig == null) {
orig = ValuesSource.Numeric.EMPTY;
}
if (orig instanceof ValuesSource.Numeric) {
ValuesSource.Numeric numeric = (ValuesSource.Numeric) orig;
HistogramValuesSource vs = new HistogramValuesSource(numeric, interval);
boolean canEarlyTerminate = false;
final FieldContext fieldContext = config.fieldContext();
if (sortField != null &&
pos == numPos-1 &&
fieldContext != null) {
canEarlyTerminate = checkCanEarlyTerminate(context.searcher().getIndexReader(),
fieldContext.field(), order() == SortOrder.ASC ? false : true, sortField);
}
return new CompositeValuesSourceConfig(name, vs, config.format(), order(), canEarlyTerminate);
final HistogramValuesSource vs = new HistogramValuesSource(numeric, interval);
final MappedFieldType fieldType = config.fieldContext() != null ? config.fieldContext().fieldType() : null;
return new CompositeValuesSourceConfig(name, fieldType, vs, config.format(), order());
} else {
throw new IllegalArgumentException("invalid source, expected numeric, got " + orig.getClass().getSimpleName());
}

@@ -0,0 +1,190 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.document.IntPoint;
import org.apache.lucene.document.LongPoint;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.PointRangeQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongArray;
import org.elasticsearch.index.mapper.DateFieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.NumberFieldMapper;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import java.io.IOException;
import java.util.function.LongUnaryOperator;
import java.util.function.ToLongFunction;
/**
* A {@link SingleDimensionValuesSource} for longs.
*/
class LongValuesSource extends SingleDimensionValuesSource<Long> {
private final CheckedFunction<LeafReaderContext, SortedNumericDocValues, IOException> docValuesFunc;
private final LongUnaryOperator rounding;
// handles "format" for date histogram source
private final DocValueFormat format;
private final LongArray values;
private long currentValue;
LongValuesSource(BigArrays bigArrays, MappedFieldType fieldType,
CheckedFunction<LeafReaderContext, SortedNumericDocValues, IOException> docValuesFunc,
LongUnaryOperator rounding, DocValueFormat format, int size, int reverseMul) {
super(fieldType, size, reverseMul);
this.docValuesFunc = docValuesFunc;
this.rounding = rounding;
this.format = format;
this.values = bigArrays.newLongArray(size, false);
}
@Override
void copyCurrent(int slot) {
values.set(slot, currentValue);
}
@Override
int compare(int from, int to) {
return compareValues(values.get(from), values.get(to));
}
@Override
int compareCurrent(int slot) {
return compareValues(currentValue, values.get(slot));
}
@Override
int compareCurrentWithAfter() {
return compareValues(currentValue, afterValue);
}
private int compareValues(long v1, long v2) {
return Long.compare(v1, v2) * reverseMul;
}
@Override
void setAfter(Comparable<?> value) {
if (value instanceof Number) {
afterValue = ((Number) value).longValue();
} else {
// for date histogram source with "format", the after value is formatted
// as a string so we need to retrieve the original value in milliseconds.
afterValue = format.parseLong(value.toString(), false, () -> {
throw new IllegalArgumentException("now() is not supported in [after] key");
});
}
}
@Override
Long toComparable(int slot) {
return values.get(slot);
}
@Override
LeafBucketCollector getLeafCollector(LeafReaderContext context, LeafBucketCollector next) throws IOException {
final SortedNumericDocValues dvs = docValuesFunc.apply(context);
return new LeafBucketCollector() {
@Override
public void collect(int doc, long bucket) throws IOException {
if (dvs.advanceExact(doc)) {
int num = dvs.docValueCount();
for (int i = 0; i < num; i++) {
currentValue = dvs.nextValue();
next.collect(doc, bucket);
}
}
}
};
}
@Override
LeafBucketCollector getLeafCollector(Comparable<?> value, LeafReaderContext context, LeafBucketCollector next) {
if (value.getClass() != Long.class) {
throw new IllegalArgumentException("Expected Long, got " + value.getClass());
}
currentValue = (Long) value;
return new LeafBucketCollector() {
@Override
public void collect(int doc, long bucket) throws IOException {
next.collect(doc, bucket);
}
};
}
@Override
SortedDocsProducer createSortedDocsProducerOrNull(IndexReader reader, Query query) {
if (checkIfSortedDocsIsApplicable(reader, fieldType) == false ||
(query != null &&
query.getClass() != MatchAllDocsQuery.class &&
// if the query is a range query over the same field
(query instanceof PointRangeQuery && fieldType.name().equals((((PointRangeQuery) query).getField()))) == false)) {
return null;
}
final byte[] lowerPoint;
final byte[] upperPoint;
if (query instanceof PointRangeQuery) {
final PointRangeQuery rangeQuery = (PointRangeQuery) query;
lowerPoint = rangeQuery.getLowerPoint();
upperPoint = rangeQuery.getUpperPoint();
} else {
lowerPoint = null;
upperPoint = null;
}
if (fieldType instanceof NumberFieldMapper.NumberFieldType) {
NumberFieldMapper.NumberFieldType ft = (NumberFieldMapper.NumberFieldType) fieldType;
final ToLongFunction<byte[]> toBucketFunction;
switch (ft.typeName()) {
case "long":
toBucketFunction = (value) -> rounding.applyAsLong(LongPoint.decodeDimension(value, 0));
break;
case "int":
case "short":
case "byte":
toBucketFunction = (value) -> rounding.applyAsLong(IntPoint.decodeDimension(value, 0));
break;
default:
return null;
}
return new PointsSortedDocsProducer(fieldType.name(), toBucketFunction, lowerPoint, upperPoint);
} else if (fieldType instanceof DateFieldMapper.DateFieldType) {
final ToLongFunction<byte[]> toBucketFunction = (value) -> rounding.applyAsLong(LongPoint.decodeDimension(value, 0));
return new PointsSortedDocsProducer(fieldType.name(), toBucketFunction, lowerPoint, upperPoint);
} else {
return null;
}
}
@Override
public void close() {
Releasables.close(values);
}
}
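
The rounding hook passed to this source (identity for plain numeric fields, RoundingValuesSource::round for date histograms) maps raw long values to bucket keys before they are compared. A minimal standalone sketch of that idea, assuming a one-hour interval for illustration:

import java.util.function.LongUnaryOperator;

// Raw long values (e.g. millisecond timestamps) are turned into bucket keys through a
// LongUnaryOperator: identity for a plain numeric source, interval rounding for a
// histogram-style source.
final class RoundingSketch {
    public static void main(String[] args) {
        long interval = 3_600_000L;                                   // one hour in millis (assumed interval)
        LongUnaryOperator identity = value -> value;
        LongUnaryOperator hourly = value -> Math.floorDiv(value, interval) * interval;

        long timestamp = 1_522_222_222_000L;
        System.out.println(identity.applyAsLong(timestamp));          // unchanged
        System.out.println(hourly.applyAsLong(timestamp));            // rounded down to the hour
    }
}
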

@@ -0,0 +1,181 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.aggregations.bucket.composite;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.DocIdSetBuilder;
import org.apache.lucene.util.StringHelper;
import java.io.IOException;
import java.util.function.ToLongFunction;
/**
* A {@link SortedDocsProducer} that can sort documents based on numerics indexed in the provided field.
*/
class PointsSortedDocsProducer extends SortedDocsProducer {
private final ToLongFunction<byte[]> bucketFunction;
private final byte[] lowerPointQuery;
private final byte[] upperPointQuery;
PointsSortedDocsProducer(String field, ToLongFunction<byte[]> bucketFunction, byte[] lowerPointQuery, byte[] upperPointQuery) {
super(field);
this.bucketFunction = bucketFunction;
this.lowerPointQuery = lowerPointQuery;
this.upperPointQuery = upperPointQuery;
}
@Override
DocIdSet processLeaf(Query query, CompositeValuesCollectorQueue queue,
LeafReaderContext context, boolean fillDocIdSet) throws IOException {
final PointValues values = context.reader().getPointValues(field);
if (values == null) {
// no value for the field
return DocIdSet.EMPTY;
}
long lowerBucket = Long.MIN_VALUE;
Comparable<?> lowerValue = queue.getLowerValueLeadSource();
if (lowerValue != null) {
if (lowerValue.getClass() != Long.class) {
throw new IllegalStateException("expected Long, got " + lowerValue.getClass());
}
lowerBucket = (Long) lowerValue;
}
long upperBucket = Long.MAX_VALUE;
Comparable<?> upperValue = queue.getUpperValueLeadSource();
if (upperValue != null) {
if (upperValue.getClass() != Long.class) {
throw new IllegalStateException("expected Long, got " + upperValue.getClass());
}
upperBucket = (Long) upperValue;
}
DocIdSetBuilder builder = fillDocIdSet ? new DocIdSetBuilder(context.reader().maxDoc(), values, field) : null;
Visitor visitor = new Visitor(context, queue, builder, values.getBytesPerDimension(), lowerBucket, upperBucket);
try {
values.intersect(visitor);
visitor.flush();
} catch (CollectionTerminatedException exc) {}
return fillDocIdSet ? builder.build() : DocIdSet.EMPTY;
}
private class Visitor implements PointValues.IntersectVisitor {
final LeafReaderContext context;
final CompositeValuesCollectorQueue queue;
final DocIdSetBuilder builder;
final int maxDoc;
final int bytesPerDim;
final long lowerBucket;
final long upperBucket;
DocIdSetBuilder bucketDocsBuilder;
DocIdSetBuilder.BulkAdder adder;
int remaining;
long lastBucket;
boolean first = true;
Visitor(LeafReaderContext context, CompositeValuesCollectorQueue queue, DocIdSetBuilder builder,
int bytesPerDim, long lowerBucket, long upperBucket) {
this.context = context;
this.maxDoc = context.reader().maxDoc();
this.queue = queue;
this.builder = builder;
this.lowerBucket = lowerBucket;
this.upperBucket = upperBucket;
this.bucketDocsBuilder = new DocIdSetBuilder(maxDoc);
this.bytesPerDim = bytesPerDim;
}
@Override
public void grow(int count) {
remaining = count;
adder = bucketDocsBuilder.grow(count);
}
@Override
public void visit(int docID) throws IOException {
throw new IllegalStateException("should never be called");
}
@Override
public void visit(int docID, byte[] packedValue) throws IOException {
if (compare(packedValue, packedValue) != PointValues.Relation.CELL_CROSSES_QUERY) {
remaining --;
return;
}
long bucket = bucketFunction.applyAsLong(packedValue);
if (first == false && bucket != lastBucket) {
final DocIdSet docIdSet = bucketDocsBuilder.build();
if (processBucket(queue, context, docIdSet.iterator(), lastBucket, builder) &&
// lower bucket is inclusive
lowerBucket != lastBucket) {
// this bucket does not have any competitive composite buckets,
// we can early terminate the collection because the remaining buckets are guaranteed
// to be greater than this bucket.
throw new CollectionTerminatedException();
}
bucketDocsBuilder = new DocIdSetBuilder(maxDoc);
assert remaining > 0;
adder = bucketDocsBuilder.grow(remaining);
}
lastBucket = bucket;
first = false;
adder.add(docID);
remaining --;
}
@Override
public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue) {
if ((upperPointQuery != null && StringHelper.compare(bytesPerDim, minPackedValue, 0, upperPointQuery, 0) > 0) ||
(lowerPointQuery != null && StringHelper.compare(bytesPerDim, maxPackedValue, 0, lowerPointQuery, 0) < 0)) {
// does not match the query
return PointValues.Relation.CELL_OUTSIDE_QUERY;
}
// check the current bounds
if (lowerBucket != Long.MIN_VALUE) {
long maxBucket = bucketFunction.applyAsLong(maxPackedValue);
if (maxBucket < lowerBucket) {
return PointValues.Relation.CELL_OUTSIDE_QUERY;
}
}
if (upperBucket != Long.MAX_VALUE) {
long minBucket = bucketFunction.applyAsLong(minPackedValue);
if (minBucket > upperBucket) {
return PointValues.Relation.CELL_OUTSIDE_QUERY;
}
}
return PointValues.Relation.CELL_CROSSES_QUERY;
}
public void flush() throws IOException {
if (first == false) {
final DocIdSet docIdSet = bucketDocsBuilder.build();
processBucket(queue, context, docIdSet.iterator(), lastBucket, builder);
bucketDocsBuilder = null;
}
}
}
}
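
The Visitor.compare() method above prunes whole BKD cells whose rounded value range cannot contain a competitive bucket. The decision itself is a simple interval check, sketched here with plain longs and a hypothetical Relation enum in place of PointValues.Relation:

// A cell whose [minBucket, maxBucket] range falls completely outside the
// [lowerBucket, upperBucket] window can be skipped without visiting its documents.
final class CellPruningSketch {
    enum Relation { OUTSIDE, CROSSES }

    static Relation relate(long minBucket, long maxBucket, long lowerBucket, long upperBucket) {
        if (maxBucket < lowerBucket || minBucket > upperBucket) {
            return Relation.OUTSIDE;     // no competitive bucket can come from this cell
        }
        return Relation.CROSSES;         // must visit the documents to know
    }

    public static void main(String[] args) {
        System.out.println(relate(10, 20, 30, 40));  // OUTSIDE
        System.out.println(relate(25, 35, 30, 40));  // CROSSES
    }
}
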

@@ -51,13 +51,17 @@ class RoundingValuesSource extends ValuesSource.Numeric {
return false;
}
public long round(long value) {
return rounding.round(value);
}
@Override
public SortedNumericDocValues longValues(LeafReaderContext context) throws IOException {
SortedNumericDocValues values = vs.longValues(context);
return new SortedNumericDocValues() {
@Override
public long nextValue() throws IOException {
return rounding.round(values.nextValue());
return round(values.nextValue());
}
@Override
