Merge remote-tracking branch 'es/7.x' into enrich-7.x
commit 855f5cc6a5

@@ -70,7 +70,10 @@ Vagrant.configure(2) do |config|
  'debian-8'.tap do |box|
    config.vm.define box, define_opts do |config|
      config.vm.box = 'elastic/debian-8-x86_64'
      deb_common config, box
      deb_common config, box, extra: <<-SHELL
        # this sometimes gets a bad ip, and doesn't appear to be needed
        rm -f /etc/apt/sources.list.d/http_debian_net_debian.list
      SHELL
    end
  end
  'debian-9'.tap do |box|

@@ -162,8 +165,8 @@ def deb_common(config, name, extra: '')
    s.inline = "sudo sed -i '/tty/!s/mesg n/tty -s \\&\\& mesg n/' /root/.profile"
  end
  extra_with_lintian = <<-SHELL
    install lintian
    #{extra}
    install lintian
  SHELL
  linux_common(
      config,

@@ -27,15 +27,15 @@ import org.gradle.api.tasks.Input
public class BatsOverVagrantTask extends VagrantCommandTask {

    @Input
    String remoteCommand
    Object remoteCommand

    BatsOverVagrantTask() {
        command = 'ssh'
    }

    void setRemoteCommand(String remoteCommand) {
    void setRemoteCommand(Object remoteCommand) {
        this.remoteCommand = Objects.requireNonNull(remoteCommand)
        setArgs(['--command', remoteCommand])
        setArgs((Iterable<?>) ['--command', remoteCommand])
    }

    @Override

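The `remoteCommand` property changes from `String` to `Object` so the command can be a value that is resolved lazily at execution time (for example a Groovy GString) rather than a string fixed at configuration time; the `(Iterable<?>)` cast only disambiguates the `setArgs` overload. A rough Java sketch of the lazy-argument idea, using `Supplier` and made-up names rather than the Gradle task API:

import java.util.Objects;
import java.util.function.Supplier;

// Hypothetical stand-in for a task that defers resolving its command argument.
class RemoteCommandHolder {
    private Object remoteCommand;

    void setRemoteCommand(Object remoteCommand) {
        // Store the raw object; it may be a plain String or a lazily evaluated Supplier.
        this.remoteCommand = Objects.requireNonNull(remoteCommand);
    }

    String resolveCommand() {
        // Only resolve the value when the command is actually needed.
        if (remoteCommand instanceof Supplier) {
            return String.valueOf(((Supplier<?>) remoteCommand).get());
        }
        return remoteCommand.toString();
    }

    public static void main(String[] args) {
        RemoteCommandHolder task = new RemoteCommandHolder();
        String[] version = {"unset"};
        task.setRemoteCommand((Supplier<String>) () -> "run-tests --version " + version[0]);
        version[0] = "7.2.0";                      // value only becomes known later
        System.out.println(task.resolveCommand()); // resolved at execution time
    }
}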
@ -1,11 +1,18 @@
|
|||
package org.elasticsearch.gradle.vagrant
|
||||
|
||||
import org.apache.tools.ant.taskdefs.condition.Os
|
||||
import org.elasticsearch.gradle.BwcVersions
|
||||
import org.elasticsearch.gradle.FileContentsTask
|
||||
import org.elasticsearch.gradle.Jdk
|
||||
import org.elasticsearch.gradle.JdkDownloadPlugin
|
||||
import org.elasticsearch.gradle.LoggedExec
|
||||
import org.elasticsearch.gradle.Version
|
||||
import org.elasticsearch.gradle.BwcVersions
|
||||
import org.gradle.api.*
|
||||
import org.gradle.api.GradleException
|
||||
import org.gradle.api.InvalidUserDataException
|
||||
import org.gradle.api.NamedDomainObjectContainer
|
||||
import org.gradle.api.Plugin
|
||||
import org.gradle.api.Project
|
||||
import org.gradle.api.Task
|
||||
import org.gradle.api.artifacts.dsl.RepositoryHandler
|
||||
import org.gradle.api.execution.TaskExecutionAdapter
|
||||
import org.gradle.api.internal.artifacts.dependencies.DefaultProjectDependency
|
||||
|
@ -15,6 +22,8 @@ import org.gradle.api.tasks.Exec
|
|||
import org.gradle.api.tasks.StopExecutionException
|
||||
import org.gradle.api.tasks.TaskState
|
||||
|
||||
import java.nio.file.Paths
|
||||
|
||||
import static java.util.Collections.unmodifiableList
|
||||
|
||||
class VagrantTestPlugin implements Plugin<Project> {
|
||||
|
@ -30,7 +39,7 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
'oel-6',
|
||||
'oel-7',
|
||||
'opensuse-42',
|
||||
'rhel-8',
|
||||
/* TODO: need a real RHEL license now that it is out of beta 'rhel-8',*/
|
||||
'sles-12',
|
||||
'ubuntu-1604',
|
||||
'ubuntu-1804'
|
||||
|
@ -85,8 +94,22 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
/** extra env vars to pass to vagrant for box configuration **/
|
||||
Map<String, String> vagrantBoxEnvVars = [:]
|
||||
|
||||
private static final String GRADLE_JDK_VERSION = "12.0.1+12@69cfe15208a647278a19ef0990eea691"
|
||||
private Jdk linuxGradleJdk;
|
||||
private Jdk windowsGradleJdk;
|
||||
|
||||
@Override
|
||||
void apply(Project project) {
|
||||
project.pluginManager.apply(JdkDownloadPlugin.class)
|
||||
NamedDomainObjectContainer<Jdk> jdksContainer = (NamedDomainObjectContainer<Jdk>) project.getExtensions().getByName("jdks");
|
||||
linuxGradleJdk = jdksContainer.create("linux_gradle") {
|
||||
version = GRADLE_JDK_VERSION
|
||||
platform = "linux"
|
||||
}
|
||||
windowsGradleJdk = jdksContainer.create("windows_gradle") {
|
||||
version = GRADLE_JDK_VERSION
|
||||
platform = "windows"
|
||||
}
|
||||
|
||||
collectAvailableBoxes(project)
|
||||
|
||||
|
@ -264,7 +287,7 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
}
|
||||
}
|
||||
|
||||
private static void createPrepareVagrantTestEnvTask(Project project) {
|
||||
private void createPrepareVagrantTestEnvTask(Project project) {
|
||||
File packagingDir = new File(project.buildDir, PACKAGING_CONFIGURATION)
|
||||
|
||||
File archivesDir = new File(packagingDir, 'archives')
|
||||
|
@ -280,7 +303,7 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
}
|
||||
|
||||
Task createLinuxRunnerScript = project.tasks.create('createLinuxRunnerScript', FileContentsTask) {
|
||||
dependsOn copyPackagingTests
|
||||
dependsOn copyPackagingTests, linuxGradleJdk
|
||||
file "${testsDir}/run-tests.sh"
|
||||
contents """\
|
||||
if [ "\$#" -eq 0 ]; then
|
||||
|
@ -288,11 +311,12 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
else
|
||||
test_args=( "\$@" )
|
||||
fi
|
||||
java -cp "\$PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}"
|
||||
|
||||
"${-> convertPath(project, linuxGradleJdk.toString()) }"/bin/java -cp "\$PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner "\${test_args[@]}"
|
||||
"""
|
||||
}
|
||||
Task createWindowsRunnerScript = project.tasks.create('createWindowsRunnerScript', FileContentsTask) {
|
||||
dependsOn copyPackagingTests
|
||||
dependsOn copyPackagingTests, windowsGradleJdk
|
||||
file "${testsDir}/run-tests.ps1"
|
||||
// the use of $args rather than param() here is deliberate because the syntax for array (multivalued) parameters is likely
|
||||
// a little trappy for those unfamiliar with powershell
|
||||
|
@ -302,7 +326,7 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
} else {
|
||||
\$testArgs = \$args
|
||||
}
|
||||
java -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs
|
||||
& "${-> convertPath(project, windowsGradleJdk.toString()) }"/bin/java -cp "\$Env:PACKAGING_TESTS/*" org.elasticsearch.packaging.VMTestRunner @testArgs
|
||||
exit \$LASTEXITCODE
|
||||
"""
|
||||
}
|
||||
|
@ -617,4 +641,9 @@ class VagrantTestPlugin implements Plugin<Project> {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
// convert the given path from an elasticsearch repo path to a VM path
|
||||
private String convertPath(Project project, String path) {
|
||||
return "/elasticsearch/" + project.rootDir.toPath().relativize(Paths.get(path));
|
||||
}
|
||||
}
|
||||
|
|
|
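The new `convertPath` helper maps a path inside the checked-out repository to the path where the repo is mounted inside the Vagrant VM (under `/elasticsearch/`). A small standalone sketch of that relativize-and-prefix logic, with a made-up checkout directory and JDK path for illustration:

import java.nio.file.Path;
import java.nio.file.Paths;

class VmPathConverter {
    // Mirrors the relativize-and-prefix logic; rootDir stands in for project.rootDir.
    static String convertPath(Path rootDir, String path) {
        return "/elasticsearch/" + rootDir.relativize(Paths.get(path));
    }

    public static void main(String[] args) {
        Path rootDir = Paths.get("/home/user/elasticsearch");              // hypothetical checkout
        String jdk = "/home/user/elasticsearch/.gradle/jdks/linux-12.0.1"; // hypothetical JDK dir
        System.out.println(convertPath(rootDir, jdk));
        // -> /elasticsearch/.gradle/jdks/linux-12.0.1 (on a Unix-style filesystem)
    }
}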
@ -27,6 +27,7 @@ import org.gradle.api.UnknownTaskException;
|
|||
import org.gradle.api.artifacts.Configuration;
|
||||
import org.gradle.api.artifacts.ConfigurationContainer;
|
||||
import org.gradle.api.artifacts.dsl.DependencyHandler;
|
||||
import org.gradle.api.artifacts.dsl.RepositoryHandler;
|
||||
import org.gradle.api.artifacts.repositories.IvyArtifactRepository;
|
||||
import org.gradle.api.file.CopySpec;
|
||||
import org.gradle.api.file.FileTree;
|
||||
|
@ -45,6 +46,8 @@ import java.util.regex.Matcher;
|
|||
|
||||
public class JdkDownloadPlugin implements Plugin<Project> {
|
||||
|
||||
private static final String REPO_NAME_PREFIX = "jdk_repo_";
|
||||
|
||||
@Override
|
||||
public void apply(Project project) {
|
||||
NamedDomainObjectContainer<Jdk> jdksContainer = project.container(Jdk.class, name ->
|
||||
|
@ -69,6 +72,13 @@ public class JdkDownloadPlugin implements Plugin<Project> {
|
|||
setupRootJdkDownload(project.getRootProject(), platform, version);
|
||||
}
|
||||
});
|
||||
|
||||
// all other repos should ignore the special jdk artifacts
|
||||
project.getRootProject().getRepositories().all(repo -> {
|
||||
if (repo.getName().startsWith(REPO_NAME_PREFIX) == false) {
|
||||
repo.content(content -> content.excludeGroup("jdk"));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private static void setupRootJdkDownload(Project rootProject, String platform, String version) {
|
||||
|
@ -94,26 +104,30 @@ public class JdkDownloadPlugin implements Plugin<Project> {
|
|||
String hash = jdkVersionMatcher.group(5);
|
||||
|
||||
// add fake ivy repo for jdk url
|
||||
String repoName = "jdk_repo_" + version;
|
||||
String repoName = REPO_NAME_PREFIX + version;
|
||||
RepositoryHandler repositories = rootProject.getRepositories();
|
||||
if (rootProject.getRepositories().findByName(repoName) == null) {
|
||||
// simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back
|
||||
rootProject.getRepositories().ivy(ivyRepo -> {
|
||||
ivyRepo.setName(repoName);
|
||||
ivyRepo.setUrl("https://download.oracle.com");
|
||||
ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact);
|
||||
ivyRepo.patternLayout(layout ->
|
||||
layout.artifact("java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]"));
|
||||
ivyRepo.content(content -> content.includeGroup("jdk"));
|
||||
});
|
||||
// current pattern since 12.0.1
|
||||
rootProject.getRepositories().ivy(ivyRepo -> {
|
||||
ivyRepo.setName(repoName + "_with_hash");
|
||||
ivyRepo.setUrl("https://download.oracle.com");
|
||||
ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact);
|
||||
ivyRepo.patternLayout(layout -> layout.artifact(
|
||||
"java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]"));
|
||||
ivyRepo.content(content -> content.includeGroup("jdk"));
|
||||
});
|
||||
if (hash != null) {
|
||||
// current pattern since 12.0.1
|
||||
repositories.ivy(ivyRepo -> {
|
||||
ivyRepo.setName(repoName);
|
||||
ivyRepo.setUrl("https://download.oracle.com");
|
||||
ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact);
|
||||
ivyRepo.patternLayout(layout -> layout.artifact(
|
||||
"java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]"));
|
||||
ivyRepo.content(content -> content.includeGroup("jdk"));
|
||||
});
|
||||
} else {
|
||||
// simpler legacy pattern from JDK 9 to JDK 12 that we are advocating to Oracle to bring back
|
||||
repositories.ivy(ivyRepo -> {
|
||||
ivyRepo.setName(repoName);
|
||||
ivyRepo.setUrl("https://download.oracle.com");
|
||||
ivyRepo.metadataSources(IvyArtifactRepository.MetadataSources::artifact);
|
||||
ivyRepo.patternLayout(layout ->
|
||||
layout.artifact("java/GA/jdk" + jdkMajor + "/" + jdkBuild + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]"));
|
||||
ivyRepo.content(content -> content.includeGroup("jdk"));
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// add the jdk as a "dependency"
|
||||
|
|
|
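The repository setup now branches on whether the JDK version string carries a hash component: builds since 12.0.1 live under `.../jdk<version>/<hash>/<build>/GPL/...`, while older GA builds use the shorter `.../jdk<major>/<build>/GPL/...` layout. A rough sketch of that URL selection in plain Java (not the Gradle/Ivy API; the build numbers in main are illustrative):

class JdkUrlLayout {
    static String artifactPattern(String jdkMajor, String jdkVersion, String jdkBuild, String hash) {
        if (hash != null) {
            // current pattern since 12.0.1
            return "java/GA/jdk" + jdkVersion + "/" + hash + "/" + jdkBuild
                + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]";
        }
        // simpler legacy pattern from JDK 9 to JDK 12
        return "java/GA/jdk" + jdkMajor + "/" + jdkBuild
            + "/GPL/openjdk-[revision]_[module]-x64_bin.[ext]";
    }

    public static void main(String[] args) {
        System.out.println(artifactPattern("12", "12.0.1", "12", "69cfe15208a647278a19ef0990eea691"));
        System.out.println(artifactPattern("11", "11.0.2", "9", null));
    }
}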
@ -28,11 +28,13 @@ import org.gradle.api.artifacts.Dependency;
|
|||
import org.gradle.api.file.FileTree;
|
||||
import org.gradle.api.specs.Spec;
|
||||
import org.gradle.api.tasks.CacheableTask;
|
||||
import org.gradle.api.tasks.Classpath;
|
||||
import org.gradle.api.tasks.Input;
|
||||
import org.gradle.api.tasks.InputFile;
|
||||
import org.gradle.api.tasks.InputFiles;
|
||||
import org.gradle.api.tasks.Internal;
|
||||
import org.gradle.api.tasks.Optional;
|
||||
import org.gradle.api.tasks.OutputDirectory;
|
||||
import org.gradle.api.tasks.OutputFile;
|
||||
import org.gradle.api.tasks.PathSensitive;
|
||||
import org.gradle.api.tasks.PathSensitivity;
|
||||
import org.gradle.api.tasks.SkipWhenEmpty;
|
||||
|
@ -45,6 +47,7 @@ import java.io.IOException;
|
|||
import java.net.URISyntaxException;
|
||||
import java.net.URL;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Set;
|
||||
|
@ -113,7 +116,7 @@ public class ThirdPartyAuditTask extends DefaultTask {
|
|||
this.javaHome = javaHome;
|
||||
}
|
||||
|
||||
@OutputDirectory
|
||||
@Internal
|
||||
public File getJarExpandDir() {
|
||||
return new File(
|
||||
new File(getProject().getBuildDir(), "precommit/thirdPartyAudit"),
|
||||
|
@ -121,6 +124,11 @@ public class ThirdPartyAuditTask extends DefaultTask {
|
|||
);
|
||||
}
|
||||
|
||||
@OutputFile
|
||||
public File getSuccessMarker() {
|
||||
return new File(getProject().getBuildDir(), "markers/" + getName());
|
||||
}
|
||||
|
||||
public void ignoreMissingClasses(String... classesOrPackages) {
|
||||
if (classesOrPackages.length == 0) {
|
||||
missingClassExcludes = null;
|
||||
|
@ -157,8 +165,7 @@ public class ThirdPartyAuditTask extends DefaultTask {
|
|||
return missingClassExcludes;
|
||||
}
|
||||
|
||||
@InputFiles
|
||||
@PathSensitive(PathSensitivity.NAME_ONLY)
|
||||
@Classpath
|
||||
@SkipWhenEmpty
|
||||
public Set<File> getJarsToScan() {
|
||||
// These are SelfResolvingDependency, and some of them backed by file collections, like the Gradle API files,
|
||||
|
@ -241,6 +248,10 @@ public class ThirdPartyAuditTask extends DefaultTask {
|
|||
}
|
||||
|
||||
assertNoJarHell(jdkJarHellClasses);
|
||||
|
||||
// Mark successful third party audit check
|
||||
getSuccessMarker().getParentFile().mkdirs();
|
||||
Files.write(getSuccessMarker().toPath(), new byte[]{});
|
||||
}
|
||||
|
||||
private void logForbiddenAPIsOutput(String forbiddenApisOutput) {
|
||||
|
|
|
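Declaring `getSuccessMarker()` as an `@OutputFile` and writing an empty file at the end of the check gives Gradle a concrete output to track for up-to-date purposes, since the audit itself produces nothing else. A minimal sketch of the marker-file idiom outside Gradle (paths and method names here are hypothetical):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

class SuccessMarkerDemo {
    public static void main(String[] args) throws IOException {
        Path marker = Paths.get("build", "markers", "thirdPartyAudit");
        runAuditChecks();                       // would throw if the audit failed
        Files.createDirectories(marker.getParent());
        Files.write(marker, new byte[0]);       // empty file: only its existence/mtime matters
        System.out.println("Marker written: " + marker);
    }

    private static void runAuditChecks() {
        // placeholder for the real forbidden-APIs / jar-hell checks
    }
}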
@ -182,8 +182,9 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
claimsInventory.put(elasticsearchCluster, claimsInventory.getOrDefault(elasticsearchCluster, 0) + 1);
|
||||
}
|
||||
}));
|
||||
|
||||
logger.info("Claims inventory: {}", claimsInventory);
|
||||
if (claimsInventory.isEmpty() == false) {
|
||||
logger.info("Claims inventory: {}", claimsInventory);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -279,8 +280,14 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
// the clusters will look for artifacts there based on the naming conventions.
|
||||
// Tasks that use a cluster will add this as a dependency automatically so it's guaranteed to run early in
|
||||
// the build.
|
||||
Task sync = Boilerplate.maybeCreate(rootProject.getTasks(), SYNC_ARTIFACTS_TASK_NAME, onCreate -> {
|
||||
Boilerplate.maybeCreate(rootProject.getTasks(), SYNC_ARTIFACTS_TASK_NAME, onCreate -> {
|
||||
onCreate.getOutputs().dir(getExtractDir(rootProject));
|
||||
onCreate.getInputs().files(
|
||||
project.getRootProject().getConfigurations().matching(conf -> conf.getName().startsWith(HELPER_CONFIGURATION_PREFIX))
|
||||
);
|
||||
onCreate.dependsOn(project.getRootProject().getConfigurations()
|
||||
.matching(conf -> conf.getName().startsWith(HELPER_CONFIGURATION_PREFIX))
|
||||
);
|
||||
// NOTE: Gradle doesn't allow a lambda here ( fails at runtime )
|
||||
onCreate.doFirst(new Action<Task>() {
|
||||
@Override
|
||||
|
@ -290,6 +297,31 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
project.delete(getExtractDir(rootProject));
|
||||
}
|
||||
});
|
||||
onCreate.doLast(new Action<Task>() {
|
||||
@Override
|
||||
public void execute(Task task) {
|
||||
project.getRootProject().getConfigurations()
|
||||
.matching(config -> config.getName().startsWith(HELPER_CONFIGURATION_PREFIX))
|
||||
.forEach(config -> project.copy(spec ->
|
||||
config.getResolvedConfiguration()
|
||||
.getResolvedArtifacts()
|
||||
.forEach(resolvedArtifact -> {
|
||||
final FileTree files;
|
||||
File file = resolvedArtifact.getFile();
|
||||
if (file.getName().endsWith(".zip")) {
|
||||
files = project.zipTree(file);
|
||||
} else if (file.getName().endsWith("tar.gz")) {
|
||||
files = project.tarTree(file);
|
||||
} else {
|
||||
throw new IllegalArgumentException("Can't extract " + file + " unknown file extension");
|
||||
}
|
||||
logger.info("Extracting {}@{}", resolvedArtifact, config);
|
||||
spec.from(files, s -> s.into(resolvedArtifact.getModuleVersion().getId().getGroup()));
|
||||
spec.into(getExtractDir(project));
|
||||
}))
|
||||
);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// When the project evaluated we know of all tasks that use clusters.
|
||||
|
@ -347,29 +379,6 @@ public class TestClustersPlugin implements Plugin<Project> {
|
|||
distribution.getFileExtension());
|
||||
|
||||
}
|
||||
|
||||
sync.getInputs().files(helperConfiguration);
|
||||
// NOTE: Gradle doesn't allow a lambda here ( fails at runtime )
|
||||
sync.doLast(new Action<Task>() {
|
||||
@Override
|
||||
public void execute(Task task) {
|
||||
project.copy(spec ->
|
||||
helperConfiguration.getResolvedConfiguration().getResolvedArtifacts().forEach(resolvedArtifact -> {
|
||||
final FileTree files;
|
||||
File file = resolvedArtifact.getFile();
|
||||
if (file.getName().endsWith(".zip")) {
|
||||
files = project.zipTree(file);
|
||||
} else if (file.getName().endsWith("tar.gz")) {
|
||||
files = project.tarTree(file);
|
||||
} else {
|
||||
throw new IllegalArgumentException("Can't extract " + file + " unknown file extension");
|
||||
}
|
||||
|
||||
spec.from(files, s -> s.into(resolvedArtifact.getModuleVersion().getId().getGroup()));
|
||||
spec.into(getExtractDir(project));
|
||||
}));
|
||||
}
|
||||
});
|
||||
})));
|
||||
}
|
||||
|
||||
|
|
|
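The sync task picks an archive handler from the file name (`.zip` vs `tar.gz`) and fails loudly on anything else. A tiny stand-alone version of that dispatch, with the Gradle `zipTree`/`tarTree` calls replaced by strings so it runs on its own:

class ArchiveDispatch {
    // Stand-in for project.zipTree(...) / project.tarTree(...).
    static String extractorFor(String fileName) {
        if (fileName.endsWith(".zip")) {
            return "zipTree";
        } else if (fileName.endsWith("tar.gz")) {
            return "tarTree";
        } else {
            throw new IllegalArgumentException("Can't extract " + fileName + " unknown file extension");
        }
    }

    public static void main(String[] args) {
        System.out.println(extractorFor("elasticsearch-7.2.0.tar.gz")); // tarTree
        System.out.println(extractorFor("elasticsearch-7.2.0.zip"));    // zipTree
    }
}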
@@ -0,0 +1,67 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.gradle;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

/**
 * An outputstream to a File that is lazily opened on the first write.
 */
class LazyFileOutputStream extends OutputStream {
    private OutputStream delegate;

    LazyFileOutputStream(File file) {
        // use an initial dummy delegate to avoid doing a conditional on every write
        this.delegate = new OutputStream() {
            private void bootstrap() throws IOException {
                file.getParentFile().mkdirs();
                delegate = new FileOutputStream(file);
            }
            @Override
            public void write(int b) throws IOException {
                bootstrap();
                delegate.write(b);
            }
            @Override
            public void write(byte b[], int off, int len) throws IOException {
                bootstrap();
                delegate.write(b, off, len);
            }
        };
    }

    @Override
    public void write(int b) throws IOException {
        delegate.write(b);
    }

    @Override
    public void write(byte b[], int off, int len) throws IOException {
        delegate.write(b, off, len);
    }

    @Override
    public void close() throws IOException {
        delegate.close();
    }
}

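One way to see the behaviour this class is after: no file (and no parent directory) appears on disk until something is actually written. A small, hypothetical usage check, assuming a copy of the class above is on the classpath and the target path is illustrative:

import java.io.File;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

class LazyFileOutputStreamDemo {
    public static void main(String[] args) throws Exception {
        File target = new File("build/buffered-output/demo.log"); // hypothetical spool location
        OutputStream out = new LazyFileOutputStream(target);
        System.out.println("exists before write: " + target.exists()); // false: nothing opened yet
        out.write("hello".getBytes(StandardCharsets.UTF_8));           // first write bootstraps the file
        out.close();
        System.out.println("exists after write:  " + target.exists()); // true
    }
}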
@ -3,14 +3,22 @@ package org.elasticsearch.gradle;
|
|||
import org.gradle.api.Action;
|
||||
import org.gradle.api.GradleException;
|
||||
import org.gradle.api.Project;
|
||||
import org.gradle.api.logging.Logger;
|
||||
import org.gradle.api.tasks.Exec;
|
||||
import org.gradle.api.tasks.Internal;
|
||||
import org.gradle.process.BaseExecSpec;
|
||||
import org.gradle.process.ExecResult;
|
||||
import org.gradle.process.ExecSpec;
|
||||
import org.gradle.process.JavaExecSpec;
|
||||
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.File;
|
||||
import java.io.IOException;
|
||||
import java.io.OutputStream;
|
||||
import java.io.UnsupportedEncodingException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Function;
|
||||
|
||||
/**
|
||||
|
@ -19,37 +27,58 @@ import java.util.function.Function;
|
|||
@SuppressWarnings("unchecked")
|
||||
public class LoggedExec extends Exec {
|
||||
|
||||
private Consumer<Logger> outputLogger;
|
||||
|
||||
public LoggedExec() {
|
||||
ByteArrayOutputStream output = new ByteArrayOutputStream();
|
||||
ByteArrayOutputStream error = new ByteArrayOutputStream();
|
||||
|
||||
if (getLogger().isInfoEnabled() == false) {
|
||||
setStandardOutput(output);
|
||||
setErrorOutput(error);
|
||||
setIgnoreExitValue(true);
|
||||
doLast((unused) -> {
|
||||
if (getExecResult().getExitValue() != 0) {
|
||||
try {
|
||||
getLogger().error("Standard output:");
|
||||
getLogger().error(output.toString("UTF-8"));
|
||||
getLogger().error("Standard error:");
|
||||
getLogger().error(error.toString("UTF-8"));
|
||||
} catch (UnsupportedEncodingException e) {
|
||||
throw new GradleException("Failed to read exec output", e);
|
||||
}
|
||||
throw new GradleException(
|
||||
String.format(
|
||||
"Process '%s %s' finished with non-zero exit value %d",
|
||||
getExecutable(),
|
||||
getArgs(),
|
||||
getExecResult().getExitValue()
|
||||
)
|
||||
);
|
||||
setSpoolOutput(false);
|
||||
doLast(task -> {
|
||||
if (getExecResult().getExitValue() != 0) {
|
||||
try {
|
||||
getLogger().error("Output for " + getExecutable() + ":");
|
||||
outputLogger.accept(getLogger());
|
||||
} catch (Exception e) {
|
||||
throw new GradleException("Failed to read exec output", e);
|
||||
}
|
||||
throw new GradleException(
|
||||
String.format(
|
||||
"Process '%s %s' finished with non-zero exit value %d",
|
||||
getExecutable(),
|
||||
getArgs(),
|
||||
getExecResult().getExitValue()
|
||||
)
|
||||
);
|
||||
}
|
||||
);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@Internal
|
||||
public void setSpoolOutput(boolean spoolOutput) {
|
||||
final OutputStream out;
|
||||
if (spoolOutput) {
|
||||
File spoolFile = new File(getProject().getBuildDir() + "/buffered-output/" + this.getName());
|
||||
out = new LazyFileOutputStream(spoolFile);
|
||||
outputLogger = logger -> {
|
||||
try {
|
||||
// the file may not exist if the command never output anything
|
||||
if (Files.exists(spoolFile.toPath())) {
|
||||
Files.lines(spoolFile.toPath()).forEach(logger::error);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
throw new RuntimeException("could not log", e);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
out = new ByteArrayOutputStream();
|
||||
outputLogger = logger -> logger.error(((ByteArrayOutputStream) out).toString(StandardCharsets.UTF_8));
|
||||
}
|
||||
setStandardOutput(out);
|
||||
setErrorOutput(out);
|
||||
}
|
||||
|
||||
public static ExecResult exec(Project project, Action<ExecSpec> action) {
|
||||
return genericExec(project, project::exec, action);
|
||||
}
|
||||
|
|
|
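`setSpoolOutput` swaps the capture strategy: an in-memory `ByteArrayOutputStream` is fine for short commands, while long-running ones (like the bwc Gradle builds that now run through `LoggedExec` with `spoolOutput = true`) are spooled to a file so their output is not held in memory. A compact sketch of the same choice, with a `Consumer<String>` standing in for Gradle's `Logger` and a plain `FileOutputStream` where the task itself uses `LazyFileOutputStream`:

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.function.Consumer;

class OutputCapture {
    OutputStream stream;               // wired to the process' stdout/stderr
    Consumer<Consumer<String>> replay; // replays captured output into a logger on failure

    static OutputCapture create(boolean spool, File spoolFile) throws IOException {
        OutputCapture capture = new OutputCapture();
        if (spool) {
            // long-running commands: write to disk instead of buffering everything in memory
            spoolFile.getParentFile().mkdirs();
            capture.stream = new FileOutputStream(spoolFile);
            capture.replay = logger -> {
                try {
                    if (Files.exists(spoolFile.toPath())) { // the command may never have printed anything
                        Files.lines(spoolFile.toPath()).forEach(logger);
                    }
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            };
        } else {
            // short commands: an in-memory buffer is simpler
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            capture.stream = buffer;
            capture.replay = logger -> logger.accept(new String(buffer.toByteArray(), StandardCharsets.UTF_8));
        }
        return capture;
    }

    public static void main(String[] args) throws IOException {
        OutputCapture c = create(true, new File("build/buffered-output/bwc-build.log"));
        c.stream.write("simulated command output\n".getBytes(StandardCharsets.UTF_8));
        c.stream.close();
        c.replay.accept(System.out::println); // what LoggedExec does when the exit value is non-zero
    }
}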
@@ -75,7 +75,7 @@ public class BuildExamplePluginsIT extends GradleIntegrationTestCase {
    }

    public void testCurrentExamplePlugin() throws IOException {
        FileUtils.copyDirectory(examplePlugin, tmpDir.getRoot());
        FileUtils.copyDirectory(examplePlugin, tmpDir.getRoot(), pathname -> pathname.getPath().contains("/build/") == false);

        adaptBuildScriptForTest();

@@ -156,5 +156,4 @@ public class BuildExamplePluginsIT extends GradleIntegrationTestCase {
            throw new RuntimeException(e);
        }
    }

}

@ -41,20 +41,33 @@ import static org.hamcrest.CoreMatchers.equalTo;
|
|||
|
||||
public class JdkDownloadPluginIT extends GradleIntegrationTestCase {
|
||||
|
||||
private static final String FAKE_JDK_VERSION = "1.0.2+99";
|
||||
private static final String OLD_JDK_VERSION = "1+99";
|
||||
private static final String JDK_VERSION = "12.0.1+99@123456789123456789123456789abcde";
|
||||
private static final Pattern JDK_HOME_LOGLINE = Pattern.compile("JDK HOME: (.*)");
|
||||
private static final Pattern NUM_CONFIGS_LOGLINE = Pattern.compile("NUM CONFIGS: (.*)");
|
||||
|
||||
public void testLinuxExtraction() throws IOException {
|
||||
assertExtraction("getLinuxJdk", "linux", "bin/java");
|
||||
assertExtraction("getLinuxJdk", "linux", "bin/java", JDK_VERSION);
|
||||
}
|
||||
|
||||
public void testDarwinExtraction() throws IOException {
|
||||
assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java");
|
||||
assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java", JDK_VERSION);
|
||||
}
|
||||
|
||||
public void testWindowsExtraction() throws IOException {
|
||||
assertExtraction("getWindowsJdk", "windows", "bin/java");
|
||||
assertExtraction("getWindowsJdk", "windows", "bin/java", JDK_VERSION);
|
||||
}
|
||||
|
||||
public void testLinuxExtractionOldVersion() throws IOException {
|
||||
assertExtraction("getLinuxJdk", "linux", "bin/java", OLD_JDK_VERSION);
|
||||
}
|
||||
|
||||
public void testDarwinExtractionOldVersion() throws IOException {
|
||||
assertExtraction("getDarwinJdk", "osx", "Contents/Home/bin/java", OLD_JDK_VERSION);
|
||||
}
|
||||
|
||||
public void testWindowsExtractionOldVersion() throws IOException {
|
||||
assertExtraction("getWindowsJdk", "windows", "bin/java", OLD_JDK_VERSION);
|
||||
}
|
||||
|
||||
public void testCrossProjectReuse() throws IOException {
|
||||
|
@ -62,39 +75,41 @@ public class JdkDownloadPluginIT extends GradleIntegrationTestCase {
|
|||
Matcher matcher = NUM_CONFIGS_LOGLINE.matcher(result.getOutput());
|
||||
assertTrue("could not find num configs in output: " + result.getOutput(), matcher.find());
|
||||
assertThat(Integer.parseInt(matcher.group(1)), equalTo(6)); // 3 import configs, 3 export configs
|
||||
});
|
||||
}, JDK_VERSION);
|
||||
}
|
||||
|
||||
public void assertExtraction(String taskname, String platform, String javaBin) throws IOException {
|
||||
public void assertExtraction(String taskname, String platform, String javaBin, String version) throws IOException {
|
||||
runBuild(taskname, platform, result -> {
|
||||
Matcher matcher = JDK_HOME_LOGLINE.matcher(result.getOutput());
|
||||
assertTrue("could not find jdk home in output: " + result.getOutput(), matcher.find());
|
||||
String jdkHome = matcher.group(1);
|
||||
Path javaPath = Paths.get(jdkHome, javaBin);
|
||||
assertTrue(javaPath.toString(), Files.exists(javaPath));
|
||||
});
|
||||
}, version);
|
||||
}
|
||||
|
||||
private void runBuild(String taskname, String platform, Consumer<BuildResult> assertions) throws IOException {
|
||||
private void runBuild(String taskname, String platform, Consumer<BuildResult> assertions, String version) throws IOException {
|
||||
WireMockServer wireMock = new WireMockServer(0);
|
||||
try {
|
||||
String extension = platform.equals("windows") ? "zip" : "tar.gz";
|
||||
String filename = "openjdk-1.0.2_" + platform + "-x64_bin." + extension;
|
||||
wireMock.stubFor(head(urlEqualTo("/java/GA/jdk1/99/GPL/" + filename))
|
||||
.willReturn(aResponse().withStatus(200)));
|
||||
boolean isOld = version.equals(OLD_JDK_VERSION);
|
||||
String filename = "openjdk-" + (isOld ? "1" : "12.0.1") + "_" + platform + "-x64_bin." + extension;
|
||||
final byte[] filebytes;
|
||||
try (InputStream stream = JdkDownloadPluginIT.class.getResourceAsStream(filename)) {
|
||||
try (InputStream stream = JdkDownloadPluginIT.class.getResourceAsStream("fake_openjdk_" + platform + "." + extension)) {
|
||||
filebytes = stream.readAllBytes();
|
||||
}
|
||||
wireMock.stubFor(get(urlEqualTo("/java/GA/jdk1/99/GPL/" + filename))
|
||||
.willReturn(aResponse().withStatus(200).withBody(filebytes)));
|
||||
String versionPath = isOld ? "jdk1/99" : "jdk12.0.1/123456789123456789123456789abcde/99";
|
||||
String urlPath = "/java/GA/" + versionPath + "/GPL/" + filename;
|
||||
wireMock.stubFor(head(urlEqualTo(urlPath)).willReturn(aResponse().withStatus(200)));
|
||||
wireMock.stubFor(get(urlEqualTo(urlPath)).willReturn(aResponse().withStatus(200).withBody(filebytes)));
|
||||
wireMock.start();
|
||||
|
||||
GradleRunner runner = GradleRunner.create().withProjectDir(getProjectDir("jdk-download"))
|
||||
.withArguments(taskname,
|
||||
"-Dlocal.repo.path=" + getLocalTestRepoPath(),
|
||||
"-Dtests.jdk_version=" + FAKE_JDK_VERSION,
|
||||
"-Dtests.jdk_repo=" + wireMock.baseUrl())
|
||||
"-Dtests.jdk_version=" + version,
|
||||
"-Dtests.jdk_repo=" + wireMock.baseUrl(),
|
||||
"-i")
|
||||
.withPluginClasspath();
|
||||
|
||||
BuildResult result = runner.build();
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.gradle.testclusters;
|
|||
import org.elasticsearch.gradle.test.GradleIntegrationTestCase;
|
||||
import org.gradle.testkit.runner.BuildResult;
|
||||
import org.gradle.testkit.runner.GradleRunner;
|
||||
import org.junit.Ignore;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
|
@ -81,6 +82,7 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
|
|||
);
|
||||
}
|
||||
|
||||
@Ignore // https://github.com/elastic/elasticsearch/issues/41256
|
||||
public void testMultiProject() {
|
||||
BuildResult result = getTestClustersRunner(
|
||||
"user1", "user2", "-s", "-i", "--parallel", "-Dlocal.repo.path=" + getLocalTestRepoPath()
|
||||
|
@ -158,6 +160,7 @@ public class TestClustersPluginIT extends GradleIntegrationTestCase {
|
|||
);
|
||||
}
|
||||
|
||||
@Ignore // https://github.com/elastic/elasticsearch/issues/41256
|
||||
public void testMultiNode() {
|
||||
BuildResult result = getTestClustersRunner(":multiNode").build();
|
||||
assertTaskSuccessful(result, ":multiNode");
|
||||
|
|
|
@ -39,25 +39,29 @@ public class PivotConfig implements ToXContentObject {
|
|||
|
||||
private static final ParseField GROUP_BY = new ParseField("group_by");
|
||||
private static final ParseField AGGREGATIONS = new ParseField("aggregations");
|
||||
private static final ParseField MAX_PAGE_SEARCH_SIZE = new ParseField("max_page_search_size");
|
||||
|
||||
private final GroupConfig groups;
|
||||
private final AggregationConfig aggregationConfig;
|
||||
private final Integer maxPageSearchSize;
|
||||
|
||||
private static final ConstructingObjectParser<PivotConfig, Void> PARSER = new ConstructingObjectParser<>("pivot_config", true,
|
||||
args -> new PivotConfig((GroupConfig) args[0], (AggregationConfig) args[1]));
|
||||
args -> new PivotConfig((GroupConfig) args[0], (AggregationConfig) args[1], (Integer) args[2]));
|
||||
|
||||
static {
|
||||
PARSER.declareObject(constructorArg(), (p, c) -> (GroupConfig.fromXContent(p)), GROUP_BY);
|
||||
PARSER.declareObject(optionalConstructorArg(), (p, c) -> AggregationConfig.fromXContent(p), AGGREGATIONS);
|
||||
PARSER.declareInt(optionalConstructorArg(), MAX_PAGE_SEARCH_SIZE);
|
||||
}
|
||||
|
||||
public static PivotConfig fromXContent(final XContentParser parser) {
|
||||
return PARSER.apply(parser, null);
|
||||
}
|
||||
|
||||
PivotConfig(GroupConfig groups, final AggregationConfig aggregationConfig) {
|
||||
PivotConfig(GroupConfig groups, final AggregationConfig aggregationConfig, Integer maxPageSearchSize) {
|
||||
this.groups = groups;
|
||||
this.aggregationConfig = aggregationConfig;
|
||||
this.maxPageSearchSize = maxPageSearchSize;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -65,6 +69,9 @@ public class PivotConfig implements ToXContentObject {
|
|||
builder.startObject();
|
||||
builder.field(GROUP_BY.getPreferredName(), groups);
|
||||
builder.field(AGGREGATIONS.getPreferredName(), aggregationConfig);
|
||||
if (maxPageSearchSize != null) {
|
||||
builder.field(MAX_PAGE_SEARCH_SIZE.getPreferredName(), maxPageSearchSize);
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
@ -77,6 +84,10 @@ public class PivotConfig implements ToXContentObject {
|
|||
return groups;
|
||||
}
|
||||
|
||||
public Integer getMaxPageSearchSize() {
|
||||
return maxPageSearchSize;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
if (this == other) {
|
||||
|
@ -89,12 +100,14 @@ public class PivotConfig implements ToXContentObject {
|
|||
|
||||
final PivotConfig that = (PivotConfig) other;
|
||||
|
||||
return Objects.equals(this.groups, that.groups) && Objects.equals(this.aggregationConfig, that.aggregationConfig);
|
||||
return Objects.equals(this.groups, that.groups)
|
||||
&& Objects.equals(this.aggregationConfig, that.aggregationConfig)
|
||||
&& Objects.equals(this.maxPageSearchSize, that.maxPageSearchSize);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(groups, aggregationConfig);
|
||||
return Objects.hash(groups, aggregationConfig, maxPageSearchSize);
|
||||
}
|
||||
|
||||
public static Builder builder() {
|
||||
|
@ -104,6 +117,7 @@ public class PivotConfig implements ToXContentObject {
|
|||
public static class Builder {
|
||||
private GroupConfig groups;
|
||||
private AggregationConfig aggregationConfig;
|
||||
private Integer maxPageSearchSize;
|
||||
|
||||
/**
|
||||
* Set how to group the source data
|
||||
|
@ -135,8 +149,22 @@ public class PivotConfig implements ToXContentObject {
|
|||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the maximum paging size (maxPageSearchSize) that the data frame transform can use when
* pulling the data from the source index.
|
||||
*
|
||||
* If OOM is triggered, the paging maxPageSearchSize is dynamically reduced so that the transform can continue to gather data.
|
||||
*
|
||||
* @param maxPageSearchSize Integer value between 10 and 10_000
|
||||
* @return the {@link Builder} with the paging maxPageSearchSize set.
|
||||
*/
|
||||
public Builder setMaxPageSearchSize(Integer maxPageSearchSize) {
|
||||
this.maxPageSearchSize = maxPageSearchSize;
|
||||
return this;
|
||||
}
|
||||
|
||||
public PivotConfig build() {
|
||||
return new PivotConfig(groups, aggregationConfig);
|
||||
return new PivotConfig(groups, aggregationConfig, maxPageSearchSize);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
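With the new optional `max_page_search_size` member, `toXContent` only emits the field when it was set, so older documents round-trip unchanged. A rough sketch of the conditional serialization using a plain `StringBuilder` instead of `XContentBuilder` (field names taken from the parser above; the group/aggregation JSON snippets in main are placeholders):

class PivotConfigJsonSketch {
    static String toJson(String groupBy, String aggregations, Integer maxPageSearchSize) {
        StringBuilder json = new StringBuilder("{");
        json.append("\"group_by\":").append(groupBy);
        json.append(",\"aggregations\":").append(aggregations);
        if (maxPageSearchSize != null) {                      // only serialized when explicitly set
            json.append(",\"max_page_search_size\":").append(maxPageSearchSize);
        }
        return json.append("}").toString();
    }

    public static void main(String[] args) {
        System.out.println(toJson("{\"by_day\":{}}", "{\"avg_price\":{}}", 1000));
        System.out.println(toJson("{\"by_day\":{}}", "{\"avg_price\":{}}", null)); // field omitted
    }
}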
@ -47,6 +47,8 @@ public class ModelSizeStats implements ToXContentObject {
|
|||
* Field Names
|
||||
*/
|
||||
public static final ParseField MODEL_BYTES_FIELD = new ParseField("model_bytes");
|
||||
public static final ParseField MODEL_BYTES_EXCEEDED_FIELD = new ParseField("model_bytes_exceeded");
|
||||
public static final ParseField MODEL_BYTES_MEMORY_LIMIT_FIELD = new ParseField("model_bytes_memory_limit");
|
||||
public static final ParseField TOTAL_BY_FIELD_COUNT_FIELD = new ParseField("total_by_field_count");
|
||||
public static final ParseField TOTAL_OVER_FIELD_COUNT_FIELD = new ParseField("total_over_field_count");
|
||||
public static final ParseField TOTAL_PARTITION_FIELD_COUNT_FIELD = new ParseField("total_partition_field_count");
|
||||
|
@ -61,6 +63,8 @@ public class ModelSizeStats implements ToXContentObject {
|
|||
static {
|
||||
PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID);
|
||||
PARSER.declareLong(Builder::setModelBytes, MODEL_BYTES_FIELD);
|
||||
PARSER.declareLong(Builder::setModelBytesExceeded, MODEL_BYTES_EXCEEDED_FIELD);
|
||||
PARSER.declareLong(Builder::setModelBytesMemoryLimit, MODEL_BYTES_MEMORY_LIMIT_FIELD);
|
||||
PARSER.declareLong(Builder::setBucketAllocationFailuresCount, BUCKET_ALLOCATION_FAILURES_COUNT_FIELD);
|
||||
PARSER.declareLong(Builder::setTotalByFieldCount, TOTAL_BY_FIELD_COUNT_FIELD);
|
||||
PARSER.declareLong(Builder::setTotalOverFieldCount, TOTAL_OVER_FIELD_COUNT_FIELD);
|
||||
|
@ -97,6 +101,8 @@ public class ModelSizeStats implements ToXContentObject {
|
|||
|
||||
private final String jobId;
|
||||
private final long modelBytes;
|
||||
private final Long modelBytesExceeded;
|
||||
private final Long modelBytesMemoryLimit;
|
||||
private final long totalByFieldCount;
|
||||
private final long totalOverFieldCount;
|
||||
private final long totalPartitionFieldCount;
|
||||
|
@ -105,11 +111,13 @@ public class ModelSizeStats implements ToXContentObject {
|
|||
private final Date timestamp;
|
||||
private final Date logTime;
|
||||
|
||||
private ModelSizeStats(String jobId, long modelBytes, long totalByFieldCount, long totalOverFieldCount,
|
||||
long totalPartitionFieldCount, long bucketAllocationFailuresCount, MemoryStatus memoryStatus,
|
||||
Date timestamp, Date logTime) {
|
||||
private ModelSizeStats(String jobId, long modelBytes, Long modelBytesExceeded, Long modelBytesMemoryLimit, long totalByFieldCount,
|
||||
long totalOverFieldCount, long totalPartitionFieldCount, long bucketAllocationFailuresCount,
|
||||
MemoryStatus memoryStatus, Date timestamp, Date logTime) {
|
||||
this.jobId = jobId;
|
||||
this.modelBytes = modelBytes;
|
||||
this.modelBytesExceeded = modelBytesExceeded;
|
||||
this.modelBytesMemoryLimit = modelBytesMemoryLimit;
|
||||
this.totalByFieldCount = totalByFieldCount;
|
||||
this.totalOverFieldCount = totalOverFieldCount;
|
||||
this.totalPartitionFieldCount = totalPartitionFieldCount;
|
||||
|
@ -126,6 +134,12 @@ public class ModelSizeStats implements ToXContentObject {
|
|||
builder.field(Job.ID.getPreferredName(), jobId);
|
||||
builder.field(Result.RESULT_TYPE.getPreferredName(), RESULT_TYPE_VALUE);
|
||||
builder.field(MODEL_BYTES_FIELD.getPreferredName(), modelBytes);
|
||||
if (modelBytesExceeded != null) {
|
||||
builder.field(MODEL_BYTES_EXCEEDED_FIELD.getPreferredName(), modelBytesExceeded);
|
||||
}
|
||||
if (modelBytesMemoryLimit != null) {
|
||||
builder.field(MODEL_BYTES_MEMORY_LIMIT_FIELD.getPreferredName(), modelBytesMemoryLimit);
|
||||
}
|
||||
builder.field(TOTAL_BY_FIELD_COUNT_FIELD.getPreferredName(), totalByFieldCount);
|
||||
builder.field(TOTAL_OVER_FIELD_COUNT_FIELD.getPreferredName(), totalOverFieldCount);
|
||||
builder.field(TOTAL_PARTITION_FIELD_COUNT_FIELD.getPreferredName(), totalPartitionFieldCount);
|
||||
|
@ -148,6 +162,14 @@ public class ModelSizeStats implements ToXContentObject {
|
|||
return modelBytes;
|
||||
}
|
||||
|
||||
public Long getModelBytesExceeded() {
|
||||
return modelBytesExceeded;
|
||||
}
|
||||
|
||||
public Long getModelBytesMemoryLimit() {
|
||||
return modelBytesMemoryLimit;
|
||||
}
|
||||
|
||||
public long getTotalByFieldCount() {
|
||||
return totalByFieldCount;
|
||||
}
|
||||
|
@ -188,8 +210,8 @@ public class ModelSizeStats implements ToXContentObject {
|
|||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount,
|
||||
this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime);
|
||||
return Objects.hash(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount,
|
||||
totalPartitionFieldCount, this.bucketAllocationFailuresCount, memoryStatus, timestamp, logTime);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -207,7 +229,8 @@ public class ModelSizeStats implements ToXContentObject {
|
|||
|
||||
ModelSizeStats that = (ModelSizeStats) other;
|
||||
|
||||
return this.modelBytes == that.modelBytes && this.totalByFieldCount == that.totalByFieldCount
|
||||
return this.modelBytes == that.modelBytes && Objects.equals(this.modelBytesExceeded, that.modelBytesExceeded)
|
||||
&& Objects.equals(this.modelBytesMemoryLimit, that.modelBytesMemoryLimit) && this.totalByFieldCount == that.totalByFieldCount
|
||||
&& this.totalOverFieldCount == that.totalOverFieldCount && this.totalPartitionFieldCount == that.totalPartitionFieldCount
|
||||
&& this.bucketAllocationFailuresCount == that.bucketAllocationFailuresCount
|
||||
&& Objects.equals(this.memoryStatus, that.memoryStatus) && Objects.equals(this.timestamp, that.timestamp)
|
||||
|
@ -219,6 +242,8 @@ public class ModelSizeStats implements ToXContentObject {
|
|||
|
||||
private final String jobId;
|
||||
private long modelBytes;
|
||||
private Long modelBytesExceeded;
|
||||
private Long modelBytesMemoryLimit;
|
||||
private long totalByFieldCount;
|
||||
private long totalOverFieldCount;
|
||||
private long totalPartitionFieldCount;
|
||||
|
@ -236,6 +261,8 @@ public class ModelSizeStats implements ToXContentObject {
|
|||
public Builder(ModelSizeStats modelSizeStats) {
|
||||
this.jobId = modelSizeStats.jobId;
|
||||
this.modelBytes = modelSizeStats.modelBytes;
|
||||
this.modelBytesExceeded = modelSizeStats.modelBytesExceeded;
|
||||
this.modelBytesMemoryLimit = modelSizeStats.modelBytesMemoryLimit;
|
||||
this.totalByFieldCount = modelSizeStats.totalByFieldCount;
|
||||
this.totalOverFieldCount = modelSizeStats.totalOverFieldCount;
|
||||
this.totalPartitionFieldCount = modelSizeStats.totalPartitionFieldCount;
|
||||
|
@ -250,6 +277,16 @@ public class ModelSizeStats implements ToXContentObject {
|
|||
return this;
|
||||
}
|
||||
|
||||
public Builder setModelBytesExceeded(long modelBytesExceeded) {
|
||||
this.modelBytesExceeded = modelBytesExceeded;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setModelBytesMemoryLimit(long modelBytesMemoryLimit) {
|
||||
this.modelBytesMemoryLimit = modelBytesMemoryLimit;
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder setTotalByFieldCount(long totalByFieldCount) {
|
||||
this.totalByFieldCount = totalByFieldCount;
|
||||
return this;
|
||||
|
@ -287,8 +324,8 @@ public class ModelSizeStats implements ToXContentObject {
|
|||
}
|
||||
|
||||
public ModelSizeStats build() {
|
||||
return new ModelSizeStats(jobId, modelBytes, totalByFieldCount, totalOverFieldCount, totalPartitionFieldCount,
|
||||
bucketAllocationFailuresCount, memoryStatus, timestamp, logTime);
|
||||
return new ModelSizeStats(jobId, modelBytes, modelBytesExceeded, modelBytesMemoryLimit, totalByFieldCount, totalOverFieldCount,
|
||||
totalPartitionFieldCount, bucketAllocationFailuresCount, memoryStatus, timestamp, logTime);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
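The two new statistics are carried as boxed `Long` rather than `long` because documents written before these fields existed simply do not contain them; `null` then means "not reported", and equals/hashCode go through `Objects.equals`/`Objects.hash` to tolerate it. A small, illustrative check of that null-safe comparison (values are examples only):

import java.util.Objects;

class NullableStatsDemo {
    static boolean sameStat(Long a, Long b) {
        // Objects.equals treats two nulls as equal and never dereferences a null
        return Objects.equals(a, b);
    }

    public static void main(String[] args) {
        Long fromNewSnapshot = 10762L; // model_bytes_exceeded present
        Long fromOldSnapshot = null;   // field absent in older documents
        System.out.println(sameStat(fromNewSnapshot, fromOldSnapshot)); // false
        System.out.println(sameStat(null, null));                       // true
        System.out.println(sameStat(10762L, 10762L));                   // true
    }
}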
@ -150,11 +150,15 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
|
||||
private void addModelSnapshotIndexRequests(BulkRequest bulkRequest) {
|
||||
{
|
||||
// Index a number of model snapshots, one of which contains the new model_size_stats fields
|
||||
// 'model_bytes_exceeded' and 'model_bytes_memory_limit' that were introduced in 7.2.0.
|
||||
// We want to verify that we can parse the snapshots whether or not these fields are present.
|
||||
IndexRequest indexRequest = new IndexRequest(RESULTS_INDEX);
|
||||
indexRequest.source("{\"job_id\":\"" + JOB_ID + "\", \"timestamp\":1541587919000, " +
|
||||
"\"description\":\"State persisted due to job close at 2018-11-07T10:51:59+0000\", \"snapshot_id\":\"1541587919\"," +
|
||||
"\"snapshot_doc_count\":1, \"model_size_stats\":{\"job_id\":\"" + JOB_ID + "\", \"result_type\":\"model_size_stats\"," +
|
||||
"\"model_bytes\":51722, \"total_by_field_count\":3, \"total_over_field_count\":0, \"total_partition_field_count\":2," +
|
||||
"\"model_bytes\":51722, \"model_bytes_exceeded\":10762, \"model_bytes_memory_limit\":40960, \"total_by_field_count\":3, " +
|
||||
"\"total_over_field_count\":0, \"total_partition_field_count\":2," +
|
||||
"\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\", \"log_time\":1541587919000," +
|
||||
" \"timestamp\":1519930800000},\"latest_record_time_stamp\":1519931700000, \"latest_result_time_stamp\":1519930800000," +
|
||||
" \"retain\":false }", XContentType.JSON);
|
||||
|
@ -223,6 +227,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
@ -241,6 +247,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
@ -259,6 +267,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(2).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
@ -288,6 +298,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(2).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(2).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
@ -306,6 +318,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
@ -324,6 +338,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
@ -353,6 +369,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
@ -383,6 +401,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
@ -402,6 +422,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
@ -430,6 +452,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
@ -470,6 +494,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(10762L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(40960L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
@ -488,6 +514,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(1).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(1).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
@ -517,6 +545,8 @@ public class MachineLearningGetResultsIT extends ESRestHighLevelClientTestCase {
|
|||
assertThat(response.snapshots().get(0).getLatestResultTimeStamp(), equalTo(new Date(1519930800000L)));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getJobId(), equalTo(JOB_ID));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytes(), equalTo(51722L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesExceeded(), equalTo(null));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getModelBytesMemoryLimit(), equalTo(null));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalByFieldCount(), equalTo(3L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalOverFieldCount(), equalTo(0L));
|
||||
assertThat(response.snapshots().get(0).getModelSizeStats().getTotalPartitionFieldCount(), equalTo(2L));
|
||||
|
|
|
@@ -32,7 +32,9 @@ import java.util.function.Predicate;
public class PivotConfigTests extends AbstractXContentTestCase<PivotConfig> {

    public static PivotConfig randomPivotConfig() {
        return new PivotConfig(GroupConfigTests.randomGroupConfig(), AggregationConfigTests.randomAggregationConfig());
        return new PivotConfig(GroupConfigTests.randomGroupConfig(),
            AggregationConfigTests.randomAggregationConfig(),
            randomBoolean() ? null : randomIntBetween(10, 10_000));
    }

    @Override

@@ -137,8 +137,9 @@ public class DataFrameTransformDocumentationIT extends ESRestHighLevelClientTest
        // end::put-data-frame-transform-agg-config
        // tag::put-data-frame-transform-pivot-config
        PivotConfig pivotConfig = PivotConfig.builder()
            .setGroups(groupConfig)
            .setAggregationConfig(aggConfig)
            .setGroups(groupConfig) // <1>
            .setAggregationConfig(aggConfig) // <2>
            .setMaxPageSearchSize(1000) // <3>
            .build();
        // end::put-data-frame-transform-pivot-config
        // tag::put-data-frame-transform-config

@ -31,6 +31,8 @@ public class ModelSizeStatsTests extends AbstractXContentTestCase<ModelSizeStats
|
|||
public void testDefaultConstructor() {
|
||||
ModelSizeStats stats = new ModelSizeStats.Builder("foo").build();
|
||||
assertEquals(0, stats.getModelBytes());
|
||||
assertNull(stats.getModelBytesExceeded());
|
||||
assertNull(stats.getModelBytesMemoryLimit());
|
||||
assertEquals(0, stats.getTotalByFieldCount());
|
||||
assertEquals(0, stats.getTotalOverFieldCount());
|
||||
assertEquals(0, stats.getTotalPartitionFieldCount());
|
||||
|
@@ -67,6 +69,12 @@ public class ModelSizeStatsTests extends AbstractXContentTestCase<ModelSizeStats
|
|||
if (randomBoolean()) {
|
||||
stats.setModelBytes(randomNonNegativeLong());
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
stats.setModelBytesExceeded(randomNonNegativeLong());
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
stats.setModelBytesMemoryLimit(randomNonNegativeLong());
|
||||
}
|
||||
if (randomBoolean()) {
|
||||
stats.setTotalByFieldCount(randomNonNegativeLong());
|
||||
}
|
||||
|
|
|
@@ -121,8 +121,9 @@ bwcVersions.forPreviousUnreleased { BwcVersions.UnreleasedVersionInfo unreleased
|
|||
}
|
||||
|
||||
Closure createRunBwcGradleTask = { name, extraConfig ->
|
||||
return tasks.create(name: "$name", type: Exec) {
|
||||
return tasks.create(name: "$name", type: LoggedExec) {
|
||||
dependsOn checkoutBwcBranch, writeBuildMetadata
|
||||
spoolOutput = true
|
||||
workingDir = checkoutDir
|
||||
doFirst {
|
||||
// Execution time so that the checkouts are available
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
# On RedHat,
|
||||
# $1=0 : indicates a removal
|
||||
# $1=1 : indicates an upgrade
|
||||
|
||||
REMOVE_DIRS=false
|
||||
REMOVE_USER_AND_GROUP=false
|
||||
|
||||
|
@ -55,6 +54,13 @@ if [ "$REMOVE_DIRS" = "true" ]; then
|
|||
echo " OK"
|
||||
fi
|
||||
|
||||
# plugins may have contained bin files
|
||||
if [ -d /usr/share/elasticsearch/bin ]; then
|
||||
echo -n "Deleting plugin bin directories..."
|
||||
rm -rf /usr/share/elasticsearch/bin
|
||||
echo " OK"
|
||||
fi
|
||||
|
||||
if [ -d /var/run/elasticsearch ]; then
|
||||
echo -n "Deleting PID directory..."
|
||||
rm -rf /var/run/elasticsearch
|
||||
|
|
|
@ -16,6 +16,10 @@
|
|||
|
||||
source "`dirname "$0"`"/elasticsearch-env
|
||||
|
||||
if [ -z "$ES_TMPDIR" ]; then
|
||||
ES_TMPDIR=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.TempDirectory`
|
||||
fi
|
||||
|
||||
ES_JVM_OPTIONS="$ES_PATH_CONF"/jvm.options
|
||||
JVM_OPTIONS=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.JvmOptionsParser "$ES_JVM_OPTIONS"`
|
||||
ES_JAVA_OPTS="${JVM_OPTIONS//\$\{ES_TMPDIR\}/$ES_TMPDIR}"
|
||||
|
|
|
@ -84,8 +84,4 @@ ES_DISTRIBUTION_FLAVOR=${es.distribution.flavor}
|
|||
ES_DISTRIBUTION_TYPE=${es.distribution.type}
|
||||
ES_BUNDLED_JDK=${es.bundled_jdk}
|
||||
|
||||
if [ -z "$ES_TMPDIR" ]; then
|
||||
ES_TMPDIR=`"$JAVA" -cp "$ES_CLASSPATH" org.elasticsearch.tools.launchers.TempDirectory`
|
||||
fi
|
||||
|
||||
cd "$ES_HOME"
|
||||
|
|
|
@ -64,6 +64,3 @@ if defined JAVA_OPTS (
|
|||
rem check the Java version
|
||||
%JAVA% -cp "%ES_CLASSPATH%" "org.elasticsearch.tools.java_version_checker.JavaVersionChecker" || exit /b 1
|
||||
|
||||
if not defined ES_TMPDIR (
|
||||
for /f "tokens=* usebackq" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.TempDirectory"`) do set ES_TMPDIR=%%a
|
||||
)
|
||||
|
|
|
@ -41,6 +41,10 @@ IF ERRORLEVEL 1 (
|
|||
EXIT /B %ERRORLEVEL%
|
||||
)
|
||||
|
||||
if not defined ES_TMPDIR (
|
||||
for /f "tokens=* usebackq" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.TempDirectory"`) do set ES_TMPDIR=%%a
|
||||
)
|
||||
|
||||
set ES_JVM_OPTIONS=%ES_PATH_CONF%\jvm.options
|
||||
@setlocal
|
||||
for /F "usebackq delims=" %%a in (`CALL %JAVA% -cp "!ES_CLASSPATH!" "org.elasticsearch.tools.launchers.JvmOptionsParser" "!ES_JVM_OPTIONS!" ^|^| echo jvm_options_parser_failed`) do set JVM_OPTIONS=%%a
|
||||
|
|
|
@ -64,11 +64,15 @@ final class JvmErgonomics {
|
|||
ergonomicChoices.add("-Dio.netty.allocator.type=pooled");
|
||||
}
|
||||
}
|
||||
final long maxDirectMemorySize = extractMaxDirectMemorySize(finalJvmOptions);
|
||||
if (maxDirectMemorySize == 0) {
|
||||
ergonomicChoices.add("-XX:MaxDirectMemorySize=" + heapSize / 2);
|
||||
}
|
||||
return ergonomicChoices;
|
||||
}
|
||||
|
||||
private static final Pattern OPTION =
|
||||
Pattern.compile("^\\s*\\S+\\s+(?<flag>\\S+)\\s+:?=\\s+(?<value>\\S+)?\\s+\\{[^}]+?\\}\\s+\\{[^}]+}");
|
||||
Pattern.compile("^\\s*\\S+\\s+(?<flag>\\S+)\\s+:?=\\s+(?<value>\\S+)?\\s+\\{[^}]+?\\}(\\s+\\{[^}]+})?");
|
||||
|
||||
static Map<String, Optional<String>> finalJvmOptions(
|
||||
final List<String> userDefinedJvmOptions) throws InterruptedException, IOException {
|
||||
|
@ -122,6 +126,10 @@ final class JvmErgonomics {
|
|||
return Long.parseLong(finalJvmOptions.get("MaxHeapSize").get());
|
||||
}
|
||||
|
||||
static long extractMaxDirectMemorySize(final Map<String, Optional<String>> finalJvmOptions) {
|
||||
return Long.parseLong(finalJvmOptions.get("MaxDirectMemorySize").get());
|
||||
}
|
||||
|
||||
private static final Pattern SYSTEM_PROPERTY = Pattern.compile("^-D(?<key>[\\w+].*?)=(?<value>.*)$");
|
||||
|
||||
// package private for testing
|
||||
|
|
|
@ -23,13 +23,17 @@ import java.io.IOException;
|
|||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.hamcrest.Matchers.anyOf;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.everyItem;
|
||||
import static org.hamcrest.Matchers.greaterThan;
|
||||
import static org.hamcrest.Matchers.hasItem;
|
||||
import static org.hamcrest.Matchers.hasToString;
|
||||
import static org.hamcrest.Matchers.not;
|
||||
import static org.hamcrest.Matchers.startsWith;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertThat;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
@ -71,7 +75,9 @@ public class JvmErgonomicsTests extends LaunchersTestCase {
|
|||
fail("expected starting java to fail");
|
||||
} catch (final RuntimeException e) {
|
||||
assertThat(e, hasToString(containsString(("starting java failed"))));
|
||||
assertThat(e, hasToString(containsString(("Too small maximum heap"))));
|
||||
assertThat(
|
||||
e,
|
||||
anyOf(hasToString(containsString("Too small initial heap")), hasToString(containsString("Too small maximum heap"))));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -85,6 +91,19 @@ public class JvmErgonomicsTests extends LaunchersTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testMaxDirectMemorySizeUnset() throws InterruptedException, IOException {
|
||||
assertThat(
|
||||
JvmErgonomics.extractMaxDirectMemorySize(JvmErgonomics.finalJvmOptions(Collections.singletonList("-Xmx1g"))),
|
||||
equalTo(0L));
|
||||
}
|
||||
|
||||
public void testMaxDirectMemorySizeSet() throws InterruptedException, IOException {
|
||||
assertThat(
|
||||
JvmErgonomics.extractMaxDirectMemorySize(JvmErgonomics.finalJvmOptions(
|
||||
Arrays.asList("-Xmx1g", "-XX:MaxDirectMemorySize=512m"))),
|
||||
equalTo(512L << 20));
|
||||
}
|
||||
|
||||
public void testExtractSystemProperties() {
|
||||
Map<String, String> expectedSystemProperties = new HashMap<>();
|
||||
expectedSystemProperties.put("file.encoding", "UTF-8");
|
||||
|
@ -101,16 +120,39 @@ public class JvmErgonomicsTests extends LaunchersTestCase {
|
|||
assertTrue(parsedSystemProperties.isEmpty());
|
||||
}
|
||||
|
||||
public void testLittleMemoryErgonomicChoices() throws InterruptedException, IOException {
|
||||
String smallHeap = randomFrom(Arrays.asList("64M", "512M", "1024M", "1G"));
|
||||
List<String> expectedChoices = Collections.singletonList("-Dio.netty.allocator.type=unpooled");
|
||||
assertEquals(expectedChoices, JvmErgonomics.choose(Arrays.asList("-Xms" + smallHeap, "-Xmx" + smallHeap)));
|
||||
public void testPooledMemoryChoiceOnSmallHeap() throws InterruptedException, IOException {
|
||||
final String smallHeap = randomFrom(Arrays.asList("64M", "512M", "1024M", "1G"));
|
||||
assertThat(
|
||||
JvmErgonomics.choose(Arrays.asList("-Xms" + smallHeap, "-Xmx" + smallHeap)),
|
||||
hasItem("-Dio.netty.allocator.type=unpooled"));
|
||||
}
|
||||
|
||||
public void testPlentyMemoryErgonomicChoices() throws InterruptedException, IOException {
|
||||
String largeHeap = randomFrom(Arrays.asList("1025M", "2048M", "2G", "8G"));
|
||||
List<String> expectedChoices = Collections.singletonList("-Dio.netty.allocator.type=pooled");
|
||||
assertEquals(expectedChoices, JvmErgonomics.choose(Arrays.asList("-Xms" + largeHeap, "-Xmx" + largeHeap)));
|
||||
public void testPooledMemoryChoiceOnNotSmallHeap() throws InterruptedException, IOException {
|
||||
final String largeHeap = randomFrom(Arrays.asList("1025M", "2048M", "2G", "8G"));
|
||||
assertThat(
|
||||
JvmErgonomics.choose(Arrays.asList("-Xms" + largeHeap, "-Xmx" + largeHeap)),
|
||||
hasItem("-Dio.netty.allocator.type=pooled"));
|
||||
}
|
||||
|
||||
public void testMaxDirectMemorySizeChoice() throws InterruptedException, IOException {
|
||||
final Map<String, String> heapMaxDirectMemorySize = new HashMap<>();
|
||||
heapMaxDirectMemorySize.put("64M", Long.toString((64L << 20) / 2));
|
||||
heapMaxDirectMemorySize.put("512M", Long.toString((512L << 20) / 2));
|
||||
heapMaxDirectMemorySize.put("1024M", Long.toString((1024L << 20) / 2));
|
||||
heapMaxDirectMemorySize.put("1G", Long.toString((1L << 30) / 2));
|
||||
heapMaxDirectMemorySize.put("2048M", Long.toString((2048L << 20) / 2));
|
||||
heapMaxDirectMemorySize.put("2G", Long.toString((2L << 30) / 2));
|
||||
heapMaxDirectMemorySize.put("8G", Long.toString((8L << 30) / 2));
|
||||
final String heapSize = randomFrom(heapMaxDirectMemorySize.keySet().toArray(new String[0]));
|
||||
assertThat(
|
||||
JvmErgonomics.choose(Arrays.asList("-Xms" + heapSize, "-Xmx" + heapSize)),
|
||||
hasItem("-XX:MaxDirectMemorySize=" + heapMaxDirectMemorySize.get(heapSize)));
|
||||
}
|
||||
|
||||
public void testMaxDirectMemorySizeChoiceWhenSet() throws InterruptedException, IOException {
|
||||
assertThat(
|
||||
JvmErgonomics.choose(Arrays.asList("-Xms1g", "-Xmx1g", "-XX:MaxDirectMemorySize=1g")),
|
||||
everyItem(not(startsWith("-XX:MaxDirectMemorySize="))));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -66,6 +66,11 @@ Defines the pivot function `group by` fields and the aggregation to reduce the d
|
|||
--------------------------------------------------
|
||||
include-tagged::{doc-tests-file}[{api}-pivot-config]
|
||||
--------------------------------------------------
|
||||
<1> The `GroupConfig` to use in the pivot
|
||||
<2> The aggregations to use
|
||||
<3> The maximum paging size for the transform when pulling data
|
||||
from the source. The size dynamically adjusts as the transform
|
||||
is running to recover from and prevent OOM issues.
|
||||
|
||||
===== GroupConfig
|
||||
The grouping terms. Defines the group by and destination fields
|
||||
|
|
|
@ -1253,6 +1253,8 @@ See the <<painless-api-reference-shared, Shared API>> for a high-level overview
|
|||
* String {java11-javadoc}/java.base/java/lang/String.html#replace(java.lang.CharSequence,java.lang.CharSequence)[replace](CharSequence, CharSequence)
|
||||
* String replaceAll(Pattern, Function)
|
||||
* String replaceFirst(Pattern, Function)
|
||||
* String[] splitOnToken(String)
|
||||
* String[] splitOnToken(String, int)
|
||||
* boolean {java11-javadoc}/java.base/java/lang/String.html#startsWith(java.lang.String)[startsWith](String)
|
||||
* boolean {java11-javadoc}/java.base/java/lang/String.html#startsWith(java.lang.String,int)[startsWith](String, int)
|
||||
* CharSequence {java11-javadoc}/java.base/java/lang/CharSequence.html#subSequence(int,int)[subSequence](int, int)
|
||||
|
|
|
@ -29,7 +29,7 @@ include::install_remove.asciidoc[]
|
|||
[[analysis-icu-analyzer]]
|
||||
==== ICU Analyzer
|
||||
|
||||
Performs basic normalization, tokenization and character folding, using the
|
||||
The `icu_analyzer` analyzer performs basic normalization, tokenization and character folding, using the
|
||||
`icu_normalizer` char filter, `icu_tokenizer` and `icu_normalizer` token filter
|
||||
|
||||
The following parameters are accepted:
|
||||
|
|
|
@ -125,11 +125,6 @@ information that concern the file system:
|
|||
`fs.data.available_in_bytes`::
|
||||
Total number of bytes available to this Java virtual machine on this file store
|
||||
|
||||
`fs.data.spins` (Linux only)::
|
||||
Indicates if the file store is backed by spinning storage.
|
||||
`null` means we could not determine it, `true` means the device possibly spins
|
||||
and `false` means it does not (ex: solid-state disks).
|
||||
|
||||
`fs.io_stats.devices` (Linux only)::
|
||||
Array of disk metrics for each device that is backing an
|
||||
Elasticsearch data path. These disk metrics are probed periodically
|
||||
|
|
|
@ -3,6 +3,8 @@
|
|||
[[migrate-tool]]
|
||||
== elasticsearch-migrate
|
||||
|
||||
deprecated:[7.2.0, "This tool is deprecated. Use the native realm directly."]
|
||||
|
||||
The `elasticsearch-migrate` command migrates existing file-based users and roles
|
||||
to the native realm. From 5.0 onward, you should use the `native` realm to
|
||||
manage roles and local users.
|
||||
|
|
|
@ -65,27 +65,35 @@ The API returns the following results:
|
|||
{
|
||||
"id" : "ecommerce_transform",
|
||||
"state" : {
|
||||
"task_state" : "started",
|
||||
"indexer_state" : "started",
|
||||
"task_state": "started",
|
||||
"current_position" : {
|
||||
"customer_id" : "9"
|
||||
},
|
||||
"generation" : 1
|
||||
"checkpoint" : 1,
|
||||
"progress" : {
|
||||
"total_docs" : 1220,
|
||||
"docs_remaining" : 0,
|
||||
"percent_complete" : 100.0
|
||||
}
|
||||
},
|
||||
"stats" : {
|
||||
"pages_processed" : 0,
|
||||
"documents_processed" : 0,
|
||||
"documents_indexed" : 0,
|
||||
"trigger_count" : 0,
|
||||
"index_time_in_ms" : 0,
|
||||
"index_total" : 0,
|
||||
"pages_processed" : 2,
|
||||
"documents_processed" : 1220,
|
||||
"documents_indexed" : 13,
|
||||
"trigger_count" : 1,
|
||||
"index_time_in_ms" : 19,
|
||||
"index_total" : 1,
|
||||
"index_failures" : 0,
|
||||
"search_time_in_ms" : 0,
|
||||
"search_total" : 0,
|
||||
"search_time_in_ms" : 52,
|
||||
"search_total" : 2,
|
||||
"search_failures" : 0
|
||||
},
|
||||
"checkpointing" : {
|
||||
"current" : {
|
||||
"timestamp_millis" : 1557474786393
|
||||
},
|
||||
"operations_behind" : 0
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
----
|
||||
// TESTRESPONSE
|
||||
// TESTRESPONSE
|
||||
|
|
|
@ -75,10 +75,20 @@ The API returns the following results:
|
|||
"transforms" : [
|
||||
{
|
||||
"id" : "ecommerce_transform",
|
||||
"source" : "kibana_sample_data_ecommerce",
|
||||
"dest" : "kibana_sample_data_ecommerce_transform",
|
||||
"query" : {
|
||||
"match_all" : { }
|
||||
"source" : {
|
||||
"index" : [
|
||||
"kibana_sample_data_ecommerce"
|
||||
],
|
||||
"query" : {
|
||||
"term" : {
|
||||
"geoip.continent_name" : {
|
||||
"value" : "Asia"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"dest" : {
|
||||
"index" : "kibana_sample_data_ecommerce_transform"
|
||||
},
|
||||
"pivot" : {
|
||||
"group_by" : {
|
||||
|
@ -95,7 +105,8 @@ The API returns the following results:
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"description" : "Maximum priced ecommerce data by customer_id in Asia"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@ -15,7 +15,13 @@ Instantiates a {dataframe-transform}.
|
|||
|
||||
`PUT _data_frame/transforms/<data_frame_transform_id>`
|
||||
|
||||
//===== Description
|
||||
===== Description
|
||||
|
||||
IMPORTANT: You must use {kib} or this API to create a {dataframe-transform}.
|
||||
Do not put a {dataframe-transform} directly into any
|
||||
`.data-frame-internal*` indices using the Elasticsearch index API.
|
||||
If {es} {security-features} are enabled, do not give users any
|
||||
privileges on `.data-frame-internal*` indices.
|
||||
|
||||
==== Path Parameters
|
||||
|
||||
|
@@ -27,12 +33,12 @@ Instantiates a {dataframe-transform}.

==== Request Body

`source`:: (object) The source configuration, consisting of `index` and optionally
`source` (required):: (object) The source configuration, consisting of `index` and optionally
a `query`.

`dest`:: (object) The destination configuration, consisting of `index`.
`dest` (required):: (object) The destination configuration, consisting of `index`.

`pivot`:: Defines the pivot function `group by` fields and the aggregation to
`pivot`:: (object) Defines the pivot function `group by` fields and the aggregation to
reduce the data.

`description`:: Optional free text description of the data frame transform
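
For reference, a request that puts these pieces together might look like the sketch below. The transform id, index names, and field names are illustrative assumptions, not values taken from this change:

[source,js]
--------------------------------------------------
PUT _data_frame/transforms/ecommerce_transform
{
  "source": {
    "index": "kibana_sample_data_ecommerce"
  },
  "dest": {
    "index": "kibana_sample_data_ecommerce_transform"
  },
  "pivot": {
    "group_by": {
      "customer_id": {
        "terms": { "field": "customer_id" }
      }
    },
    "aggregations": {
      "max_price": {
        "max": { "field": "taxful_total_price" }
      }
    }
  },
  "description": "Maximum priced ecommerce data by customer_id"
}
--------------------------------------------------
// NOTCONSOLE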
@@ -29,13 +29,11 @@ The data in the translog is only persisted to disk when the translog is
++fsync++ed and committed. In the event of hardware failure, any data written
since the previous translog commit will be lost.

By default, Elasticsearch ++fsync++s and commits the translog every 5 seconds
if `index.translog.durability` is set to `async` or if set to `request`
(default) at the end of every <<docs-index_,index>>, <<docs-delete,delete>>,
<<docs-update,update>>, or <<docs-bulk,bulk>> request. More precisely, if set
to `request`, Elasticsearch will only report success of an index, delete,
By default, `index.translog.durability` is set to `request` meaning that Elasticsearch will only report success of an index, delete,
update, or bulk request to the client after the translog has been successfully
++fsync++ed and committed on the primary and on every allocated replica.
++fsync++ed and committed on the primary and on every allocated replica. If
`index.translog.durability` is set to `async` then Elasticsearch ++fsync++s
and commits the translog every `index.translog.sync_interval` (defaults to 5 seconds).
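
As an illustrative sketch (the index name is an assumption, and `async` trades durability for indexing speed), both settings can be changed dynamically with the update index settings API:

[source,js]
--------------------------------------------------
PUT /my-index/_settings
{
  "index.translog.durability": "async",
  "index.translog.sync_interval": "10s"
}
--------------------------------------------------
// NOTCONSOLE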

The following <<indices-update-settings,dynamically updatable>> per-index
settings control the behaviour of the translog:
@@ -864,6 +864,7 @@ include::processors/foreach.asciidoc[]
include::processors/geoip.asciidoc[]
include::processors/grok.asciidoc[]
include::processors/gsub.asciidoc[]
include::processors/html_strip.asciidoc[]
include::processors/join.asciidoc[]
include::processors/json.asciidoc[]
include::processors/kv.asciidoc[]
@@ -0,0 +1,26 @@
[[htmlstrip-processor]]
=== HTML Strip Processor
Removes HTML tags from a field.

NOTE: Each HTML tag is replaced with a `\n` character.

[[htmlstrip-options]]
.HTML Strip Options
[options="header"]
|======
| Name | Required | Default | Description
| `field` | yes | - | The string-valued field to remove HTML tags from
| `target_field` | no | `field` | The field to assign the value to, by default `field` is updated in-place
| `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document
include::common-options.asciidoc[]
|======

[source,js]
--------------------------------------------------
{
  "html_strip": {
    "field": "foo"
  }
}
--------------------------------------------------
// NOTCONSOLE
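
A quick way to try the processor is the simulate pipeline API. This is a minimal sketch; the sample document and field value are made up for illustration:

[source,js]
--------------------------------------------------
POST _ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [
      { "html_strip": { "field": "foo" } }
    ]
  },
  "docs": [
    { "_source": { "foo": "<p>some <b>text</b></p>" } }
  ]
}
--------------------------------------------------
// NOTCONSOLE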
@@ -39,7 +39,7 @@ Discovery and cluster formation are also affected by the following
_expert-level_ settings, although it is not recommended to change any of these
from their default values.

[WARNING] If you adjust these settings then your cluster may not form correctly
WARNING: If you adjust these settings then your cluster may not form correctly
or may become unstable or intolerant of certain failures.

`discovery.cluster_formation_warning_timeout`::
@@ -3,6 +3,7 @@

The following _expert_ setting can be set to manage global search limits.

[[indices-query-bool-max-clause-count]]
`indices.query.bool.max_clause_count`::
Defaults to `1024`.
@@ -21,8 +21,5 @@ GET /_search
[[ids-query-top-level-parameters]]
==== Top-level parameters for `ids`

[cols="v,v",options="header"]
|======
|Parameter |Description
|`values` |An array of <<mapping-id-field, document IDs>>.
|======
`values`::
An array of <<mapping-id-field, document IDs>>.
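
For example, a query selecting documents by their IDs might look like the following sketch (the IDs are placeholders):

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "ids": {
      "values": ["1", "4", "100"]
    }
  }
}
--------------------------------------------------
// NOTCONSOLE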
@@ -1,45 +1,109 @@
|
|||
[[query-dsl-multi-term-rewrite]]
|
||||
== Multi Term Query Rewrite
|
||||
== `rewrite` Parameter
|
||||
|
||||
Multi term queries, like
|
||||
<<query-dsl-wildcard-query,wildcard>> and
|
||||
<<query-dsl-prefix-query,prefix>> are called
|
||||
multi term queries and end up going through a process of rewrite. This
|
||||
also happens on the
|
||||
<<query-dsl-query-string-query,query_string>>.
|
||||
All of those queries allow to control how they will get rewritten using
|
||||
the `rewrite` parameter:
|
||||
WARNING: This parameter is for expert users only. Changing the value of
|
||||
this parameter can impact search performance and relevance.
|
||||
|
||||
* `constant_score` (default): A rewrite method that performs like
|
||||
`constant_score_boolean` when there are few matching terms and otherwise
|
||||
visits all matching terms in sequence and marks documents for that term.
|
||||
Matching documents are assigned a constant score equal to the query's
|
||||
boost.
|
||||
* `scoring_boolean`: A rewrite method that first translates each term
|
||||
into a should clause in a boolean query, and keeps the scores as
|
||||
computed by the query. Note that typically such scores are meaningless
|
||||
to the user, and require non-trivial CPU to compute, so it's almost
|
||||
always better to use `constant_score`. This rewrite method will hit
|
||||
too many clauses failure if it exceeds the boolean query limit (defaults
|
||||
to `1024`).
|
||||
* `constant_score_boolean`: Similar to `scoring_boolean` except scores
|
||||
are not computed. Instead, each matching document receives a constant
|
||||
score equal to the query's boost. This rewrite method will hit too many
|
||||
clauses failure if it exceeds the boolean query limit (defaults to
|
||||
`1024`).
|
||||
* `top_terms_N`: A rewrite method that first translates each term into
|
||||
should clause in boolean query, and keeps the scores as computed by the
|
||||
query. This rewrite method only uses the top scoring terms so it will
|
||||
not overflow boolean max clause count. The `N` controls the size of the
|
||||
top scoring terms to use.
|
||||
* `top_terms_boost_N`: A rewrite method that first translates each term
|
||||
into should clause in boolean query, but the scores are only computed as
|
||||
the boost. This rewrite method only uses the top scoring terms so it
|
||||
will not overflow the boolean max clause count. The `N` controls the
|
||||
size of the top scoring terms to use.
|
||||
* `top_terms_blended_freqs_N`: A rewrite method that first translates each
|
||||
term into should clause in boolean query, but all term queries compute scores
|
||||
as if they had the same frequency. In practice the frequency which is used
|
||||
is the maximum frequency of all matching terms. This rewrite method only uses
|
||||
the top scoring terms so it will not overflow boolean max clause count. The
|
||||
`N` controls the size of the top scoring terms to use.
|
||||
{es} uses https://lucene.apache.org/core/[Apache Lucene] internally to power
|
||||
indexing and searching. In their original form, Lucene cannot execute the
|
||||
following queries:
|
||||
|
||||
* <<query-dsl-fuzzy-query, `fuzzy`>>
|
||||
* <<query-dsl-prefix-query, `prefix`>>
|
||||
* <<query-dsl-query-string-query, `query_string`>>
|
||||
* <<query-dsl-regexp-query, `regexp`>>
|
||||
* <<query-dsl-wildcard-query, `wildcard`>>
|
||||
|
||||
To execute them, Lucene changes these queries to a simpler form, such as a
|
||||
<<query-dsl-bool-query, `bool` query>> or a
|
||||
https://en.wikipedia.org/wiki/Bit_array[bit set].
|
||||
|
||||
The `rewrite` parameter determines:
|
||||
|
||||
* How Lucene calculates the relevance scores for each matching document
|
||||
* Whether Lucene changes the original query to a `bool`
|
||||
query or bit set
|
||||
* If changed to a `bool` query, which `term` query clauses are included
|
||||
|
||||
[float]
|
||||
[[rewrite-param-valid-values]]
|
||||
=== Valid values
|
||||
|
||||
`constant_score` (Default)::
|
||||
Uses the `constant_score_boolean` method for fewer matching terms. Otherwise,
|
||||
this method finds all matching terms in sequence and returns matching documents
|
||||
using a bit set.
|
||||
|
||||
`constant_score_boolean`::
|
||||
Assigns each document a relevance score equal to the `boost`
|
||||
parameter.
|
||||
+
|
||||
This method changes the original query to a <<query-dsl-bool-query, `bool`
|
||||
query>>. This `bool` query contains a `should` clause and
|
||||
<<query-dsl-term-query, `term` query>> for each matching term.
|
||||
+
|
||||
This method can cause the final `bool` query to exceed the clause limit in the
|
||||
<<indices-query-bool-max-clause-count, `indices.query.bool.max_clause_count`>>
|
||||
setting. If the query exceeds this limit, {es} returns an error.
|
||||
|
||||
`scoring_boolean`::
|
||||
Calculates a relevance score for each matching document.
|
||||
+
|
||||
This method changes the original query to a <<query-dsl-bool-query, `bool`
|
||||
query>>. This `bool` query contains a `should` clause and
|
||||
<<query-dsl-term-query, `term` query>> for each matching term.
|
||||
+
|
||||
This method can cause the final `bool` query to exceed the clause limit in the
|
||||
<<indices-query-bool-max-clause-count, `indices.query.bool.max_clause_count`>>
|
||||
setting. If the query exceeds this limit, {es} returns an error.
|
||||
|
||||
`top_terms_blended_freqs_N`::
|
||||
Calculates a relevance score for each matching document as if all terms had the
|
||||
same frequency. This frequency is the maximum frequency of all matching terms.
|
||||
+
|
||||
This method changes the original query to a <<query-dsl-bool-query, `bool`
|
||||
query>>. This `bool` query contains a `should` clause and
|
||||
<<query-dsl-term-query, `term` query>> for each matching term.
|
||||
+
|
||||
The final `bool` query only includes `term` queries for the top `N` scoring
|
||||
terms.
|
||||
+
|
||||
You can use this method to avoid exceeding the clause limit in the
|
||||
<<indices-query-bool-max-clause-count, `indices.query.bool.max_clause_count`>>
|
||||
setting.
|
||||
|
||||
`top_terms_boost_N`::
|
||||
Assigns each matching document a relevance score equal to the `boost` parameter.
|
||||
+
|
||||
This method changes the original query to a <<query-dsl-bool-query, `bool`
|
||||
query>>. This `bool` query contains a `should` clause and
|
||||
<<query-dsl-term-query, `term` query>> for each matching term.
|
||||
+
|
||||
The final `bool` query only includes `term` queries for the top `N` terms.
|
||||
+
|
||||
You can use this method to avoid exceeding the clause limit in the
|
||||
<<indices-query-bool-max-clause-count, `indices.query.bool.max_clause_count`>>
|
||||
setting.
|
||||
|
||||
`top_terms_N`::
|
||||
Calculates a relevance score for each matching document.
|
||||
+
|
||||
This method changes the original query to a <<query-dsl-bool-query, `bool`
|
||||
query>>. This `bool` query contains a `should` clause and
|
||||
<<query-dsl-term-query, `term` query>> for each matching term.
|
||||
+
|
||||
The final `bool` query
|
||||
only includes `term` queries for the top `N` scoring terms.
|
||||
+
|
||||
You can use this method to avoid exceeding the clause limit in the
|
||||
<<indices-query-bool-max-clause-count, `indices.query.bool.max_clause_count`>>
|
||||
setting.
|
||||
|
||||
[float]
|
||||
[[rewrite-param-perf-considerations]]
|
||||
=== Performance considerations for the `rewrite` parameter
|
||||
For most uses, we recommend using the `constant_score`,
|
||||
`constant_score_boolean`, or `top_terms_boost_N` rewrite methods.
|
||||
|
||||
Other methods calculate relevance scores. These score calculations are often
|
||||
expensive and do not improve query results.
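
As a minimal sketch, the `rewrite` parameter is set inside the multi-term query itself; the index, field, and value below are illustrative assumptions:

[source,js]
--------------------------------------------------
GET /my-index/_search
{
  "query": {
    "wildcard": {
      "user": {
        "value": "ki*y",
        "rewrite": "constant_score"
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE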
|
|
@@ -6,6 +6,7 @@
|
|||
|
||||
This section summarizes the changes in each release.
|
||||
|
||||
* <<release-notes-7.1.0>>
|
||||
* <<release-notes-7.0.0>>
|
||||
* <<release-notes-7.0.0-rc2>>
|
||||
* <<release-notes-7.0.0-rc1>>
|
||||
|
@ -15,6 +16,7 @@ This section summarizes the changes in each release.
|
|||
|
||||
--
|
||||
|
||||
include::release-notes/7.1.0.asciidoc[]
|
||||
include::release-notes/7.0.0.asciidoc[]
|
||||
include::release-notes/7.0.0-rc2.asciidoc[]
|
||||
include::release-notes/7.0.0-rc1.asciidoc[]
|
||||
|
|
|
@@ -0,0 +1,52 @@
|
|||
////
|
||||
// To add a release, copy and paste the following text, uncomment the relevant
|
||||
// sections, and add a link to the new section in the list of releases in
|
||||
// ../release-notes.asciidoc. Note that release subheads must be floated and
|
||||
// sections cannot be empty.
|
||||
// TEMPLATE
|
||||
|
||||
// [[release-notes-n.n.n]]
|
||||
// == {es} version n.n.n
|
||||
|
||||
// coming[n.n.n]
|
||||
|
||||
// Also see <<breaking-changes-n.n>>.
|
||||
|
||||
// [float]
|
||||
// [[breaking-n.n.n]]
|
||||
// === Breaking Changes
|
||||
|
||||
// [float]
|
||||
// [[breaking-java-n.n.n]]
|
||||
// === Breaking Java Changes
|
||||
|
||||
// [float]
|
||||
// [[deprecation-n.n.n]]
|
||||
// === Deprecations
|
||||
|
||||
// [float]
|
||||
// [[feature-n.n.n]]
|
||||
// === New Features
|
||||
|
||||
// [float]
|
||||
// [[enhancement-n.n.n]]
|
||||
// === Enhancements
|
||||
|
||||
// [float]
|
||||
// [[bug-n.n.n]]
|
||||
// === Bug Fixes
|
||||
|
||||
// [float]
|
||||
// [[regression-n.n.n]]
|
||||
// === Regressions
|
||||
|
||||
// [float]
|
||||
// === Known Issues
|
||||
////
|
||||
|
||||
[[release-notes-7.1.0]]
|
||||
== {es} version 7.1.0
|
||||
|
||||
Also see <<breaking-changes-7.1,Breaking changes in 7.1>>.
|
||||
|
||||
coming[7.1.0]
|
|
@@ -7,42 +7,48 @@ to ensure that Elasticsearch has enough heap available.
|
|||
|
||||
Elasticsearch will assign the entire heap specified in
|
||||
<<jvm-options,jvm.options>> via the `Xms` (minimum heap size) and `Xmx` (maximum
|
||||
heap size) settings.
|
||||
heap size) settings. You should set these two settings to be equal to each
|
||||
other.
|
||||
|
||||
The value for these setting depends on the amount of RAM available on your
|
||||
server. Good rules of thumb are:
|
||||
The value for these settings depends on the amount of RAM available on your
|
||||
server:
|
||||
|
||||
* Set the minimum heap size (`Xms`) and maximum heap size (`Xmx`) to be equal to
|
||||
each other.
|
||||
* Set `Xmx` and `Xms` to no more than 50% of your physical RAM. {es} requires
|
||||
memory for purposes other than the JVM heap and it is important to leave
|
||||
space for this. For instance, {es} uses off-heap buffers for efficient
|
||||
network communication, relies on the operating system's filesystem cache for
|
||||
efficient access to files, and the JVM itself requires some memory too. It is
|
||||
normal to observe the {es} process using more memory than the limit
|
||||
configured with the `Xmx` setting.
|
||||
|
||||
* The more heap available to Elasticsearch, the more memory it can use for
|
||||
caching. But note that too much heap can subject you to long garbage
|
||||
collection pauses.
|
||||
|
||||
* Set `Xmx` to no more than 50% of your physical RAM, to ensure that there is
|
||||
enough physical RAM left for kernel file system caches.
|
||||
|
||||
* Don’t set `Xmx` to above the cutoff that the JVM uses for compressed object
|
||||
pointers (compressed oops); the exact cutoff varies but is near 32 GB. You can
|
||||
verify that you are under the limit by looking for a line in the logs like the
|
||||
following:
|
||||
* Set `Xmx` and `Xms` to no more than the threshold that the JVM uses for
|
||||
compressed object pointers (compressed oops); the exact threshold varies but
|
||||
is near 32 GB. You can verify that you are under the threshold by looking for a
|
||||
line in the logs like the following:
|
||||
+
|
||||
heap size [1.9gb], compressed ordinary object pointers [true]
|
||||
|
||||
* Even better, try to stay below the threshold for zero-based compressed oops;
|
||||
the exact cutoff varies but 26 GB is safe on most systems, but can be as large
|
||||
as 30 GB on some systems. You can verify that you are under the limit by
|
||||
starting Elasticsearch with the JVM options `-XX:+UnlockDiagnosticVMOptions
|
||||
-XX:+PrintCompressedOopsMode` and looking for a line like the following:
|
||||
* Ideally set `Xmx` and `Xms` to no more than the threshold for zero-based
|
||||
compressed oops; the exact threshold varies but 26 GB is safe on most
|
||||
systems, but can be as large as 30 GB on some systems. You can verify that
|
||||
you are under this threshold by starting {es} with the JVM options
|
||||
`-XX:+UnlockDiagnosticVMOptions -XX:+PrintCompressedOopsMode` and looking for
|
||||
a line like the following:
|
||||
+
|
||||
--
|
||||
heap address: 0x000000011be00000, size: 27648 MB, zero based Compressed Oops
|
||||
|
||||
showing that zero-based compressed oops are enabled instead of
|
||||
showing that zero-based compressed oops are enabled. If zero-based compressed
|
||||
oops are not enabled then you will see a line like the following instead:
|
||||
|
||||
heap address: 0x0000000118400000, size: 28672 MB, Compressed Oops with base: 0x00000001183ff000
|
||||
--
|
||||
|
||||
The more heap available to {es}, the more memory it can use for its internal
|
||||
caches, but the less memory it leaves available for the operating system to use
|
||||
for the filesystem cache. Also, larger heaps can cause longer garbage
|
||||
collection pauses.
|
||||
|
||||
Here are examples of how to set the heap size via the jvm.options file:
|
||||
|
||||
[source,txt]
|
||||
|
@ -66,7 +72,7 @@ ES_JAVA_OPTS="-Xms4000m -Xmx4000m" ./bin/elasticsearch <2>
|
|||
<2> Set the minimum and maximum heap size to 4000 MB.
|
||||
|
||||
NOTE: Configuring the heap for the <<windows-service,Windows service>> is
|
||||
different than the above. The values initially populated for the Windows service
|
||||
can be configured as above but are different after the service has been
|
||||
different than the above. The values initially populated for the Windows
|
||||
service can be configured as above but are different after the service has been
|
||||
installed. Consult the <<windows-service,Windows service documentation>> for
|
||||
additional details.
|
||||
|
|
|
@@ -0,0 +1,194 @@
|
|||
[role="xpack"]
|
||||
[testenv="basic"]
|
||||
[[sql-functions-geo]]
|
||||
=== Geo Functions
|
||||
|
||||
beta[]
|
||||
|
||||
The geo functions work with geometries stored in `geo_point` and `geo_shape` fields, or returned by other geo functions.
|
||||
|
||||
==== Limitations
|
||||
|
||||
Both <<geo-point, `geo_point`>> and <<geo-shape, `geo_shape`>> types are represented in SQL as geometry and can be used
|
||||
interchangeably with the following exceptions:
|
||||
|
||||
* `geo_shape` fields don't have doc values, therefore these fields cannot be used for filtering, grouping or sorting.
|
||||
|
||||
* `geo_points` fields are indexed and have doc values by default, however only latitude and longitude are stored and
|
||||
indexed with some loss of precision from the original values (4.190951585769653E-8 for the latitude and
|
||||
8.381903171539307E-8 for longitude). The altitude component is accepted but not stored in doc values nor indexed.
|
||||
Therefore, calling the `ST_Z` function in filtering, grouping or sorting will return `null`.
|
||||
|
||||
==== Geometry Conversion
|
||||
|
||||
[[sql-functions-geo-st-as-wkt]]
|
||||
===== `ST_AsWKT`
|
||||
|
||||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
ST_AsWKT(geometry<1>)
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
||||
<1> geometry
|
||||
|
||||
*Output*: string
|
||||
|
||||
.Description:
|
||||
|
||||
Returns the WKT representation of the `geometry`.
|
||||
|
||||
["source","sql",subs="attributes,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{sql-specs}/docs/geo.csv-spec[aswkt]
|
||||
--------------------------------------------------
|
||||
|
||||
|
||||
[[sql-functions-geo-st-wkt-to-sql]]
|
||||
===== `ST_WKTToSQL`
|
||||
|
||||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
ST_WKTToSQL(string<1>)
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
||||
<1> string WKT representation of geometry
|
||||
|
||||
*Output*: geometry
|
||||
|
||||
.Description:
|
||||
|
||||
Returns the geometry from WKT representation.
|
||||
|
||||
["source","sql",subs="attributes,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{sql-specs}/docs/geo.csv-spec[aswkt]
|
||||
--------------------------------------------------
|
||||
|
||||
==== Geometry Properties
|
||||
|
||||
[[sql-functions-geo-st-geometrytype]]
|
||||
===== `ST_GeometryType`
|
||||
|
||||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
ST_GeometryType(geometry<1>)
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
||||
<1> geometry
|
||||
|
||||
*Output*: string
|
||||
|
||||
.Description:
|
||||
|
||||
Returns the type of the `geometry` such as POINT, MULTIPOINT, LINESTRING, MULTILINESTRING, POLYGON, MULTIPOLYGON, GEOMETRYCOLLECTION, ENVELOPE or CIRCLE.
|
||||
|
||||
["source","sql",subs="attributes,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{sql-specs}/docs/geo.csv-spec[geometrytype]
|
||||
--------------------------------------------------
|
||||
|
||||
[[sql-functions-geo-st-x]]
|
||||
===== `ST_X`
|
||||
|
||||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
ST_X(geometry<1>)
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
||||
<1> geometry
|
||||
|
||||
*Output*: double
|
||||
|
||||
.Description:
|
||||
|
||||
Returns the longitude of the first point in the geometry.
|
||||
|
||||
["source","sql",subs="attributes,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{sql-specs}/docs/geo.csv-spec[x]
|
||||
--------------------------------------------------
|
||||
|
||||
[[sql-functions-geo-st-y]]
|
||||
===== `ST_Y`
|
||||
|
||||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
ST_Y(geometry<1>)
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
||||
<1> geometry
|
||||
|
||||
*Output*: double
|
||||
|
||||
.Description:
|
||||
|
||||
Returns the latitude of the first point in the geometry.
|
||||
|
||||
["source","sql",subs="attributes,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{sql-specs}/docs/geo.csv-spec[y]
|
||||
--------------------------------------------------
|
||||
|
||||
[[sql-functions-geo-st-z]]
|
||||
===== `ST_Z`
|
||||
|
||||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
ST_Z(geometry<1>)
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
||||
<1> geometry
|
||||
|
||||
*Output*: double
|
||||
|
||||
.Description:
|
||||
|
||||
Returns the altitude of the first point in the geometry.
|
||||
|
||||
["source","sql",subs="attributes,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{sql-specs}/docs/geo.csv-spec[z]
|
||||
--------------------------------------------------
|
||||
|
||||
[[sql-functions-geo-st-distance]]
|
||||
===== `ST_Distance`
|
||||
|
||||
.Synopsis:
|
||||
[source, sql]
|
||||
--------------------------------------------------
|
||||
ST_Distance(geometry<1>, geometry<2>)
|
||||
--------------------------------------------------
|
||||
|
||||
*Input*:
|
||||
|
||||
<1> source geometry
|
||||
<2> target geometry
|
||||
|
||||
*Output*: Double
|
||||
|
||||
.Description:
|
||||
|
||||
Returns the distance between geometries in meters. Both geometries have to be points.
|
||||
|
||||
["source","sql",subs="attributes,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{sql-specs}/docs/geo.csv-spec[distance]
|
||||
--------------------------------------------------
|
|
@ -136,6 +136,14 @@
|
|||
** <<sql-functions-conditional-least>>
|
||||
** <<sql-functions-conditional-nullif>>
|
||||
** <<sql-functions-conditional-nvl>>
|
||||
* <<sql-functions-geo>>
|
||||
** <<sql-functions-geo-st-as-wkt>>
|
||||
** <<sql-functions-geo-st-distance>>
|
||||
** <<sql-functions-geo-st-geometrytype>>
|
||||
** <<sql-functions-geo-st-wkt-to-sql>>
|
||||
** <<sql-functions-geo-st-x>>
|
||||
** <<sql-functions-geo-st-y>>
|
||||
** <<sql-functions-geo-st-z>>
|
||||
* <<sql-functions-system>>
|
||||
** <<sql-functions-system-database>>
|
||||
** <<sql-functions-system-user>>
|
||||
|
@ -149,5 +157,6 @@ include::search.asciidoc[]
|
|||
include::math.asciidoc[]
|
||||
include::string.asciidoc[]
|
||||
include::type-conversion.asciidoc[]
|
||||
include::geo.asciidoc[]
|
||||
include::conditional.asciidoc[]
|
||||
include::system.asciidoc[]
|
||||
|
|
|
@ -81,6 +81,8 @@ s|SQL precision
|
|||
| interval_hour_to_minute | 23
|
||||
| interval_hour_to_second | 23
|
||||
| interval_minute_to_second | 23
|
||||
| geo_point | 52
|
||||
| geo_shape | 2,147,483,647
|
||||
|
||||
|===
|
||||
|
||||
|
|
|
@@ -150,3 +150,14 @@ SELECT count(*) FROM test GROUP BY MINUTE((CAST(date_created AS TIME));
-------------------------------------------------------------
SELECT HISTOGRAM(CAST(birth_date AS TIME), INTERVAL '10' MINUTES) as h, COUNT(*) FROM t GROUP BY h
-------------------------------------------------------------

[float]
[[geo-sql-limitations]]
=== Geo-related functions

Since `geo_shape` fields don't have doc values, these fields cannot be used for filtering, grouping or sorting.

By default, `geo_points` fields are indexed and have doc values. However, only latitude and longitude are stored and
indexed with some loss of precision from the original values (4.190951585769653E-8 for the latitude and
8.381903171539307E-8 for longitude). The altitude component is accepted but not stored in doc values nor indexed.
Therefore, calling the `ST_Z` function in filtering, grouping or sorting will return `null`.
|
|
|
@ -167,14 +167,22 @@ include::open-ml.asciidoc[]
|
|||
|
||||
During a rolling upgrade, the cluster continues to operate normally. However,
|
||||
any new functionality is disabled or operates in a backward compatible mode
|
||||
until all nodes in the cluster are upgraded. New functionality
|
||||
becomes operational once the upgrade is complete and all nodes are running the
|
||||
new version. Once that has happened, there's no way to return to operating
|
||||
in a backward compatible mode. Nodes running the previous major version will
|
||||
not be allowed to join the fully-updated cluster.
|
||||
until all nodes in the cluster are upgraded. New functionality becomes
|
||||
operational once the upgrade is complete and all nodes are running the new
|
||||
version. Once that has happened, there's no way to return to operating in a
|
||||
backward compatible mode. Nodes running the previous major version will not be
|
||||
allowed to join the fully-updated cluster.
|
||||
|
||||
In the unlikely case of a network malfunction during the upgrade process that
|
||||
isolates all remaining old nodes from the cluster, you must take the
|
||||
old nodes offline and upgrade them to enable them to join the cluster.
|
||||
isolates all remaining old nodes from the cluster, you must take the old nodes
|
||||
offline and upgrade them to enable them to join the cluster.
|
||||
|
||||
If you stop half or more of the master-eligible nodes all at once during the
|
||||
upgrade then the cluster will become unavailable, meaning that the upgrade is
|
||||
no longer a _rolling_ upgrade. If this happens, you should upgrade and restart
|
||||
all of the stopped master-eligible nodes to allow the cluster to form again, as
|
||||
if performing a <<restart-upgrade,full-cluster restart upgrade>>. It may also
|
||||
be necessary to upgrade all of the remaining old nodes before they can join the
|
||||
cluster after it re-forms.
|
||||
|
||||
====================================================
|
||||
|
|
|
@ -0,0 +1,76 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.ingest.common;
|
||||
|
||||
import org.apache.lucene.analysis.charfilter.HTMLStripCharFilter;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.StringReader;
|
||||
import java.util.Map;
|
||||
|
||||
public final class HtmlStripProcessor extends AbstractStringProcessor<String> {
|
||||
|
||||
public static final String TYPE = "html_strip";
|
||||
|
||||
HtmlStripProcessor(String tag, String field, boolean ignoreMissing, String targetField) {
|
||||
super(tag, field, ignoreMissing, targetField);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String process(String value) {
|
||||
// shortcut, no need to create a string builder and go through each char
|
||||
if (value.contains("<") == false || value.contains(">") == false) {
|
||||
return value;
|
||||
}
|
||||
|
||||
HTMLStripCharFilter filter = new HTMLStripCharFilter(new StringReader(value));
|
||||
|
||||
StringBuilder builder = new StringBuilder();
|
||||
int ch;
|
||||
try {
|
||||
while ((ch = filter.read()) != -1) {
|
||||
builder.append((char)ch);
|
||||
}
|
||||
} catch (IOException e) {
|
||||
throw new ElasticsearchException(e);
|
||||
}
|
||||
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getType() {
|
||||
return TYPE;
|
||||
}
|
||||
|
||||
public static final class Factory extends AbstractStringProcessor.Factory {
|
||||
|
||||
public Factory() {
|
||||
super(TYPE);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected HtmlStripProcessor newProcessor(String tag, Map<String, Object> config, String field,
|
||||
boolean ignoreMissing, String targetField) {
|
||||
return new HtmlStripProcessor(tag, field, ignoreMissing, targetField);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -87,6 +87,7 @@ public class IngestCommonPlugin extends Plugin implements ActionPlugin, IngestPl
|
|||
processors.put(PipelineProcessor.TYPE, new PipelineProcessor.Factory(parameters.ingestService));
|
||||
processors.put(DissectProcessor.TYPE, new DissectProcessor.Factory());
|
||||
processors.put(DropProcessor.TYPE, new DropProcessor.Factory());
|
||||
processors.put(HtmlStripProcessor.TYPE, new HtmlStripProcessor.Factory());
|
||||
return Collections.unmodifiableMap(processors);
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.ingest.common;
|
||||
|
||||
public class HtmlStripProcessorFactoryTests extends AbstractStringProcessorFactoryTestCase {
|
||||
@Override
|
||||
protected AbstractStringProcessor.Factory newFactory() {
|
||||
return new HtmlStripProcessor.Factory();
|
||||
}
|
||||
}
|
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.ingest.common;
|
||||
|
||||
public class HtmlStripProcessorTests extends AbstractStringProcessorTestCase<String> {
|
||||
|
||||
@Override
|
||||
protected AbstractStringProcessor<String> newProcessor(String field, boolean ignoreMissing, String targetField) {
|
||||
return new HtmlStripProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String modifyInput(String input) {
|
||||
return "<p><b>test</b>" + input + "<p><b>test</b>";
|
||||
}
|
||||
|
||||
@Override
|
||||
protected String expectedResult(String input) {
|
||||
return "\ntest" + input + "\ntest";
|
||||
}
|
||||
}
|
|
@ -23,6 +23,7 @@
|
|||
- contains: { nodes.$master.ingest.processors: { type: foreach } }
|
||||
- contains: { nodes.$master.ingest.processors: { type: grok } }
|
||||
- contains: { nodes.$master.ingest.processors: { type: gsub } }
|
||||
- contains: { nodes.$master.ingest.processors: { type: html_strip } }
|
||||
- contains: { nodes.$master.ingest.processors: { type: join } }
|
||||
- contains: { nodes.$master.ingest.processors: { type: json } }
|
||||
- contains: { nodes.$master.ingest.processors: { type: kv } }
|
||||
|
|
|
@ -76,6 +76,11 @@ teardown:
|
|||
"pattern" : "-",
|
||||
"replacement" : "."
|
||||
}
|
||||
},
|
||||
{
|
||||
"html_strip" : {
|
||||
"field" : "field_to_html_strip"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
@ -96,7 +101,8 @@ teardown:
|
|||
"field_to_split": "127-0-0-1",
|
||||
"field_to_join": ["127","0","0","1"],
|
||||
"field_to_convert": ["127","0","0","1"],
|
||||
"field_to_gsub": "127-0-0-1"
|
||||
"field_to_gsub": "127-0-0-1",
|
||||
"field_to_html_strip": "<p>this <title>is</title> a <b>test</b>"
|
||||
}
|
||||
|
||||
- do:
|
||||
|
@ -114,6 +120,7 @@ teardown:
|
|||
- match: { _source.field_to_join: "127-0-0-1" }
|
||||
- match: { _source.field_to_convert: [127,0,0,1] }
|
||||
- match: { _source.field_to_gsub: "127.0.0.1" }
|
||||
- match: { _source.field_to_html_strip: "\nthis \nis\n a test" }
|
||||
|
||||
---
|
||||
"Test metadata":
|
||||
|
|
|
@ -103,7 +103,7 @@ public class PainlessExecuteAction extends Action<PainlessExecuteAction.Response
|
|||
|
||||
@Override
|
||||
public Response newResponse() {
|
||||
return new Response();
|
||||
throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
|
||||
}
|
||||
|
||||
public static class Request extends SingleShardRequest<Request> implements ToXContentObject {
|
||||
|
@ -388,20 +388,22 @@ public class PainlessExecuteAction extends Action<PainlessExecuteAction.Response
|
|||
|
||||
private Object result;
|
||||
|
||||
Response() {}
|
||||
|
||||
Response(Object result) {
|
||||
this.result = result;
|
||||
}
|
||||
|
||||
Response(StreamInput in) throws IOException {
|
||||
super(in);
|
||||
result = in.readGenericValue();
|
||||
}
|
||||
|
||||
public Object getResult() {
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
result = in.readGenericValue();
|
||||
throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -476,8 +478,8 @@ public class PainlessExecuteAction extends Action<PainlessExecuteAction.Response
|
|||
}
|
||||
|
||||
@Override
|
||||
protected Response newResponse() {
|
||||
return new Response();
|
||||
protected Writeable.Reader<Response> getResponseReader() {
|
||||
return Response::new;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -503,4 +503,53 @@ public class Augmentation {
|
|||
public static String decodeBase64(String receiver) {
|
||||
return new String(Base64.getDecoder().decode(receiver.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8);
|
||||
}
|
||||
|
||||
/**
|
||||
* Split 'receiver' by 'token' as many times as possible.
|
||||
*/
|
||||
public static String[] splitOnToken(String receiver, String token) {
|
||||
return splitOnToken(receiver, token, -1);
|
||||
}
|
||||
|
||||
/**
|
||||
* Split 'receiver' by 'token' up to 'limit' times. Any limit less than 1 is ignored.
|
||||
*/
|
||||
public static String[] splitOnToken(String receiver, String token, int limit) {
|
||||
// Check if it's even possible to perform a split
|
||||
if (receiver == null || receiver.length() == 0 || token == null || token.length() == 0 || receiver.length() < token.length()) {
|
||||
return new String[] { receiver };
|
||||
}
|
||||
|
||||
// List of string segments we have found
|
||||
ArrayList<String> result = new ArrayList<String>();
|
||||
|
||||
// Keep track of where we are in the string
|
||||
// indexOf(tok, startPos) is faster than creating a new search context every loop with substring(start, end)
|
||||
int pos = 0;
|
||||
|
||||
// Loop until we hit the limit or forever if we are passed in less than one (signifying no limit)
|
||||
// If Integer.MIN_VALUE is passed in, it will still continue to loop down to 1 from MAX_VALUE
|
||||
// This edge case should be fine as we are limited by receiver length (Integer.MAX_VALUE) even if we split at every char
|
||||
for(;limit != 1; limit--) {
|
||||
|
||||
// Find the next occurrence of token after current pos
|
||||
int idx = receiver.indexOf(token, pos);
|
||||
|
||||
// Reached the end of the string without another match
|
||||
if (idx == -1) {
|
||||
break;
|
||||
}
|
||||
|
||||
// Add the found segment to the result list
|
||||
result.add(receiver.substring(pos, idx));
|
||||
|
||||
// Move our search position to the next possible location
|
||||
pos = idx + token.length();
|
||||
}
|
||||
// Add the remaining string to the result list
|
||||
result.add(receiver.substring(pos));
|
||||
|
||||
// O(N) or faster depending on implementation
|
||||
return result.toArray(new String[0]);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -758,6 +758,8 @@ class java.lang.String {
  String copyValueOf(char[],int,int)
  String org.elasticsearch.painless.api.Augmentation decodeBase64()
  String org.elasticsearch.painless.api.Augmentation encodeBase64()
  String[] org.elasticsearch.painless.api.Augmentation splitOnToken(String)
  String[] org.elasticsearch.painless.api.Augmentation splitOnToken(String, int)
  boolean endsWith(String)
  boolean equalsIgnoreCase(String)
  String format(Locale,String,def[])

@@ -23,6 +23,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.regex.Pattern;

public class AugmentationTests extends ScriptTestCase {

@@ -199,4 +200,43 @@ public class AugmentationTests extends ScriptTestCase {
        assertEquals(8, exec("def ft = new org.elasticsearch.painless.FeatureTestObject();" +
                " ft.setX(3); ft.setY(2); return ft.addToTotal(3)"));
    }

    private static class SplitCase {
        final String input;
        final String token;
        final int count;

        SplitCase(String input, String token, int count) {
            this.input = input;
            this.token = token;
            this.count = count;
        }
        SplitCase(String input, String token) {
            this(input, token, -1);
        }
    }
    public void testString_SplitOnToken() {
        SplitCase[] cases = new SplitCase[] {
            new SplitCase("", ""),
            new SplitCase("a,b,c", ","),
            new SplitCase("a,b,c", "|"),
            new SplitCase("a,,b,,c", ","),
            new SplitCase("a,,b,,c", ",", 1),
            new SplitCase("a,,b,,c", ",", 3),
            new SplitCase("a,,b,,c", ",", 300),
            new SplitCase("a,b,c", "a,b,c,d"),
            new SplitCase("aaaaaaa", "a"),
            new SplitCase("aaaaaaa", "a", 2),
            new SplitCase("1.1.1.1.111", "1"),
            new SplitCase("1.1.1.1.111", "."),
            new SplitCase("1\n1.1.\r\n1\r\n111", "\r\n"),
        };
        for (SplitCase split : cases) {
            //System.out.println(String.format("Splitting '%s' by '%s' %d times", split.input, split.token, split.count));
            assertArrayEquals(
                split.input.split(Pattern.quote(split.token), split.count),
                (String[])exec("return \""+split.input+"\".splitOnToken(\""+split.token+"\", "+split.count+");")
            );
        }
    }
}
@@ -18,17 +18,57 @@
 */
package org.elasticsearch.painless.action;

import org.elasticsearch.test.AbstractStreamableTestCase;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractSerializingTestCase;

public class PainlessExecuteResponseTests extends AbstractStreamableTestCase<PainlessExecuteAction.Response> {
import java.io.IOException;

public class PainlessExecuteResponseTests extends AbstractSerializingTestCase<PainlessExecuteAction.Response> {

    @Override
    protected PainlessExecuteAction.Response createBlankInstance() {
        return new PainlessExecuteAction.Response();
    protected Writeable.Reader<PainlessExecuteAction.Response> instanceReader() {
        return PainlessExecuteAction.Response::new;
    }

    @Override
    protected PainlessExecuteAction.Response createTestInstance() {
        return new PainlessExecuteAction.Response(randomAlphaOfLength(10));
        Object result;
        switch (randomIntBetween(0, 2)) {
            case 0:
                result = randomAlphaOfLength(10);
                break;
            case 1:
                result = randomBoolean();
                break;
            case 2:
                result = randomDoubleBetween(-10, 10, true);
                break;
            default:
                throw new IllegalStateException("invalid branch");
        }
        return new PainlessExecuteAction.Response(result);
    }

    @Override
    protected PainlessExecuteAction.Response doParseInstance(XContentParser parser) throws IOException {
        parser.nextToken(); // START-OBJECT
        parser.nextToken(); // FIELD-NAME
        XContentParser.Token token = parser.nextToken(); // result value
        Object result;
        switch (token) {
            case VALUE_STRING:
                result = parser.text();
                break;
            case VALUE_BOOLEAN:
                result = parser.booleanValue();
                break;
            case VALUE_NUMBER:
                result = parser.doubleValue();
                break;
            default:
                throw new IOException("invalid response");
        }
        return new PainlessExecuteAction.Response(result);
    }
}
@@ -39,8 +39,8 @@ public class AnnotatedTextHighlighter extends UnifiedHighlighter {
    public static final String NAME = "annotated";

    @Override
    protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type, HitContext hitContext) {
        return new AnnotatedHighlighterAnalyzer(super.getAnalyzer(docMapper, type, hitContext), hitContext);
    protected Analyzer getAnalyzer(DocumentMapper docMapper, HitContext hitContext) {
        return new AnnotatedHighlighterAnalyzer(super.getAnalyzer(docMapper, hitContext), hitContext);
    }

    // Convert the marked-up values held on-disk to plain-text versions for highlighting

@@ -170,6 +170,9 @@ public abstract class PackageTestCase extends PackagingTestCase {
    public void test50Remove() throws Exception {
        assumeThat(installation, is(notNullValue()));

        // add fake bin directory as if a plugin was installed
        Files.createDirectories(installation.bin.resolve("myplugin"));

        remove(distribution());

        // removing must stop the service
@@ -33,10 +33,6 @@
        "type" : "string",
        "description" : "Sets the number of shard copies that must be active before proceeding with the index operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)"
      },
      "parent": {
        "type" : "string",
        "description" : "ID of the parent document"
      },
      "refresh": {
        "type" : "enum",
        "options": ["true", "false", "wait_for"],

@@ -33,10 +33,6 @@
        "type" : "string",
        "description" : "Sets the number of shard copies that must be active before proceeding with the delete operation. Defaults to 1, meaning the primary shard only. Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal to the total number of copies for the shard (number of replicas + 1)"
      },
      "parent": {
        "type" : "string",
        "description" : "ID of parent document"
      },
      "refresh": {
        "type" : "enum",
        "options": ["true", "false", "wait_for"],

@@ -33,10 +33,6 @@
        "type": "list",
        "description" : "A comma-separated list of stored fields to return in the response"
      },
      "parent": {
        "type" : "string",
        "description" : "The ID of the parent document"
      },
      "preference": {
        "type" : "string",
        "description" : "Specify the node or shard the operation should be performed on (default: random)"

@@ -30,10 +30,6 @@
      }
    },
    "params": {
      "parent": {
        "type" : "string",
        "description" : "The ID of the parent document"
      },
      "preference": {
        "type" : "string",
        "description" : "Specify the node or shard the operation should be performed on (default: random)"

@@ -55,10 +55,6 @@
        "type" : "boolean",
        "description" : "Specify whether format-based query failures (such as providing text to a numeric field) should be ignored"
      },
      "parent": {
        "type" : "string",
        "description" : "The ID of the parent document"
      },
      "preference": {
        "type" : "string",
        "description" : "Specify the node or shard the operation should be performed on (default: random)"

@@ -33,10 +33,6 @@
        "type": "list",
        "description" : "A comma-separated list of stored fields to return in the response"
      },
      "parent": {
        "type" : "string",
        "description" : "The ID of the parent document"
      },
      "preference": {
        "type" : "string",
        "description" : "Specify the node or shard the operation should be performed on (default: random)"

@@ -30,10 +30,6 @@
      }
    },
    "params": {
      "parent": {
        "type" : "string",
        "description" : "The ID of the parent document"
      },
      "preference": {
        "type" : "string",
        "description" : "Specify the node or shard the operation should be performed on (default: random)"

@@ -43,10 +43,6 @@
        "default" : "index",
        "description" : "Explicit operation type"
      },
      "parent": {
        "type" : "string",
        "description" : "ID of the parent document"
      },
      "refresh": {
        "type" : "enum",
        "options": ["true", "false", "wait_for"],

@@ -73,11 +73,6 @@
        "description" : "Specific routing value. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".",
        "required" : false
      },
      "parent" : {
        "type" : "string",
        "description" : "Parent id of documents. Applies to all returned documents unless otherwise specified in body \"params\" or \"docs\".",
        "required" : false
      },
      "realtime": {
        "type": "boolean",
        "description": "Specifies if requests are real-time as opposed to near-real-time (default: true).",

@@ -79,11 +79,6 @@
        "description" : "Specific routing value.",
        "required" : false
      },
      "parent": {
        "type" : "string",
        "description" : "Parent id of documents.",
        "required" : false
      },
      "realtime": {
        "type": "boolean",
        "description": "Specifies if request is real-time as opposed to near-real-time (default: true).",

@@ -49,10 +49,6 @@
        "type": "string",
        "description": "The script language (default: painless)"
      },
      "parent": {
        "type": "string",
        "description": "ID of the parent document. Is is only used for routing and when for the upsert request"
      },
      "refresh": {
        "type" : "enum",
        "options": ["true", "false", "wait_for"],
@@ -118,7 +118,9 @@ public abstract class BlendedTermQuery extends Query {
                // otherwise the statistics don't match
                minSumTTF = Math.min(minSumTTF, reader.getSumTotalTermFreq(terms[i].field()));
            }

        }
        if (maxDoc > minSumTTF) {
            maxDoc = (int)minSumTTF;
        }
        if (max == 0) {
            return; // we are done that term doesn't exist at all
@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.analyze;

import org.elasticsearch.action.Action;
import org.elasticsearch.common.io.stream.Writeable;

public class AnalyzeAction extends Action<AnalyzeResponse> {

@@ -30,8 +31,13 @@ public class AnalyzeAction extends Action<AnalyzeResponse> {
        super(NAME);
    }

    @Override
    public Writeable.Reader<AnalyzeResponse> getResponseReader() {
        return AnalyzeResponse::new;
    }

    @Override
    public AnalyzeResponse newResponse() {
        return new AnalyzeResponse();
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }
}
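The change above is the first of many applications of the same migration in this commit: the Streamable-style blank instance plus readFrom(StreamInput) is replaced by a deserializing constructor exposed through a Writeable.Reader, while newResponse()/readFrom() are left only to throw. A minimal sketch of that shape, using simplified stand-in types rather than the real org.elasticsearch.common.io.stream interfaces:

import java.io.IOException;

// Stand-ins for the stream types, reduced to what the pattern needs.
interface FakeStreamInput { String readString() throws IOException; }
interface FakeStreamOutput { void writeString(String s) throws IOException; }

// Writeable-style response: state is final and fully read in the constructor.
class ExampleResponse {
    // Functional interface mirroring Writeable.Reader<T>.
    interface Reader<T> { T read(FakeStreamInput in) throws IOException; }

    private final String value;

    ExampleResponse(String value) { this.value = value; }

    // Deserializing constructor replaces the old readFrom(StreamInput).
    ExampleResponse(FakeStreamInput in) throws IOException { this.value = in.readString(); }

    void writeTo(FakeStreamOutput out) throws IOException { out.writeString(value); }

    // What getResponseReader() returns in the real classes: a constructor reference.
    static Reader<ExampleResponse> reader() { return ExampleResponse::new; }
}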
@@ -23,7 +23,7 @@ import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -43,17 +43,14 @@ import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpect

public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeResponse.AnalyzeToken>, ToXContentObject {

    public static class AnalyzeToken implements Streamable, ToXContentObject {
        private String term;
        private int startOffset;
        private int endOffset;
        private int position;
        private int positionLength = 1;
        private Map<String, Object> attributes;
        private String type;

        AnalyzeToken() {
        }
    public static class AnalyzeToken implements Writeable, ToXContentObject {
        private final String term;
        private final int startOffset;
        private final int endOffset;
        private final int position;
        private final int positionLength;
        private final Map<String, Object> attributes;
        private final String type;

        @Override
        public boolean equals(Object o) {

@@ -74,7 +71,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
            return Objects.hash(term, startOffset, endOffset, position, positionLength, attributes, type);
        }

        public AnalyzeToken(String term, int position, int startOffset, int endOffset, int positionLength,
        AnalyzeToken(String term, int position, int startOffset, int endOffset, int positionLength,
                     String type, Map<String, Object> attributes) {
            this.term = term;
            this.position = position;

@@ -85,6 +82,21 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
            this.attributes = attributes;
        }

        AnalyzeToken(StreamInput in) throws IOException {
            term = in.readString();
            startOffset = in.readInt();
            endOffset = in.readInt();
            position = in.readVInt();
            Integer len = in.readOptionalVInt();
            if (len != null) {
                positionLength = len;
            } else {
                positionLength = 1;
            }
            type = in.readOptionalString();
            attributes = in.readMap();
        }

        public String getTerm() {
            return this.term;
        }

@@ -134,12 +146,6 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
            return builder;
        }

        public static AnalyzeToken readAnalyzeToken(StreamInput in) throws IOException {
            AnalyzeToken analyzeToken = new AnalyzeToken();
            analyzeToken.readFrom(in);
            return analyzeToken;
        }

        public static AnalyzeToken fromXContent(XContentParser parser) throws IOException {
            ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation);
            String field = null;

@@ -184,22 +190,6 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
            return new AnalyzeToken(term, position, startOffset, endOffset, positionLength, type, attributes);
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            term = in.readString();
            startOffset = in.readInt();
            endOffset = in.readInt();
            position = in.readVInt();
            Integer len = in.readOptionalVInt();
            if (len != null) {
                positionLength = len;
            } else {
                positionLength = 1;
            }
            type = in.readOptionalString();
            attributes = in.readMap();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(term);

@@ -212,18 +202,34 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
        }
    }

    private DetailAnalyzeResponse detail;

    private List<AnalyzeToken> tokens;

    AnalyzeResponse() {
    }
    private final DetailAnalyzeResponse detail;
    private final List<AnalyzeToken> tokens;

    public AnalyzeResponse(List<AnalyzeToken> tokens, DetailAnalyzeResponse detail) {
        this.tokens = tokens;
        this.detail = detail;
    }

    public AnalyzeResponse(StreamInput in) throws IOException {
        super.readFrom(in);
        int size = in.readVInt();
        if (size > 0) {
            tokens = new ArrayList<>(size);
            for (int i = 0; i < size; i++) {
                tokens.add(new AnalyzeToken(in));
            }
        }
        else {
            tokens = null;
        }
        detail = in.readOptionalWriteable(DetailAnalyzeResponse::new);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    public List<AnalyzeToken> getTokens() {
        return this.tokens;
    }

@@ -268,20 +274,6 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
        return PARSER.parse(parser, null);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        int size = in.readVInt();
        tokens = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            tokens.add(AnalyzeToken.readAnalyzeToken(in));
        }
        if (tokens.size() == 0) {
            tokens = null;
        }
        detail = in.readOptionalStreamable(DetailAnalyzeResponse::new);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);

@@ -293,7 +285,7 @@ public class AnalyzeResponse extends ActionResponse implements Iterable<AnalyzeR
        } else {
            out.writeVInt(0);
        }
        out.writeOptionalStreamable(detail);
        out.writeOptionalWriteable(detail);
    }

    @Override
@@ -24,7 +24,7 @@ import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.ToXContentObject;

@@ -40,16 +40,13 @@ import java.util.Objects;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;

public class DetailAnalyzeResponse implements Streamable, ToXContentFragment {
public class DetailAnalyzeResponse implements Writeable, ToXContentFragment {

    private boolean customAnalyzer = false;
    private AnalyzeTokenList analyzer;
    private CharFilteredText[] charfilters;
    private AnalyzeTokenList tokenizer;
    private AnalyzeTokenList[] tokenfilters;

    DetailAnalyzeResponse() {
    }
    private final boolean customAnalyzer;
    private final AnalyzeTokenList analyzer;
    private final CharFilteredText[] charfilters;
    private final AnalyzeTokenList tokenizer;
    private final AnalyzeTokenList[] tokenfilters;

    public DetailAnalyzeResponse(AnalyzeTokenList analyzer) {
        this(false, analyzer, null, null, null);

@@ -71,46 +68,55 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment {
        this.tokenfilters = tokenfilters;
    }

    public AnalyzeTokenList analyzer() {
        return this.analyzer;
    public DetailAnalyzeResponse(StreamInput in) throws IOException {
        this.customAnalyzer = in.readBoolean();
        if (customAnalyzer) {
            tokenizer = new AnalyzeTokenList(in);
            int size = in.readVInt();
            if (size > 0) {
                charfilters = new CharFilteredText[size];
                for (int i = 0; i < size; i++) {
                    charfilters[i] = new CharFilteredText(in);
                }
            }
            else {
                charfilters = null;
            }
            size = in.readVInt();
            if (size > 0) {
                tokenfilters = new AnalyzeTokenList[size];
                for (int i = 0; i < size; i++) {
                    tokenfilters[i] = new AnalyzeTokenList(in);
                }
            }
            else {
                tokenfilters = null;
            }
            analyzer = null;
        } else {
            analyzer = new AnalyzeTokenList(in);
            tokenfilters = null;
            tokenizer = null;
            charfilters = null;
        }
    }

    public DetailAnalyzeResponse analyzer(AnalyzeTokenList analyzer) {
        this.customAnalyzer = false;
        this.analyzer = analyzer;
        return this;
    public AnalyzeTokenList analyzer() {
        return this.analyzer;
    }

    public CharFilteredText[] charfilters() {
        return this.charfilters;
    }

    public DetailAnalyzeResponse charfilters(CharFilteredText[] charfilters) {
        this.customAnalyzer = true;
        this.charfilters = charfilters;
        return this;
    }

    public AnalyzeTokenList tokenizer() {
        return tokenizer;
    }

    public DetailAnalyzeResponse tokenizer(AnalyzeTokenList tokenizer) {
        this.customAnalyzer = true;
        this.tokenizer = tokenizer;
        return this;
    }

    public AnalyzeTokenList[] tokenfilters() {
        return tokenfilters;
    }

    public DetailAnalyzeResponse tokenfilters(AnalyzeTokenList[] tokenfilters) {
        this.customAnalyzer = true;
        this.tokenfilters = tokenfilters;
        return this;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;

@@ -201,30 +207,6 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment {
        static final String TOKENFILTERS = "tokenfilters";
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        this.customAnalyzer = in.readBoolean();
        if (customAnalyzer) {
            tokenizer = AnalyzeTokenList.readAnalyzeTokenList(in);
            int size = in.readVInt();
            if (size > 0) {
                charfilters = new CharFilteredText[size];
                for (int i = 0; i < size; i++) {
                    charfilters[i] = CharFilteredText.readCharFilteredText(in);
                }
            }
            size = in.readVInt();
            if (size > 0) {
                tokenfilters = new AnalyzeTokenList[size];
                for (int i = 0; i < size; i++) {
                    tokenfilters[i] = AnalyzeTokenList.readAnalyzeTokenList(in);
                }
            }
        } else {
            analyzer = AnalyzeTokenList.readAnalyzeTokenList(in);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeBoolean(customAnalyzer);

@@ -251,9 +233,9 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment {
        }
    }

    public static class AnalyzeTokenList implements Streamable, ToXContentObject {
        private String name;
        private AnalyzeResponse.AnalyzeToken[] tokens;
    public static class AnalyzeTokenList implements Writeable, ToXContentObject {
        private final String name;
        private final AnalyzeResponse.AnalyzeToken[] tokens;

        @Override
        public boolean equals(Object o) {

@@ -271,14 +253,25 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment {
            return result;
        }

        AnalyzeTokenList() {
        }

        public AnalyzeTokenList(String name, AnalyzeResponse.AnalyzeToken[] tokens) {
            this.name = name;
            this.tokens = tokens;
        }

        public AnalyzeTokenList(StreamInput in) throws IOException {
            name = in.readString();
            int size = in.readVInt();
            if (size > 0) {
                tokens = new AnalyzeResponse.AnalyzeToken[size];
                for (int i = 0; i < size; i++) {
                    tokens[i] = new AnalyzeResponse.AnalyzeToken(in);
                }
            }
            else {
                tokens = null;
            }
        }

        public String getName() {
            return name;
        }

@@ -287,12 +280,6 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment {
            return tokens;
        }

        public static AnalyzeTokenList readAnalyzeTokenList(StreamInput in) throws IOException {
            AnalyzeTokenList list = new AnalyzeTokenList();
            list.readFrom(in);
            return list;
        }

        XContentBuilder toXContentWithoutObject(XContentBuilder builder, Params params) throws IOException {
            builder.field(Fields.NAME, this.name);
            builder.startArray(AnalyzeResponse.Fields.TOKENS);

@@ -327,18 +314,6 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment {
            return PARSER.parse(parser, null);
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            name = in.readString();
            int size = in.readVInt();
            if (size > 0) {
                tokens = new AnalyzeResponse.AnalyzeToken[size];
                for (int i = 0; i < size; i++) {
                    tokens[i] = AnalyzeResponse.AnalyzeToken.readAnalyzeToken(in);
                }
            }
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(name);

@@ -353,12 +328,9 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment {
        }
    }

    public static class CharFilteredText implements Streamable, ToXContentObject {
        private String name;
        private String[] texts;

        CharFilteredText() {
        }
    public static class CharFilteredText implements Writeable, ToXContentObject {
        private final String name;
        private final String[] texts;

        public CharFilteredText(String name, String[] texts) {
            this.name = name;

@@ -369,6 +341,11 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment {
            }
        }

        public CharFilteredText(StreamInput in) throws IOException {
            name = in.readString();
            texts = in.readStringArray();
        }

        public String getName() {
            return name;
        }

@@ -398,18 +375,6 @@ public class DetailAnalyzeResponse implements Streamable, ToXContentFragment {
            return PARSER.parse(parser, null);
        }

        public static CharFilteredText readCharFilteredText(StreamInput in) throws IOException {
            CharFilteredText text = new CharFilteredText();
            text.readFrom(in);
            return text;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            name = in.readString();
            texts = in.readStringArray();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(name);
@@ -40,6 +40,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.Environment;

@@ -49,9 +50,9 @@ import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.CharFilterFactory;
import org.elasticsearch.index.analysis.CustomAnalyzer;
import org.elasticsearch.index.analysis.IndexAnalyzers;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.NormalizingCharFilterFactory;
import org.elasticsearch.index.analysis.NormalizingTokenFilterFactory;
import org.elasticsearch.index.analysis.NamedAnalyzer;
import org.elasticsearch.index.analysis.TokenFilterFactory;
import org.elasticsearch.index.analysis.TokenizerFactory;
import org.elasticsearch.index.mapper.KeywordFieldMapper;

@@ -96,8 +97,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
    }

    @Override
    protected AnalyzeResponse newResponse() {
        return new AnalyzeResponse();
    protected Writeable.Reader<AnalyzeResponse> getResponseReader() {
        return AnalyzeResponse::new;
    }

    @Override

@@ -140,14 +141,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
        }
        MappedFieldType fieldType = indexService.mapperService().fullName(request.field());
        if (fieldType != null) {
            if (fieldType.tokenized()) {
            if (fieldType.tokenized() || fieldType instanceof KeywordFieldMapper.KeywordFieldType) {
                analyzer = fieldType.indexAnalyzer();
            } else if (fieldType instanceof KeywordFieldMapper.KeywordFieldType) {
                analyzer = ((KeywordFieldMapper.KeywordFieldType) fieldType).normalizer();
                if (analyzer == null) {
                    // this will be KeywordAnalyzer
                    analyzer = fieldType.indexAnalyzer();
                }
            } else {
                throw new IllegalArgumentException("Can't process field [" + request.field() +
                    "], Analysis requests are only supported on tokenized fields");
@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.mapping.get;

import org.elasticsearch.action.Action;
import org.elasticsearch.common.io.stream.Writeable;

public class GetFieldMappingsAction extends Action<GetFieldMappingsResponse> {

@@ -32,6 +33,11 @@ public class GetFieldMappingsAction extends Action<GetFieldMappingsResponse> {

    @Override
    public GetFieldMappingsResponse newResponse() {
        return new GetFieldMappingsResponse();
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override
    public Writeable.Reader<GetFieldMappingsResponse> getResponseReader() {
        return GetFieldMappingsResponse::new;
    }
}
@@ -92,9 +92,33 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte
        this.mappings = mappings;
    }

    GetFieldMappingsResponse() {
    }

    GetFieldMappingsResponse(StreamInput in) throws IOException {
        super(in);
        int size = in.readVInt();
        Map<String, Map<String, Map<String, FieldMappingMetaData>>> indexMapBuilder = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            String index = in.readString();
            int typesSize = in.readVInt();
            Map<String, Map<String, FieldMappingMetaData>> typeMapBuilder = new HashMap<>(typesSize);
            for (int j = 0; j < typesSize; j++) {
                String type = in.readString();
                int fieldSize = in.readVInt();
                Map<String, FieldMappingMetaData> fieldMapBuilder = new HashMap<>(fieldSize);
                for (int k = 0; k < fieldSize; k++) {
                    fieldMapBuilder.put(in.readString(), new FieldMappingMetaData(in.readString(), in.readBytesReference()));
                }
                typeMapBuilder.put(type, unmodifiableMap(fieldMapBuilder));
            }
            indexMapBuilder.put(index, unmodifiableMap(typeMapBuilder));
        }
        mappings = unmodifiableMap(indexMapBuilder);

    }

    /** returns the retrieved field mapping. The return map keys are index, type, field (as specified in the request). */
    public Map<String, Map<String, Map<String, FieldMappingMetaData>>> mappings() {
        return mappings;

@@ -269,25 +293,7 @@ public class GetFieldMappingsResponse extends ActionResponse implements ToXConte

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        int size = in.readVInt();
        Map<String, Map<String, Map<String, FieldMappingMetaData>>> indexMapBuilder = new HashMap<>(size);
        for (int i = 0; i < size; i++) {
            String index = in.readString();
            int typesSize = in.readVInt();
            Map<String, Map<String, FieldMappingMetaData>> typeMapBuilder = new HashMap<>(typesSize);
            for (int j = 0; j < typesSize; j++) {
                String type = in.readString();
                int fieldSize = in.readVInt();
                Map<String, FieldMappingMetaData> fieldMapBuilder = new HashMap<>(fieldSize);
                for (int k = 0; k < fieldSize; k++) {
                    fieldMapBuilder.put(in.readString(), new FieldMappingMetaData(in.readString(), in.readBytesReference()));
                }
                typeMapBuilder.put(type, unmodifiableMap(fieldMapBuilder));
            }
            indexMapBuilder.put(index, unmodifiableMap(typeMapBuilder));
        }
        mappings = unmodifiableMap(indexMapBuilder);
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override
@@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentHelper;

@@ -123,8 +124,8 @@ public class TransportGetFieldMappingsIndexAction
    }

    @Override
    protected GetFieldMappingsResponse newResponse() {
        return new GetFieldMappingsResponse();
    protected Writeable.Reader<GetFieldMappingsResponse> getResponseReader() {
        return GetFieldMappingsResponse::new;
    }

    @Override
@@ -20,6 +20,7 @@
package org.elasticsearch.action.explain;

import org.elasticsearch.action.Action;
import org.elasticsearch.common.io.stream.Writeable;

/**
 * Entry point for the explain feature.

@@ -35,6 +36,11 @@ public class ExplainAction extends Action<ExplainResponse> {

    @Override
    public ExplainResponse newResponse() {
        return new ExplainResponse();
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override
    public Writeable.Reader<ExplainResponse> getResponseReader() {
        return ExplainResponse::new;
    }
}
@@ -60,6 +60,7 @@ public class ExplainResponse extends ActionResponse implements StatusToXContentO
    private Explanation explanation;
    private GetResult getResult;

    // TODO(talevy): remove dependency on empty constructor from ExplainResponseTests
    ExplainResponse() {
    }

@@ -80,6 +81,20 @@ public class ExplainResponse extends ActionResponse implements StatusToXContentO
        this.getResult = getResult;
    }

    public ExplainResponse(StreamInput in) throws IOException {
        super(in);
        index = in.readString();
        type = in.readString();
        id = in.readString();
        exists = in.readBoolean();
        if (in.readBoolean()) {
            explanation = readExplanation(in);
        }
        if (in.readBoolean()) {
            getResult = GetResult.readGetResult(in);
        }
    }

    public String getIndex() {
        return index;
    }

@@ -123,17 +138,7 @@ public class ExplainResponse extends ActionResponse implements StatusToXContentO

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        index = in.readString();
        type = in.readString();
        id = in.readString();
        exists = in.readBoolean();
        if (in.readBoolean()) {
            explanation = readExplanation(in);
        }
        if (in.readBoolean()) {
            getResult = GetResult.readGetResult(in);
        }
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override
@@ -32,6 +32,7 @@ import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;

@@ -152,8 +153,8 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
    }

    @Override
    protected ExplainResponse newResponse() {
        return new ExplainResponse();
    protected Writeable.Reader<ExplainResponse> getResponseReader() {
        return ExplainResponse::new;
    }

    @Override
@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ObjectMapper;

@@ -114,8 +115,8 @@ public class TransportFieldCapabilitiesIndexAction extends TransportSingleShardA
    }

    @Override
    protected FieldCapabilitiesIndexResponse newResponse() {
        return new FieldCapabilitiesIndexResponse();
    protected Writeable.Reader<FieldCapabilitiesIndexResponse> getResponseReader() {
        return FieldCapabilitiesIndexResponse::new;
    }

    @Override
@@ -20,6 +20,7 @@
package org.elasticsearch.action.get;

import org.elasticsearch.action.Action;
import org.elasticsearch.common.io.stream.Writeable;

public class GetAction extends Action<GetResponse> {

@@ -32,6 +33,11 @@ public class GetAction extends Action<GetResponse> {

    @Override
    public GetResponse newResponse() {
        return new GetResponse();
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override
    public Writeable.Reader<GetResponse> getResponseReader() {
        return GetResponse::new;
    }
}
@@ -48,7 +48,9 @@ public class GetResponse extends ActionResponse implements Iterable<DocumentFiel

    GetResult getResult;

    GetResponse() {
    GetResponse(StreamInput in) throws IOException {
        super(in);
        getResult = GetResult.readGetResult(in);
    }

    public GetResponse(GetResult getResult) {

@@ -203,8 +205,7 @@ public class GetResponse extends ActionResponse implements Iterable<DocumentFiel

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        getResult = GetResult.readGetResult(in);
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override
@@ -104,8 +104,7 @@ public class MultiGetItemResponse implements Streamable {
        if (in.readBoolean()) {
            failure = MultiGetResponse.Failure.readFailure(in);
        } else {
            response = new GetResponse();
            response.readFrom(in);
            response = new GetResponse(in);
        }
    }

@@ -119,4 +118,4 @@ public class MultiGetItemResponse implements Streamable {
            response.writeTo(out);
        }
    }
}
}
@@ -30,9 +30,9 @@ import java.util.List;

public class MultiGetShardResponse extends ActionResponse {

    IntArrayList locations;
    List<GetResponse> responses;
    List<MultiGetResponse.Failure> failures;
    final IntArrayList locations;
    final List<GetResponse> responses;
    final List<MultiGetResponse.Failure> failures;

    MultiGetShardResponse() {
        locations = new IntArrayList();

@@ -40,6 +40,27 @@ public class MultiGetShardResponse extends ActionResponse {
        failures = new ArrayList<>();
    }

    MultiGetShardResponse(StreamInput in) throws IOException {
        super(in);
        int size = in.readVInt();
        locations = new IntArrayList(size);
        responses = new ArrayList<>(size);
        failures = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            locations.add(in.readVInt());
            if (in.readBoolean()) {
                responses.add(new GetResponse(in));
            } else {
                responses.add(null);
            }
            if (in.readBoolean()) {
                failures.add(MultiGetResponse.Failure.readFailure(in));
            } else {
                failures.add(null);
            }
        }
    }

    public void add(int location, GetResponse response) {
        locations.add(location);
        responses.add(response);

@@ -54,26 +75,7 @@ public class MultiGetShardResponse extends ActionResponse {

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        int size = in.readVInt();
        locations = new IntArrayList(size);
        responses = new ArrayList<>(size);
        failures = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            locations.add(in.readVInt());
            if (in.readBoolean()) {
                GetResponse response = new GetResponse();
                response.readFrom(in);
                responses.add(response);
            } else {
                responses.add(null);
            }
            if (in.readBoolean()) {
                failures.add(MultiGetResponse.Failure.readFailure(in));
            } else {
                failures.add(null);
            }
        }
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override

@@ -96,4 +98,4 @@ public class MultiGetShardResponse extends ActionResponse {
        }
    }
}
}
}
@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.shard.IndexShard;

@@ -108,8 +109,8 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
    }

    @Override
    protected GetResponse newResponse() {
        return new GetResponse();
    protected Writeable.Reader<GetResponse> getResponseReader() {
        return GetResponse::new;
    }

    @Override
@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.shard.IndexShard;

@@ -57,8 +58,8 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
    }

    @Override
    protected MultiGetShardResponse newResponse() {
        return new MultiGetShardResponse();
    protected Writeable.Reader<MultiGetShardResponse> getResponseReader() {
        return MultiGetShardResponse::new;
    }

    @Override
@@ -302,12 +302,19 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
     * ensure that the same value, determined by the coordinating node, is used on all nodes involved in the execution of the search
     * request. When created through {@link #crossClusterSearch(SearchRequest, String[], String, long, boolean)}, this method returns
     * the provided current time, otherwise it will return {@link System#currentTimeMillis()}.
     *
     */
    long getOrCreateAbsoluteStartMillis() {
        return absoluteStartMillis == DEFAULT_ABSOLUTE_START_MILLIS ? System.currentTimeMillis() : absoluteStartMillis;
    }

    /**
     * Returns the provided <code>absoluteStartMillis</code> when created through {@link #crossClusterSearch} and
     * -1 otherwise.
     */
    long getAbsoluteStartMillis() {
        return absoluteStartMillis;
    }

    /**
     * Sets the indices the search will be executed on.
     */
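A small illustrative sketch (assumed names, not part of the commit) of the sentinel behaviour described in the Javadoc above: a request created through crossClusterSearch carries a fixed start time, any other request falls back to the current clock.

// Simplified stand-in for the SearchRequest timing logic shown above.
public class AbsoluteStartMillisExample {
    private static final long DEFAULT_ABSOLUTE_START_MILLIS = -1;

    private final long absoluteStartMillis;

    AbsoluteStartMillisExample(long absoluteStartMillis) {
        this.absoluteStartMillis = absoluteStartMillis;
    }

    // Mirrors getOrCreateAbsoluteStartMillis(): reuse the coordinating node's value if one was provided.
    long getOrCreateAbsoluteStartMillis() {
        return absoluteStartMillis == DEFAULT_ABSOLUTE_START_MILLIS ? System.currentTimeMillis() : absoluteStartMillis;
    }

    public static void main(String[] args) {
        System.out.println(new AbsoluteStartMillisExample(-1).getOrCreateAbsoluteStartMillis());   // wall clock
        System.out.println(new AbsoluteStartMillisExample(42L).getOrCreateAbsoluteStartMillis());  // 42
    }
}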
@@ -38,6 +38,7 @@ import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.index.shard.ShardId;

@@ -118,7 +119,8 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
            }
        });
    }
    protected abstract Response newResponse();

    protected abstract Writeable.Reader<Response> getResponseReader();

    protected abstract boolean resolveIndex(Request request);

@@ -182,13 +184,12 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
        public void start() {
            if (shardIt == null) {
                // just execute it on the local node
                final Writeable.Reader<Response> reader = getResponseReader();
                transportService.sendRequest(clusterService.localNode(), transportShardAction, internalRequest.request(),
                    new TransportResponseHandler<Response>() {
                        @Override
                        public Response read(StreamInput in) throws IOException {
                            Response response = newResponse();
                            response.readFrom(in);
                            return response;
                            return reader.read(in);
                        }

                        @Override

@@ -251,14 +252,13 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
                    node
                );
            }
            final Writeable.Reader<Response> reader = getResponseReader();
            transportService.sendRequest(node, transportShardAction, internalRequest.request(),
                new TransportResponseHandler<Response>() {

                    @Override
                    public Response read(StreamInput in) throws IOException {
                        Response response = newResponse();
                        response.readFrom(in);
                        return response;
                        return reader.read(in);
                    }

                    @Override
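The handler change above is the consumer side of the same migration: instead of building a blank response and filling it via readFrom, the handler captures the action's Writeable.Reader once and delegates deserialization to it. A compact sketch of that delegation, with placeholder types standing in for the real transport classes:

import java.io.IOException;

class ResponseHandlerSketch<R> {
    // Stand-in for Writeable.Reader<R>: turns a stream into a fully built response.
    interface Reader<T> { T read(ByteStream in) throws IOException; }
    interface ByteStream { String readString() throws IOException; }

    private final Reader<R> reader;

    ResponseHandlerSketch(Reader<R> reader) {
        this.reader = reader;       // captured once, like the 'final Writeable.Reader<Response> reader' above
    }

    R read(ByteStream in) throws IOException {
        return reader.read(in);     // no mutable blank instance, no readFrom()
    }
}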
@@ -105,8 +105,7 @@ public class MultiTermVectorsItemResponse implements Streamable {
        if (in.readBoolean()) {
            failure = MultiTermVectorsResponse.Failure.readFailure(in);
        } else {
            response = new TermVectorsResponse();
            response.readFrom(in);
            response = new TermVectorsResponse(in);
        }
    }

@@ -120,4 +119,4 @@ public class MultiTermVectorsItemResponse implements Streamable {
            response.writeTo(out);
        }
    }
}
}
@@ -30,9 +30,9 @@ import java.util.List;

public class MultiTermVectorsShardResponse extends ActionResponse {

    IntArrayList locations;
    List<TermVectorsResponse> responses;
    List<MultiTermVectorsResponse.Failure> failures;
    final IntArrayList locations;
    final List<TermVectorsResponse> responses;
    final List<MultiTermVectorsResponse.Failure> failures;

    MultiTermVectorsShardResponse() {
        locations = new IntArrayList();

@@ -40,6 +40,27 @@ public class MultiTermVectorsShardResponse extends ActionResponse {
        failures = new ArrayList<>();
    }

    MultiTermVectorsShardResponse(StreamInput in) throws IOException {
        super(in);
        int size = in.readVInt();
        locations = new IntArrayList(size);
        responses = new ArrayList<>(size);
        failures = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            locations.add(in.readVInt());
            if (in.readBoolean()) {
                responses.add(new TermVectorsResponse(in));
            } else {
                responses.add(null);
            }
            if (in.readBoolean()) {
                failures.add(MultiTermVectorsResponse.Failure.readFailure(in));
            } else {
                failures.add(null);
            }
        }
    }

    public void add(int location, TermVectorsResponse response) {
        locations.add(location);
        responses.add(response);

@@ -54,26 +75,7 @@ public class MultiTermVectorsShardResponse extends ActionResponse {

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        int size = in.readVInt();
        locations = new IntArrayList(size);
        responses = new ArrayList<>(size);
        failures = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            locations.add(in.readVInt());
            if (in.readBoolean()) {
                TermVectorsResponse response = new TermVectorsResponse();
                response.readFrom(in);
                responses.add(response);
            } else {
                responses.add(null);
            }
            if (in.readBoolean()) {
                failures.add(MultiTermVectorsResponse.Failure.readFailure(in));
            } else {
                failures.add(null);
            }
        }
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override

@@ -96,4 +98,4 @@ public class MultiTermVectorsShardResponse extends ActionResponse {
        }
    }
}
}
}
@@ -20,6 +20,7 @@
package org.elasticsearch.action.termvectors;

import org.elasticsearch.action.Action;
import org.elasticsearch.common.io.stream.Writeable;

public class TermVectorsAction extends Action<TermVectorsResponse> {

@@ -32,6 +33,11 @@ public class TermVectorsAction extends Action<TermVectorsResponse> {

    @Override
    public TermVectorsResponse newResponse() {
        return new TermVectorsResponse();
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    @Override
    public Writeable.Reader<TermVectorsResponse> getResponseReader() {
        return TermVectorsResponse::new;
    }
}
@@ -103,6 +103,20 @@ public class TermVectorsResponse extends ActionResponse implements ToXContentObj
    TermVectorsResponse() {
    }

    TermVectorsResponse(StreamInput in) throws IOException {
        index = in.readString();
        type = in.readString();
        id = in.readString();
        docVersion = in.readVLong();
        exists = in.readBoolean();
        artificial = in.readBoolean();
        tookInMillis = in.readVLong();
        if (in.readBoolean()) {
            headerRef = in.readBytesReference();
            termVectors = in.readBytesReference();
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(index);

@@ -127,17 +141,7 @@ public class TermVectorsResponse extends ActionResponse implements ToXContentObj

    @Override
    public void readFrom(StreamInput in) throws IOException {
        index = in.readString();
        type = in.readString();
        id = in.readString();
        docVersion = in.readVLong();
        exists = in.readBoolean();
        artificial = in.readBoolean();
        tookInMillis = in.readVLong();
        if (in.readBoolean()) {
            headerRef = in.readBytesReference();
            termVectors = in.readBytesReference();
        }
        throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable");
    }

    public Fields getFields() throws IOException {
@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;

@@ -58,8 +59,8 @@ public class TransportShardMultiTermsVectorAction extends
    }

    @Override
    protected MultiTermVectorsShardResponse newResponse() {
        return new MultiTermVectorsShardResponse();
    protected Writeable.Reader<MultiTermVectorsShardResponse> getResponseReader() {
        return MultiTermVectorsShardResponse::new;
    }

    @Override
Some files were not shown because too many files have changed in this diff.