Merge branch 'master' into index-lifecycle
commit b08d7c872b
@@ -147,7 +147,7 @@ class ClusterConfiguration {
     // map from destination path, to source file
     Map<String, Object> extraConfigFiles = new HashMap<>()
 
-    LinkedHashMap<String, Project> plugins = new LinkedHashMap<>()
+    LinkedHashMap<String, Object> plugins = new LinkedHashMap<>()
 
     List<Project> modules = new ArrayList<>()
 

@@ -185,6 +185,11 @@ class ClusterConfiguration {
         plugins.put(pluginProject.name, pluginProject)
     }
 
+    @Input
+    void mavenPlugin(String name, String mavenCoords) {
+        plugins.put(name, mavenCoords)
+    }
+
     /** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */
     @Input
     void module(Project moduleProject) {
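The two ClusterConfiguration hunks above let the plugins map hold either a Gradle Project or a String of maven coordinates, and add a mavenPlugin hook for the coordinate form. A rough usage sketch follows; the block name assumes the usual integTestCluster configuration block, and the plugin name and coordinates are made-up examples, not taken from this commit:

    // Hypothetical build.gradle usage of the new hook added above:
    // register a released plugin zip by maven coordinates instead of a project in this build.
    integTestCluster {
        mavenPlugin 'my-plugin', 'org.example:my-plugin:1.0.0'
    }

Both forms end up in the same LinkedHashMap<String, Object>, which is why the map's value type is widened from Project to Object.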
@@ -99,8 +99,8 @@ class ClusterFormationTasks {
         // from mirrors using gradles built-in mechanism etc.
 
         configureDistributionDependency(project, config.distribution, bwcDistro, config.bwcVersion)
-        for (Map.Entry<String, Project> entry : config.plugins.entrySet()) {
-            configureBwcPluginDependency("${prefix}_elasticsearchBwcPlugins", project, entry.getValue(), bwcPlugins, config.bwcVersion)
+        for (Map.Entry<String, Object> entry : config.plugins.entrySet()) {
+            configureBwcPluginDependency(project, entry.getValue(), bwcPlugins, config.bwcVersion)
         }
         bwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS)
         bwcPlugins.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS)

@@ -150,10 +150,15 @@ class ClusterFormationTasks {
     }
 
     /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */
-    static void configureBwcPluginDependency(String name, Project project, Project pluginProject, Configuration configuration, Version elasticsearchVersion) {
-        verifyProjectHasBuildPlugin(name, elasticsearchVersion, project, pluginProject)
-        final String pluginName = findPluginName(pluginProject)
-        project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip")
+    static void configureBwcPluginDependency(Project project, Object plugin, Configuration configuration, Version elasticsearchVersion) {
+        if (plugin instanceof Project) {
+            Project pluginProject = (Project)plugin
+            verifyProjectHasBuildPlugin(configuration.name, elasticsearchVersion, project, pluginProject)
+            final String pluginName = findPluginName(pluginProject)
+            project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip")
+        } else {
+            project.dependencies.add(configuration.name, "${plugin}@zip")
+        }
     }
 
     /**
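The reworked configureBwcPluginDependency above now receives the plugin entry as an Object and branches on its type. As a standalone sketch of that dispatch (not code from the commit; it assumes the Gradle API is on the classpath, and the helper name and example coordinates are illustrative):

    import org.gradle.api.Project

    // Sketch only: the dependency notation each plugin form maps to for bwc testing.
    String bwcPluginNotation(Object plugin, String pluginName, String elasticsearchVersion) {
        if (plugin instanceof Project) {
            // a project-backed plugin is swapped for the released artifact of the older version
            return "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip"
        }
        // a maven-coordinate plugin (a String) is used as-is, e.g. "org.example:my-plugin:6.3.0"
        return "${plugin}@zip"
    }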
@@ -210,9 +215,9 @@ class ClusterFormationTasks {
         }
 
         // install plugins
-        for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
-            String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
-            setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, plugin.getValue(), prefix)
+        for (String pluginName : node.config.plugins.keySet()) {
+            String actionName = pluginTaskName('install', pluginName, 'Plugin')
+            setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, pluginName, prefix)
         }
 
         // sets up any extra config files that need to be copied over to the ES instance;
@@ -444,31 +449,40 @@ class ClusterFormationTasks {
         Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup)
 
         List<FileCollection> pluginFiles = []
-        for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
+        for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
 
-            Project pluginProject = plugin.getValue()
-            verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
-            String configurationName = pluginConfigurationName(prefix, pluginProject)
+            String configurationName = pluginConfigurationName(prefix, plugin.key)
             Configuration configuration = project.configurations.findByName(configurationName)
             if (configuration == null) {
                 configuration = project.configurations.create(configurationName)
             }
-            project.dependencies.add(configurationName, project.dependencies.project(path: pluginProject.path, configuration: 'zip'))
-            setup.dependsOn(pluginProject.tasks.bundlePlugin)
 
-            // also allow rest tests to use the rest spec from the plugin
-            String copyRestSpecTaskName = pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec')
-            Copy copyRestSpec = project.tasks.findByName(copyRestSpecTaskName)
-            for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
-                File restApiDir = new File(resourceDir, 'rest-api-spec/api')
-                if (restApiDir.exists() == false) continue
-                if (copyRestSpec == null) {
-                    copyRestSpec = project.tasks.create(name: copyRestSpecTaskName, type: Copy)
-                    copyPlugins.dependsOn(copyRestSpec)
-                    copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
+            if (plugin.getValue() instanceof Project) {
+                Project pluginProject = plugin.getValue()
+                verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
+
+                project.dependencies.add(configurationName, project.dependencies.project(path: pluginProject.path, configuration: 'zip'))
+                setup.dependsOn(pluginProject.tasks.bundlePlugin)
+
+                // also allow rest tests to use the rest spec from the plugin
+                String copyRestSpecTaskName = pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec')
+                Copy copyRestSpec = project.tasks.findByName(copyRestSpecTaskName)
+                for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
+                    File restApiDir = new File(resourceDir, 'rest-api-spec/api')
+                    if (restApiDir.exists() == false) continue
+                    if (copyRestSpec == null) {
+                        copyRestSpec = project.tasks.create(name: copyRestSpecTaskName, type: Copy)
+                        copyPlugins.dependsOn(copyRestSpec)
+                        copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
+                    }
+                    copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
                 }
-                copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
+            } else {
+                project.dependencies.add(configurationName, "${plugin.getValue()}@zip")
             }
+
             pluginFiles.add(configuration)
         }
@@ -477,32 +491,37 @@ class ClusterFormationTasks {
         return copyPlugins
     }
 
-    private static String pluginConfigurationName(final String prefix, final Project project) {
-        return "_plugin_${prefix}_${project.path}".replace(':', '_')
+    private static String pluginConfigurationName(final String prefix, final String name) {
+        return "_plugin_${prefix}_${name}".replace(':', '_')
     }
 
-    private static String pluginBwcConfigurationName(final String prefix, final Project project) {
-        return "_plugin_bwc_${prefix}_${project.path}".replace(':', '_')
+    private static String pluginBwcConfigurationName(final String prefix, final String name) {
+        return "_plugin_bwc_${prefix}_${name}".replace(':', '_')
     }
 
     /** Configures task to copy a plugin based on a zip file resolved using dependencies for an older version */
     static Task configureCopyBwcPluginsTask(String name, Project project, Task setup, NodeInfo node, String prefix) {
         Configuration bwcPlugins = project.configurations.getByName("${prefix}_elasticsearchBwcPlugins")
-        for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
-            Project pluginProject = plugin.getValue()
-            verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
-            String configurationName = pluginBwcConfigurationName(prefix, pluginProject)
+        for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
+            String configurationName = pluginBwcConfigurationName(prefix, plugin.key)
             Configuration configuration = project.configurations.findByName(configurationName)
             if (configuration == null) {
                 configuration = project.configurations.create(configurationName)
             }
 
-            final String depName = findPluginName(pluginProject)
-            Dependency dep = bwcPlugins.dependencies.find {
-                it.name == depName
+            if (plugin.getValue() instanceof Project) {
+                Project pluginProject = plugin.getValue()
+                verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
+
+                final String depName = findPluginName(pluginProject)
+                Dependency dep = bwcPlugins.dependencies.find {
+                    it.name == depName
+                }
+                configuration.dependencies.add(dep)
+            } else {
+                project.dependencies.add(configurationName, "${plugin.getValue()}@zip")
             }
-            configuration.dependencies.add(dep)
         }
 
         Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup) {
@@ -163,7 +163,8 @@ analysis module. ({pull}30397[#30397])
 [float]
 === Enhancements
 
-{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255])
+{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow
+copying source settings on index resize operations] ({pull}30255[#30255], {pull}30404[#30404])
 
 Added new "Request" object flavored request methods in the RestClient. Prefer
 these instead of the multi-argument versions. ({pull}29623[#29623])
@@ -62,11 +62,20 @@ the following request:
 
 [source,js]
 --------------------------------------------------
-POST my_source_index/_shrink/my_target_index
+POST my_source_index/_shrink/my_target_index?copy_settings=true
+{
+  "settings": {
+    "index.routing.allocation.require._name": null, <1>
+    "index.blocks.write": null <2>
+  }
+}
 --------------------------------------------------
 // CONSOLE
 // TEST[continued]
 
+<1> Clear the allocation requirement copied from the source index.
+<2> Clear the index write block copied from the source index.
+
 The above request returns immediately once the target index has been added to
 the cluster state -- it doesn't wait for the shrink operation to start.
 
@@ -97,7 +106,7 @@ and accepts `settings` and `aliases` parameters for the target index:
 
 [source,js]
 --------------------------------------------------
-POST my_source_index/_shrink/my_target_index
+POST my_source_index/_shrink/my_target_index?copy_settings=true
 {
   "settings": {
     "index.number_of_replicas": 1,
@@ -125,9 +134,11 @@ NOTE: By default, with the exception of `index.analysis`, `index.similarity`,
 and `index.sort` settings, index settings on the source index are not copied
 during a shrink operation. With the exception of non-copyable settings, settings
 from the source index can be copied to the target index by adding the URL
-parameter `copy_settings=true` to the request.
+parameter `copy_settings=true` to the request. Note that `copy_settings` can not
+be set to `false`. The parameter `copy_settings` will be removed in 8.0.0
 
-deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0]
+deprecated[6.4.0, not copying settings is deprecated, copying settings will be
+the default behavior in 7.x]
 
 [float]
 === Monitoring the shrink process
@@ -123,7 +123,7 @@ the following request:
 
 [source,js]
 --------------------------------------------------
-POST my_source_index/_split/my_target_index
+POST my_source_index/_split/my_target_index?copy_settings=true
 {
   "settings": {
     "index.number_of_shards": 2
@@ -158,7 +158,7 @@ and accepts `settings` and `aliases` parameters for the target index:
 
 [source,js]
 --------------------------------------------------
-POST my_source_index/_split/my_target_index
+POST my_source_index/_split/my_target_index?copy_settings=true
 {
   "settings": {
     "index.number_of_shards": 5 <1>
@@ -181,9 +181,11 @@ NOTE: By default, with the exception of `index.analysis`, `index.similarity`,
 and `index.sort` settings, index settings on the source index are not copied
 during a split operation. With the exception of non-copyable settings, settings
 from the source index can be copied to the target index by adding the URL
-parameter `copy_settings=true` to the request.
+parameter `copy_settings=true` to the request. Note that `copy_settings` can not
+be set to `false`. The parameter `copy_settings` will be removed in 8.0.0
 
-deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0]
+deprecated[6.4.0, not copying settings is deprecated, copying settings will be
+the default behavior in 7.x]
 
 [float]
 === Monitoring the split process
@@ -1,2 +1,2 @@
 org.gradle.daemon=false
-org.gradle.jvmargs=-Xmx1792m
+org.gradle.jvmargs=-Xmx2g
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;
 
 import java.util.HashSet;
 import java.util.Set;

@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.standard.ClassicTokenizer;

@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
 
 /**
  * Factory for {@link ClassicTokenizer}

@@ -33,7 +34,7 @@ public class ClassicTokenizerFactory extends AbstractTokenizerFactory {
 
     private final int maxTokenLength;
 
-    public ClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+    ClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
         maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
     }
@@ -34,9 +34,11 @@ import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter;
 import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
 import org.apache.lucene.analysis.core.DecimalDigitFilter;
 import org.apache.lucene.analysis.core.KeywordTokenizer;
+import org.apache.lucene.analysis.core.LetterTokenizer;
 import org.apache.lucene.analysis.core.LowerCaseTokenizer;
 import org.apache.lucene.analysis.core.StopAnalyzer;
 import org.apache.lucene.analysis.core.UpperCaseFilter;
+import org.apache.lucene.analysis.core.WhitespaceTokenizer;
 import org.apache.lucene.analysis.cz.CzechStemFilter;
 import org.apache.lucene.analysis.de.GermanNormalizationFilter;
 import org.apache.lucene.analysis.de.GermanStemFilter;

@@ -58,17 +60,25 @@ import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter;
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
 import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
+import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
 import org.apache.lucene.analysis.ngram.NGramTokenFilter;
+import org.apache.lucene.analysis.ngram.NGramTokenizer;
+import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
+import org.apache.lucene.analysis.pattern.PatternTokenizer;
 import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter;
 import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter;
 import org.apache.lucene.analysis.reverse.ReverseStringFilter;
 import org.apache.lucene.analysis.shingle.ShingleFilter;
 import org.apache.lucene.analysis.snowball.SnowballFilter;
 import org.apache.lucene.analysis.standard.ClassicFilter;
+import org.apache.lucene.analysis.standard.ClassicTokenizer;
+import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
+import org.apache.lucene.analysis.th.ThaiTokenizer;
 import org.apache.lucene.analysis.tr.ApostropheFilter;
 import org.apache.lucene.analysis.util.ElisionFilter;
 import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.index.analysis.CharFilterFactory;
 import org.elasticsearch.index.analysis.PreConfiguredCharFilter;
 import org.elasticsearch.index.analysis.PreConfiguredTokenFilter;

@@ -169,6 +179,19 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
         Map<String, AnalysisProvider<TokenizerFactory>> tokenizers = new TreeMap<>();
         tokenizers.put("simple_pattern", SimplePatternTokenizerFactory::new);
         tokenizers.put("simple_pattern_split", SimplePatternSplitTokenizerFactory::new);
+        tokenizers.put("thai", ThaiTokenizerFactory::new);
+        tokenizers.put("nGram", NGramTokenizerFactory::new);
+        tokenizers.put("ngram", NGramTokenizerFactory::new);
+        tokenizers.put("edgeNGram", EdgeNGramTokenizerFactory::new);
+        tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new);
+        tokenizers.put("classic", ClassicTokenizerFactory::new);
+        tokenizers.put("letter", LetterTokenizerFactory::new);
+        tokenizers.put("lowercase", LowerCaseTokenizerFactory::new);
+        tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new);
+        tokenizers.put("PathHierarchy", PathHierarchyTokenizerFactory::new);
+        tokenizers.put("pattern", PatternTokenizerFactory::new);
+        tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new);
+        tokenizers.put("whitespace", WhitespaceTokenizerFactory::new);
         return tokenizers;
     }
 
@@ -283,6 +306,16 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
     public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
         List<PreConfiguredTokenizer> tokenizers = new ArrayList<>();
         tokenizers.add(PreConfiguredTokenizer.singleton("keyword", KeywordTokenizer::new, null));
+        tokenizers.add(PreConfiguredTokenizer.singleton("classic", ClassicTokenizer::new, null));
+        tokenizers.add(PreConfiguredTokenizer.singleton("uax_url_email", UAX29URLEmailTokenizer::new, null));
+        tokenizers.add(PreConfiguredTokenizer.singleton("path_hierarchy", PathHierarchyTokenizer::new, null));
+        tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new, null));
+        tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new, null));
+        tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new, null));
+        tokenizers.add(PreConfiguredTokenizer.singleton("edge_ngram",
+            () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null));
+        tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1), null));
+        tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new, null));
         tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", LowerCaseTokenizer::new, () -> new TokenFilterFactory() {
             @Override
             public String name() {

@@ -294,6 +327,13 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
                 return new LowerCaseFilter(tokenStream);
             }
         }));
+
+        // Temporary shim for aliases. TODO deprecate after they are moved
+        tokenizers.add(PreConfiguredTokenizer.singleton("nGram", NGramTokenizer::new, null));
+        tokenizers.add(PreConfiguredTokenizer.singleton("edgeNGram",
+            () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null));
+        tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new, null));
+
         return tokenizers;
     }
 }
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;

@@ -25,19 +25,17 @@ import org.apache.lucene.analysis.ngram.NGramTokenizer;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
 
-import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenChars;
+import static org.elasticsearch.analysis.common.NGramTokenizerFactory.parseTokenChars;
 
 public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {
 
     private final int minGram;
 
     private final int maxGram;
 
     private final CharMatcher matcher;
 
-    public EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+    EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
         this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
         this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
@@ -17,17 +17,18 @@
  * under the License.
  */
 
-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.LetterTokenizer;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
 
 public class LetterTokenizerFactory extends AbstractTokenizerFactory {
 
-    public LetterTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+    LetterTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
     }
 
@@ -17,17 +17,19 @@
  * under the License.
  */
 
-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.LowerCaseTokenizer;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
+import org.elasticsearch.index.analysis.MultiTermAwareComponent;
 
 public class LowerCaseTokenizerFactory extends AbstractTokenizerFactory implements MultiTermAwareComponent {
 
-    public LowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+    LowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
     }
 
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.ngram.NGramTokenizer;

@@ -25,6 +25,7 @@ import org.elasticsearch.Version;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
 
 import java.lang.reflect.Field;
 import java.lang.reflect.Modifier;

@@ -83,7 +84,7 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory {
         return builder.build();
     }
 
-    public NGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+    NGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
         int maxAllowedNgramDiff = indexSettings.getMaxNgramDiff();
         this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.path.PathHierarchyTokenizer;

@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
 
 public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
 

@@ -35,7 +36,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
     private final int skip;
     private final boolean reverse;
 
-    public PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+    PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
         bufferSize = settings.getAsInt("buffer_size", 1024);
         String delimiter = settings.get("delimiter");
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.pattern.PatternTokenizer;

@@ -25,6 +25,7 @@ import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
 
 import java.util.regex.Pattern;
 

@@ -33,7 +34,7 @@ public class PatternTokenizerFactory extends AbstractTokenizerFactory {
     private final Pattern pattern;
     private final int group;
 
-    public PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+    PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
 
         String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
@@ -17,20 +17,21 @@
  * under the License.
  */
 
-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.th.ThaiTokenizer;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
 
 /**
  * Factory for {@link ThaiTokenizer}
 */
 public class ThaiTokenizerFactory extends AbstractTokenizerFactory {
 
-    public ThaiTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+    ThaiTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
     }
 
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;
 
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.standard.StandardAnalyzer;

@@ -25,12 +25,13 @@ import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
 
 public class UAX29URLEmailTokenizerFactory extends AbstractTokenizerFactory {
 
     private final int maxTokenLength;
 
-    public UAX29URLEmailTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+    UAX29URLEmailTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
         maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
     }
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;
 
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;

@@ -26,13 +26,14 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
 
 public class WhitespaceTokenizerFactory extends AbstractTokenizerFactory {
 
     static final String MAX_TOKEN_LENGTH = "max_token_length";
     private Integer maxTokenLength;
 
-    public WhitespaceTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
+    WhitespaceTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
         super(indexSettings, name, settings);
         maxTokenLength = settings.getAsInt(MAX_TOKEN_LENGTH, StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
     }
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;
 
 import org.elasticsearch.test.ESTestCase;
 

@@ -24,6 +24,7 @@ import org.apache.lucene.analysis.en.PorterStemFilterFactory;
 import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilterFactory;
 import org.apache.lucene.analysis.reverse.ReverseStringFilterFactory;
 import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory;
+import org.elasticsearch.index.analysis.KeywordTokenizerFactory;
 import org.elasticsearch.index.analysis.SoraniNormalizationFilterFactory;
 import org.elasticsearch.index.analysis.SynonymTokenFilterFactory;
 import org.elasticsearch.indices.analysis.AnalysisFactoryTestCase;

@@ -45,6 +46,16 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase {
         Map<String, Class<?>> tokenizers = new TreeMap<>(super.getTokenizers());
         tokenizers.put("simplepattern", SimplePatternTokenizerFactory.class);
         tokenizers.put("simplepatternsplit", SimplePatternSplitTokenizerFactory.class);
+        tokenizers.put("thai", ThaiTokenizerFactory.class);
+        tokenizers.put("ngram", NGramTokenizerFactory.class);
+        tokenizers.put("edgengram", EdgeNGramTokenizerFactory.class);
+        tokenizers.put("classic", ClassicTokenizerFactory.class);
+        tokenizers.put("letter", LetterTokenizerFactory.class);
+        tokenizers.put("lowercase", LowerCaseTokenizerFactory.class);
+        tokenizers.put("pathhierarchy", PathHierarchyTokenizerFactory.class);
+        tokenizers.put("pattern", PatternTokenizerFactory.class);
+        tokenizers.put("uax29urlemail", UAX29URLEmailTokenizerFactory.class);
+        tokenizers.put("whitespace", WhitespaceTokenizerFactory.class);
         return tokenizers;
     }
 
@@ -211,10 +222,25 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase {
 
     @Override
     protected Map<String, Class<?>> getPreConfiguredTokenizers() {
-        Map<String, Class<?>> filters = new TreeMap<>(super.getPreConfiguredTokenizers());
-        filters.put("keyword", null);
-        filters.put("lowercase", null);
-        return filters;
+        Map<String, Class<?>> tokenizers = new TreeMap<>(super.getPreConfiguredTokenizers());
+        tokenizers.put("keyword", null);
+        tokenizers.put("lowercase", null);
+        tokenizers.put("classic", null);
+        tokenizers.put("uax_url_email", org.apache.lucene.analysis.standard.UAX29URLEmailTokenizerFactory.class);
+        tokenizers.put("path_hierarchy", null);
+        tokenizers.put("letter", null);
+        tokenizers.put("whitespace", null);
+        tokenizers.put("ngram", null);
+        tokenizers.put("edge_ngram", null);
+        tokenizers.put("pattern", null);
+        tokenizers.put("thai", null);
+
+        // TODO drop aliases once they are moved to module
+        tokenizers.put("nGram", tokenizers.get("ngram"));
+        tokenizers.put("edgeNGram", tokenizers.get("edge_ngram"));
+        tokenizers.put("PathHierarchy", tokenizers.get("path_hierarchy"));
+
+        return tokenizers;
     }
 
     /**
@@ -45,7 +45,7 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
             .build();
 
         try {
-            AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
+            AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
             Assert.fail("[common_words] or [common_words_path] is set");
         } catch (IllegalArgumentException e) {
         } catch (IOException e) {
@@ -17,7 +17,7 @@
  * under the License.
  */
 
-package org.elasticsearch.index.query;
+package org.elasticsearch.analysis.common;
 
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.Query;

@@ -29,12 +29,22 @@ import org.apache.lucene.search.PhraseQuery;
 import org.apache.lucene.search.MultiPhraseQuery;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.query.MatchPhraseQueryBuilder;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.MultiMatchQueryBuilder;
+import org.elasticsearch.index.query.QueryShardContext;
+import org.elasticsearch.index.query.QueryStringQueryBuilder;
+import org.elasticsearch.index.query.SimpleQueryStringBuilder;
+import org.elasticsearch.index.query.SimpleQueryStringFlag;
 import org.elasticsearch.index.search.MatchQuery;
+import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 import org.junit.After;
 import org.junit.Before;
 
 import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
 
 import static org.hamcrest.Matchers.equalTo;
 

@@ -49,6 +59,11 @@ public class DisableGraphQueryTests extends ESSingleNodeTestCase {
     private static Query expectedQueryWithUnigram;
     private static Query expectedPhraseQueryWithUnigram;
 
+    @Override
+    protected Collection<Class<? extends Plugin>> getPlugins() {
+        return Collections.singleton(CommonAnalysisPlugin.class);
+    }
+
     @Before
     public void setup() {
         Settings settings = Settings.builder()
@@ -150,42 +165,42 @@ public class DisableGraphQueryTests extends ESSingleNodeTestCase {
     public void testMatchPhraseQuery() throws IOException {
         MatchPhraseQueryBuilder builder =
             new MatchPhraseQueryBuilder("text_shingle_unigram", "foo bar baz");
-        Query query = builder.doToQuery(shardContext);
+        Query query = builder.toQuery(shardContext);
         assertThat(expectedPhraseQueryWithUnigram, equalTo(query));
 
         builder =
             new MatchPhraseQueryBuilder("text_shingle", "foo bar baz biz");
-        query = builder.doToQuery(shardContext);
+        query = builder.toQuery(shardContext);
         assertThat(expectedPhraseQuery, equalTo(query));
     }
 
     public void testMatchQuery() throws IOException {
         MatchQueryBuilder builder =
             new MatchQueryBuilder("text_shingle_unigram", "foo bar baz");
-        Query query = builder.doToQuery(shardContext);
+        Query query = builder.toQuery(shardContext);
         assertThat(expectedQueryWithUnigram, equalTo(query));
 
         builder = new MatchQueryBuilder("text_shingle", "foo bar baz biz");
-        query = builder.doToQuery(shardContext);
+        query = builder.toQuery(shardContext);
         assertThat(expectedQuery, equalTo(query));
     }
 
     public void testMultiMatchQuery() throws IOException {
         MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder("foo bar baz",
             "text_shingle_unigram");
-        Query query = builder.doToQuery(shardContext);
+        Query query = builder.toQuery(shardContext);
         assertThat(expectedQueryWithUnigram, equalTo(query));
 
         builder.type(MatchQuery.Type.PHRASE);
-        query = builder.doToQuery(shardContext);
+        query = builder.toQuery(shardContext);
         assertThat(expectedPhraseQueryWithUnigram, equalTo(query));
 
         builder = new MultiMatchQueryBuilder("foo bar baz biz", "text_shingle");
-        query = builder.doToQuery(shardContext);
+        query = builder.toQuery(shardContext);
         assertThat(expectedQuery, equalTo(query));
 
         builder.type(MatchQuery.Type.PHRASE);
-        query = builder.doToQuery(shardContext);
+        query = builder.toQuery(shardContext);
         assertThat(expectedPhraseQuery, equalTo(query));
     }
 
@@ -193,47 +208,47 @@ public class DisableGraphQueryTests extends ESSingleNodeTestCase {
         SimpleQueryStringBuilder builder = new SimpleQueryStringBuilder("foo bar baz");
         builder.field("text_shingle_unigram");
         builder.flags(SimpleQueryStringFlag.NONE);
-        Query query = builder.doToQuery(shardContext);
+        Query query = builder.toQuery(shardContext);
         assertThat(expectedQueryWithUnigram, equalTo(query));
 
         builder = new SimpleQueryStringBuilder("\"foo bar baz\"");
         builder.field("text_shingle_unigram");
         builder.flags(SimpleQueryStringFlag.PHRASE);
-        query = builder.doToQuery(shardContext);
+        query = builder.toQuery(shardContext);
         assertThat(expectedPhraseQueryWithUnigram, equalTo(query));
 
         builder = new SimpleQueryStringBuilder("foo bar baz biz");
         builder.field("text_shingle");
         builder.flags(SimpleQueryStringFlag.NONE);
-        query = builder.doToQuery(shardContext);
+        query = builder.toQuery(shardContext);
         assertThat(expectedQuery, equalTo(query));
 
         builder = new SimpleQueryStringBuilder("\"foo bar baz biz\"");
         builder.field("text_shingle");
         builder.flags(SimpleQueryStringFlag.PHRASE);
-        query = builder.doToQuery(shardContext);
+        query = builder.toQuery(shardContext);
         assertThat(expectedPhraseQuery, equalTo(query));
     }
 
     public void testQueryString() throws IOException {
         QueryStringQueryBuilder builder = new QueryStringQueryBuilder("foo bar baz");
         builder.field("text_shingle_unigram");
-        Query query = builder.doToQuery(shardContext);
+        Query query = builder.toQuery(shardContext);
         assertThat(expectedQueryWithUnigram, equalTo(query));
 
         builder = new QueryStringQueryBuilder("\"foo bar baz\"");
         builder.field("text_shingle_unigram");
-        query = builder.doToQuery(shardContext);
+        query = builder.toQuery(shardContext);
         assertThat(expectedPhraseQueryWithUnigram, equalTo(query));
 
         builder = new QueryStringQueryBuilder("foo bar baz biz");
         builder.field("text_shingle");
-        query = builder.doToQuery(shardContext);
+        query = builder.toQuery(shardContext);
         assertThat(expectedQuery, equalTo(query));
 
         builder = new QueryStringQueryBuilder("\"foo bar baz biz\"");
|
builder = new QueryStringQueryBuilder("\"foo bar baz biz\"");
|
||||||
builder.field("text_shingle");
|
builder.field("text_shingle");
|
||||||
query = builder.doToQuery(shardContext);
|
query = builder.toQuery(shardContext);
|
||||||
assertThat(expectedPhraseQuery, equalTo(query));
|
assertThat(expectedPhraseQuery, equalTo(query));
|
||||||
}
|
}
|
||||||
}
|
}
|
|
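Side note, not part of the diff: the change above swaps the tests from the protected doToQuery(...) hook to the public toQuery(...) entry point on the query builders. A minimal sketch, assuming a QueryShardContext obtained the same way the test obtains shardContext (the class name ToQuerySketch is hypothetical):

import java.io.IOException;
import org.apache.lucene.search.Query;
import org.elasticsearch.index.query.MatchPhraseQueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;

final class ToQuerySketch {
    // Builds the Lucene query through the public API rather than the protected doToQuery hook.
    static Query buildPhraseQuery(QueryShardContext shardContext) throws IOException {
        return new MatchPhraseQueryBuilder("text_shingle", "foo bar baz biz").toQuery(shardContext);
    }
}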
@@ -30,8 +30,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.Settings.Builder;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory;
-import org.elasticsearch.index.analysis.NGramTokenizerFactory;
 import org.elasticsearch.test.ESTokenStreamTestCase;
 import org.elasticsearch.test.IndexSettingsModule;

@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;

 import com.carrotsearch.randomizedtesting.generators.RandomPicks;

@@ -17,15 +17,13 @@
  * under the License.
  */

-package org.elasticsearch.index.analysis.synonyms;
+package org.elasticsearch.analysis.common;

-import org.apache.logging.log4j.Logger;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexSettings;
@@ -44,7 +42,6 @@ import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.startsWith;

 public class SynonymsAnalysisTests extends ESTestCase {
-    protected final Logger logger = Loggers.getLogger(getClass());
     private IndexAnalyzers indexAnalyzers;

     public void testSynonymsAnalysis() throws IOException {
@@ -56,14 +53,14 @@ public class SynonymsAnalysisTests extends ESTestCase {
         Files.copy(synonyms, config.resolve("synonyms.txt"));
         Files.copy(synonymsWordnet, config.resolve("synonyms_wordnet.txt"));

-        String json = "/org/elasticsearch/index/analysis/synonyms/synonyms.json";
+        String json = "/org/elasticsearch/analysis/common/synonyms.json";
         Settings settings = Settings.builder().
             loadFromStream(json, getClass().getResourceAsStream(json), false)
             .put(Environment.PATH_HOME_SETTING.getKey(), home)
             .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();

         IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
-        indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
+        indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers;

         match("synonymAnalyzer", "kimchy is the dude abides", "shay is the elasticsearch man!");
         match("synonymAnalyzer_file", "kimchy is the dude abides", "shay is the elasticsearch man!");
@@ -91,7 +88,7 @@ public class SynonymsAnalysisTests extends ESTestCase {
             .build();
         IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
         try {
-            indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
+            indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers;
             fail("fail! due to synonym word deleted by analyzer");
         } catch (Exception e) {
             assertThat(e, instanceOf(IllegalArgumentException.class));
@@ -112,7 +109,7 @@ public class SynonymsAnalysisTests extends ESTestCase {
             .build();
         IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
         try {
-            indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
+            indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers;
             fail("fail! due to synonym word deleted by analyzer");
         } catch (Exception e) {
             assertThat(e, instanceOf(IllegalArgumentException.class));
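For context, an illustrative, self-contained Lucene sketch (not code from this commit) of the "kimchy => shay" rewrite these tests assert; the class name SynonymSketch is hypothetical:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.synonym.SynonymGraphFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.CharsRef;

public class SynonymSketch {
    public static void main(String[] args) throws Exception {
        // Build a one-rule synonym map equivalent to synonyms: ["kimchy => shay"].
        SynonymMap.Builder mapBuilder = new SynonymMap.Builder(true);
        mapBuilder.add(new CharsRef("kimchy"), new CharsRef("shay"), false);
        SynonymMap map = mapBuilder.build();

        Analyzer analyzer = new Analyzer() {
            @Override
            protected TokenStreamComponents createComponents(String fieldName) {
                Tokenizer tokenizer = new WhitespaceTokenizer();
                TokenStream stream = new SynonymGraphFilter(tokenizer, map, true);
                return new TokenStreamComponents(tokenizer, stream);
            }
        };

        try (TokenStream ts = analyzer.tokenStream("f", "kimchy is the dude")) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                System.out.println(term.toString()); // shay, is, the, dude
            }
            ts.end();
        }
    }
}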
@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.index.analysis;
+package org.elasticsearch.analysis.common;

 import com.carrotsearch.randomizedtesting.generators.RandomStrings;

@@ -70,3 +70,374 @@
     - match: { detail.tokenizer.name: _anonymous_tokenizer }
     - match: { detail.tokenizer.tokens.0.token: foo }
     - match: { detail.tokenizer.tokens.1.token: bar }
 
+---
+"thai_tokenizer":
+    - do:
+        indices.analyze:
+          body:
+            text: "ภาษาไทย"
+            explain: true
+            tokenizer:
+              type: thai
+    - length: { detail.tokenizer.tokens: 2 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: ภาษา }
+    - match: { detail.tokenizer.tokens.1.token: ไทย }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "ภาษาไทย"
+            explain: true
+            tokenizer: thai
+    - length: { detail.tokenizer.tokens: 2 }
+    - match: { detail.tokenizer.name: thai }
+    - match: { detail.tokenizer.tokens.0.token: ภาษา }
+    - match: { detail.tokenizer.tokens.1.token: ไทย }
+
+---
+"ngram":
+    - do:
+        indices.analyze:
+          body:
+            text: "foobar"
+            explain: true
+            tokenizer:
+              type: ngram
+              min_gram: 3
+              max_gram: 3
+    - length: { detail.tokenizer.tokens: 4 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: foo }
+    - match: { detail.tokenizer.tokens.1.token: oob }
+    - match: { detail.tokenizer.tokens.2.token: oba }
+    - match: { detail.tokenizer.tokens.3.token: bar }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "foobar"
+            explain: true
+            tokenizer:
+              type: nGram
+              min_gram: 3
+              max_gram: 3
+    - length: { detail.tokenizer.tokens: 4 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: foo }
+    - match: { detail.tokenizer.tokens.1.token: oob }
+    - match: { detail.tokenizer.tokens.2.token: oba }
+    - match: { detail.tokenizer.tokens.3.token: bar }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "foo"
+            explain: true
+            tokenizer: ngram
+    - length: { detail.tokenizer.tokens: 5 }
+    - match: { detail.tokenizer.name: ngram }
+    - match: { detail.tokenizer.tokens.0.token: f }
+    - match: { detail.tokenizer.tokens.1.token: fo }
+    - match: { detail.tokenizer.tokens.2.token: o }
+    - match: { detail.tokenizer.tokens.3.token: oo }
+    - match: { detail.tokenizer.tokens.4.token: o }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "foo"
+            explain: true
+            tokenizer: nGram
+    - length: { detail.tokenizer.tokens: 5 }
+    - match: { detail.tokenizer.name: nGram }
+    - match: { detail.tokenizer.tokens.0.token: f }
+    - match: { detail.tokenizer.tokens.1.token: fo }
+    - match: { detail.tokenizer.tokens.2.token: o }
+    - match: { detail.tokenizer.tokens.3.token: oo }
+    - match: { detail.tokenizer.tokens.4.token: o }
+
+---
+"edge_ngram":
+    - do:
+        indices.analyze:
+          body:
+            text: "foo"
+            explain: true
+            tokenizer:
+              type: edge_ngram
+              min_gram: 1
+              max_gram: 3
+    - length: { detail.tokenizer.tokens: 3 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: f }
+    - match: { detail.tokenizer.tokens.1.token: fo }
+    - match: { detail.tokenizer.tokens.2.token: foo }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "foo"
+            explain: true
+            tokenizer:
+              type: edgeNGram
+              min_gram: 1
+              max_gram: 3
+    - length: { detail.tokenizer.tokens: 3 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: f }
+    - match: { detail.tokenizer.tokens.1.token: fo }
+    - match: { detail.tokenizer.tokens.2.token: foo }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "foo"
+            explain: true
+            tokenizer: edge_ngram
+    - length: { detail.tokenizer.tokens: 2 }
+    - match: { detail.tokenizer.name: edge_ngram }
+    - match: { detail.tokenizer.tokens.0.token: f }
+    - match: { detail.tokenizer.tokens.1.token: fo }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "foo"
+            explain: true
+            tokenizer: edgeNGram
+    - length: { detail.tokenizer.tokens: 2 }
+    - match: { detail.tokenizer.name: edgeNGram }
+    - match: { detail.tokenizer.tokens.0.token: f }
+    - match: { detail.tokenizer.tokens.1.token: fo }
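Aside, not part of the commit: a minimal Lucene-level sketch of the ngram and edge_ngram expectations above ("foobar" with min_gram=max_gram=3, and "foo" with min_gram=1, max_gram=3); the class name NGramSketch is hypothetical:

import java.io.StringReader;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class NGramSketch {
    // Runs a tokenizer over the given text and prints each emitted term.
    static void print(Tokenizer tokenizer, String text) throws Exception {
        tokenizer.setReader(new StringReader(text));
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        tokenizer.reset();
        while (tokenizer.incrementToken()) {
            System.out.print(term + " ");
        }
        tokenizer.end();
        tokenizer.close();
        System.out.println();
    }

    public static void main(String[] args) throws Exception {
        print(new NGramTokenizer(3, 3), "foobar");   // foo oob oba bar
        print(new EdgeNGramTokenizer(1, 3), "foo");  // f fo foo
    }
}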
+---
+"classic":
+    - do:
+        indices.analyze:
+          body:
+            text: "Brown-Foxes don't jump."
+            explain: true
+            tokenizer:
+              type: classic
+    - length: { detail.tokenizer.tokens: 4 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: Brown }
+    - match: { detail.tokenizer.tokens.1.token: Foxes }
+    - match: { detail.tokenizer.tokens.2.token: don't }
+    - match: { detail.tokenizer.tokens.3.token: jump }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "Brown-Foxes don't jump."
+            explain: true
+            tokenizer: classic
+    - length: { detail.tokenizer.tokens: 4 }
+    - match: { detail.tokenizer.name: classic }
+    - match: { detail.tokenizer.tokens.0.token: Brown }
+    - match: { detail.tokenizer.tokens.1.token: Foxes }
+    - match: { detail.tokenizer.tokens.2.token: don't }
+    - match: { detail.tokenizer.tokens.3.token: jump }
+
+---
+"letter":
+    - do:
+        indices.analyze:
+          body:
+            text: "Brown-Foxes don't jump."
+            explain: true
+            tokenizer:
+              type: letter
+    - length: { detail.tokenizer.tokens: 5 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: Brown }
+    - match: { detail.tokenizer.tokens.1.token: Foxes }
+    - match: { detail.tokenizer.tokens.2.token: don }
+    - match: { detail.tokenizer.tokens.3.token: t }
+    - match: { detail.tokenizer.tokens.4.token: jump }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "Brown-Foxes don't jump."
+            explain: true
+            tokenizer: letter
+    - length: { detail.tokenizer.tokens: 5 }
+    - match: { detail.tokenizer.name: letter }
+    - match: { detail.tokenizer.tokens.0.token: Brown }
+    - match: { detail.tokenizer.tokens.1.token: Foxes }
+    - match: { detail.tokenizer.tokens.2.token: don }
+    - match: { detail.tokenizer.tokens.3.token: t }
+    - match: { detail.tokenizer.tokens.4.token: jump }
+
+---
+"lowercase":
+    - do:
+        indices.analyze:
+          body:
+            text: "Brown-Foxes don't jump."
+            explain: true
+            tokenizer:
+              type: lowercase
+    - length: { detail.tokenizer.tokens: 5 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: brown }
+    - match: { detail.tokenizer.tokens.1.token: foxes }
+    - match: { detail.tokenizer.tokens.2.token: don }
+    - match: { detail.tokenizer.tokens.3.token: t }
+    - match: { detail.tokenizer.tokens.4.token: jump }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "Brown-Foxes don't jump."
+            explain: true
+            tokenizer: lowercase
+    - length: { detail.tokenizer.tokens: 5 }
+    - match: { detail.tokenizer.name: lowercase }
+    - match: { detail.tokenizer.tokens.0.token: brown }
+    - match: { detail.tokenizer.tokens.1.token: foxes }
+    - match: { detail.tokenizer.tokens.2.token: don }
+    - match: { detail.tokenizer.tokens.3.token: t }
+    - match: { detail.tokenizer.tokens.4.token: jump }
+
+---
+"path_hierarchy":
+    - do:
+        indices.analyze:
+          body:
+            text: "a/b/c"
+            explain: true
+            tokenizer:
+              type: path_hierarchy
+    - length: { detail.tokenizer.tokens: 3 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: a }
+    - match: { detail.tokenizer.tokens.1.token: a/b }
+    - match: { detail.tokenizer.tokens.2.token: a/b/c }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "a/b/c"
+            explain: true
+            tokenizer:
+              type: PathHierarchy
+    - length: { detail.tokenizer.tokens: 3 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: a }
+    - match: { detail.tokenizer.tokens.1.token: a/b }
+    - match: { detail.tokenizer.tokens.2.token: a/b/c }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "a/b/c"
+            explain: true
+            tokenizer: path_hierarchy
+    - length: { detail.tokenizer.tokens: 3 }
+    - match: { detail.tokenizer.name: path_hierarchy }
+    - match: { detail.tokenizer.tokens.0.token: a }
+    - match: { detail.tokenizer.tokens.1.token: a/b }
+    - match: { detail.tokenizer.tokens.2.token: a/b/c }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "a/b/c"
+            explain: true
+            tokenizer: PathHierarchy
+    - length: { detail.tokenizer.tokens: 3 }
+    - match: { detail.tokenizer.name: PathHierarchy }
+    - match: { detail.tokenizer.tokens.0.token: a }
+    - match: { detail.tokenizer.tokens.1.token: a/b }
+    - match: { detail.tokenizer.tokens.2.token: a/b/c }
+
+---
+"pattern":
+    - do:
+        indices.analyze:
+          body:
+            text: "split by whitespace by default"
+            explain: true
+            tokenizer:
+              type: pattern
+    - length: { detail.tokenizer.tokens: 5 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: split }
+    - match: { detail.tokenizer.tokens.1.token: by }
+    - match: { detail.tokenizer.tokens.2.token: whitespace }
+    - match: { detail.tokenizer.tokens.3.token: by }
+    - match: { detail.tokenizer.tokens.4.token: default }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "split by whitespace by default"
+            explain: true
+            tokenizer: pattern
+    - length: { detail.tokenizer.tokens: 5 }
+    - match: { detail.tokenizer.name: pattern }
+    - match: { detail.tokenizer.tokens.0.token: split }
+    - match: { detail.tokenizer.tokens.1.token: by }
+    - match: { detail.tokenizer.tokens.2.token: whitespace }
+    - match: { detail.tokenizer.tokens.3.token: by }
+    - match: { detail.tokenizer.tokens.4.token: default }
+
+---
+"uax_url_email":
+    - do:
+        indices.analyze:
+          body:
+            text: "Email me at john.smith@global-international.com"
+            explain: true
+            tokenizer:
+              type: uax_url_email
+    - length: { detail.tokenizer.tokens: 4 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: Email }
+    - match: { detail.tokenizer.tokens.1.token: me }
+    - match: { detail.tokenizer.tokens.2.token: at }
+    - match: { detail.tokenizer.tokens.3.token: john.smith@global-international.com }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "Email me at john.smith@global-international.com"
+            explain: true
+            tokenizer: uax_url_email
+    - length: { detail.tokenizer.tokens: 4 }
+    - match: { detail.tokenizer.name: uax_url_email }
+    - match: { detail.tokenizer.tokens.0.token: Email }
+    - match: { detail.tokenizer.tokens.1.token: me }
+    - match: { detail.tokenizer.tokens.2.token: at }
+    - match: { detail.tokenizer.tokens.3.token: john.smith@global-international.com }
+
+---
+"whitespace":
+    - do:
+        indices.analyze:
+          body:
+            text: "split by whitespace"
+            explain: true
+            tokenizer:
+              type: whitespace
+    - length: { detail.tokenizer.tokens: 3 }
+    - match: { detail.tokenizer.name: _anonymous_tokenizer }
+    - match: { detail.tokenizer.tokens.0.token: split }
+    - match: { detail.tokenizer.tokens.1.token: by }
+    - match: { detail.tokenizer.tokens.2.token: whitespace }
+
+    - do:
+        indices.analyze:
+          body:
+            text: "split by whitespace"
+            explain: true
+            tokenizer: whitespace
+    - length: { detail.tokenizer.tokens: 3 }
+    - match: { detail.tokenizer.name: whitespace }
+    - match: { detail.tokenizer.tokens.0.token: split }
+    - match: { detail.tokenizer.tokens.1.token: by }
+    - match: { detail.tokenizer.tokens.2.token: whitespace }
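A matching Lucene-level sketch (illustrative only, not part of the commit) of the path_hierarchy expectation above: the tokenizer emits every ancestor prefix of the path. The class name PathHierarchySketch is hypothetical.

import java.io.StringReader;
import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class PathHierarchySketch {
    public static void main(String[] args) throws Exception {
        PathHierarchyTokenizer tokenizer = new PathHierarchyTokenizer(); // '/' delimiter by default
        tokenizer.setReader(new StringReader("a/b/c"));
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        tokenizer.reset();
        while (tokenizer.incrementToken()) {
            System.out.println(term.toString()); // a, a/b, a/b/c
        }
        tokenizer.end();
        tokenizer.close();
    }
}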
@@ -67,3 +67,33 @@
       text: "<html>foo</html>"
     - length: { tokens: 1 }
     - match: { tokens.0.token: "\nfoo\n" }
 
+---
+"Synonym filter with tokenizer":
+    - do:
+        indices.create:
+          index: test_synonym
+          body:
+            settings:
+              index:
+                analysis:
+                  tokenizer:
+                    trigram:
+                      type: nGram
+                      min_gram: 3
+                      max_gram: 3
+                  filter:
+                    synonym:
+                      type: synonym
+                      synonyms: ["kimchy => shay"]
+
+    - do:
+        indices.analyze:
+          index: test_synonym
+          body:
+            tokenizer: trigram
+            filter: [synonym]
+            text: kimchy
+    - length: { tokens: 2 }
+    - match: { tokens.0.token: sha }
+    - match: { tokens.1.token: hay }

@@ -39,3 +39,97 @@
           text:
             query: foa
   - match: {hits.total: 1}
 
+---
+"testNGramCopyField":
+  - do:
+      indices.create:
+        index: test
+        body:
+          settings:
+            number_of_shards: 1
+            number_of_replicas: 0
+            max_ngram_diff: 9
+            analysis:
+              analyzer:
+                my_ngram_analyzer:
+                  tokenizer: my_ngram_tokenizer
+              tokenizer:
+                my_ngram_tokenizer:
+                  type: ngram
+                  min: 1,
+                  max: 10
+                  token_chars: []
+          mappings:
+            doc:
+              properties:
+                origin:
+                  type: text
+                  copy_to: meta
+                meta:
+                  type: text
+                  analyzer: my_ngram_analyzer
+
+  - do:
+      index:
+        index: test
+        type: doc
+        id: 1
+        body: { "origin": "C.A1234.5678" }
+        refresh: true
+
+  - do:
+      search:
+        body:
+          query:
+            match:
+              meta:
+                query: 1234
+  - match: {hits.total: 1}
+
+  - do:
+      search:
+        body:
+          query:
+            match:
+              meta:
+                query: 1234.56
+  - match: {hits.total: 1}
+
+  - do:
+      search:
+        body:
+          query:
+            match:
+              meta:
+                query: A1234
+  - match: {hits.total: 1}
+
+  - do:
+      search:
+        body:
+          query:
+            term:
+              meta:
+                value: a1234
+  - match: {hits.total: 0}
+
+  - do:
+      search:
+        body:
+          query:
+            match:
+              meta:
+                query: A1234
+                analyzer: my_ngram_analyzer
+  - match: {hits.total: 1}
+
+  - do:
+      search:
+        body:
+          query:
+            match:
+              meta:
+                query: a1234
+                analyzer: my_ngram_analyzer
+  - match: {hits.total: 1}
@@ -230,6 +230,11 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) {
   fixtureSupported = true
 }
 
+boolean legalPath = rootProject.rootDir.toString().contains(" ") == false
+if (legalPath == false) {
+  fixtureSupported = false
+}
+
 // Always ignore HA integration tests in the normal integration test runner, they are included below as
 // part of their own HA-specific integration test tasks.
 integTestRunner.exclude('**/Ha*TestSuiteIT.class')
@@ -248,7 +253,12 @@ if (fixtureSupported) {
   // Only include the HA integration tests for the HA test task
   integTestHaRunner.patternSet.setIncludes(['**/Ha*TestSuiteIT.class'])
 } else {
-  logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH")
+  if (legalPath) {
+    logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH")
+  } else {
+    logger.warn("hdfsFixture unsupported since there are spaces in the path: '" + rootProject.rootDir.toString() + "'")
+  }
+
   // The normal integration test runner will just test that the plugin loads
   integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository/10_basic'
   // HA fixture is unsupported. Don't run them.

@@ -76,36 +76,6 @@
     - match: { detail.tokenfilters.0.name: "_anonymous_tokenfilter" }
     - match: { detail.tokenfilters.0.tokens.0.token: bar }
 
----
-"Synonym filter with tokenizer":
-    - do:
-        indices.create:
-          index: test_synonym
-          body:
-            settings:
-              index:
-                analysis:
-                  tokenizer:
-                    trigram:
-                      type: nGram
-                      min_gram: 3
-                      max_gram: 3
-                  filter:
-                    synonym:
-                      type: synonym
-                      synonyms: ["kimchy => shay"]
-
-    - do:
-        indices.analyze:
-          index: test_synonym
-          body:
-            tokenizer: trigram
-            filter: [synonym]
-            text: kimchy
-    - length: { tokens: 2 }
-    - match: { tokens.0.token: sha }
-    - match: { tokens.1.token: hay }
-
 ---
 "Custom normalizer in request":
   - do:

@@ -1,5 +1,9 @@
 ---
 "Shrink index via API":
+  - skip:
+      version: " - 6.99.99"
+      reason: expects warnings that pre-7.0.0 will not send
+      features: "warnings"
   # creates an index with one document solely allocated on the master node
   # and shrinks it into a new index with a single shard
   # we don't do the relocation to a single node after the index is created
@@ -62,6 +66,8 @@
         body:
           settings:
             index.number_of_replicas: 0
+      warnings:
+        - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
 
   - do:
       cluster.health:

@@ -1,5 +1,10 @@
 ---
 "Shrink index ignores target template mapping":
+  - skip:
+      version: " - 6.99.99"
+      reason: expects warnings that pre-7.0.0 will not send
+      features: "warnings"
+
   - do:
       cluster.state: {}
   # Get master node id
@@ -65,6 +70,8 @@
         body:
          settings:
            index.number_of_replicas: 0
+      warnings:
+        - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
 
   - do:
       cluster.health:

@@ -1,8 +1,8 @@
 ---
 "Copy settings during shrink index":
   - skip:
-      version: " - 6.3.99"
-      reason: copy_settings did not exist prior to 6.4.0
+      version: " - 6.99.99"
+      reason: expects warnings that pre-7.0.0 will not send
       features: "warnings"
 
   - do:
@@ -47,8 +47,6 @@
         settings:
           index.number_of_replicas: 0
          index.merge.scheduler.max_thread_count: 2
-      warnings:
-        - "parameter [copy_settings] is deprecated but was [true]"
 
   - do:
       cluster.health:
@@ -64,20 +62,19 @@
   - match: { copy-settings-target.settings.index.blocks.write: "true" }
   - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
 
-  # now we do a actual shrink and do not copy settings
+  # now we do a actual shrink and do not copy settings (by default)
   - do:
       indices.shrink:
         index: "source"
        target: "no-copy-settings-target"
        wait_for_active_shards: 1
        master_timeout: 10s
-        copy_settings: false
        body:
          settings:
            index.number_of_replicas: 0
            index.merge.scheduler.max_thread_count: 2
       warnings:
-        - "parameter [copy_settings] is deprecated but was [false]"
+        - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
 
   - do:
       cluster.health:
@@ -92,3 +89,16 @@
   - match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
   - is_false: no-copy-settings-target.settings.index.blocks.write
   - is_false: no-copy-settings-target.settings.index.routing.allocation.include._id
 
+  # now we do a actual shrink and try to set no copy settings
+  - do:
+      catch: /illegal_argument_exception/
+      indices.shrink:
+        index: "source"
+        target: "explicit-no-copy-settings-target"
+        wait_for_active_shards: 1
+        master_timeout: 10s
+        copy_settings: false
+        body:
+          settings:
+            index.number_of_replicas: 0
@@ -33,8 +33,9 @@ setup:
 ---
 "Split index via API":
   - skip:
-      version: " - 6.0.99"
-      reason: Added in 6.1.0
+      version: " - 6.99.99"
+      reason: expects warnings that pre-7.0.0 will not send
+      features: "warnings"
 
   # make it read-only
   - do:
@@ -60,6 +61,8 @@ setup:
         body:
          settings:
            index.number_of_replicas: 0
            index.number_of_shards: 4
+      warnings:
+        - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
 
   - do:
       cluster.health:
@@ -103,13 +106,13 @@ setup:
 
 ---
 "Split from 1 to N":
-  # - skip:
-  #    version: " - 6.99.99"
-  #    reason: Added in 7.0.0
-  # uncomment once AwaitsFix is resolved
   - skip:
+      # when re-enabling uncomment the below skips
       version: "all"
       reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503"
+      # version: " - 6.99.99"
+      # reason: expects warnings that pre-7.0.0 will not send
+      features: "warnings"
   - do:
       indices.create:
         index: source_one_shard
@@ -163,6 +166,8 @@ setup:
         body:
          settings:
            index.number_of_replicas: 0
            index.number_of_shards: 5
+      warnings:
+        - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
 
   - do:
       cluster.health:
@@ -208,8 +213,9 @@ setup:
 ---
 "Create illegal split indices":
   - skip:
-      version: " - 6.0.99"
-      reason: Added in 6.1.0
+      version: " - 6.99.99"
+      reason: expects warnings that pre-7.0.0 will not send
+      features: "warnings"
 
   # try to do an illegal split with number_of_routing_shards set
   - do:
@@ -224,6 +230,8 @@ setup:
            index.number_of_replicas: 0
            index.number_of_shards: 4
            index.number_of_routing_shards: 8
+      warnings:
+        - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
 
   # try to do an illegal split with illegal number_of_shards
   - do:
@@ -237,3 +245,5 @@ setup:
          settings:
            index.number_of_replicas: 0
            index.number_of_shards: 6
+      warnings:
+        - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"

@@ -1,12 +1,12 @@
 ---
 "Split index ignores target template mapping":
-  # - skip:
-  #    version: " - 6.0.99"
-  #    reason: Added in 6.1.0
-  # uncomment once AwaitsFix is resolved
   - skip:
+      # when re-enabling uncomment the below skips
      version: "all"
      reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503"
+      # version: " - 6.99.99"
+      # reason: expects warnings that pre-7.0.0 will not send
+      features: "warnings"
 
   # create index
   - do:
@@ -68,6 +68,8 @@
         body:
          settings:
            index.number_of_shards: 2
            index.number_of_replicas: 0
+      warnings:
+        - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
 
   - do:
       cluster.health:

@@ -1,8 +1,8 @@
 ---
 "Copy settings during split index":
   - skip:
-      version: " - 6.3.99"
-      reason: copy_settings did not exist prior to 6.4.0
+      version: " - 6.99.99"
+      reason: expects warnings that pre-7.0.0 will not send
       features: "warnings"
 
   - do:
@@ -50,8 +50,6 @@
            index.number_of_replicas: 0
            index.number_of_shards: 2
            index.merge.scheduler.max_thread_count: 2
-      warnings:
-        - "parameter [copy_settings] is deprecated but was [true]"
 
   - do:
       cluster.health:
@@ -67,21 +65,20 @@
   - match: { copy-settings-target.settings.index.blocks.write: "true" }
   - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
 
-  # now we do a actual shrink and do not copy settings
+  # now we do a actual shrink and do not copy settings (by default)
   - do:
       indices.split:
         index: "source"
        target: "no-copy-settings-target"
        wait_for_active_shards: 1
        master_timeout: 10s
-        copy_settings: false
        body:
          settings:
            index.number_of_replicas: 0
            index.number_of_shards: 2
            index.merge.scheduler.max_thread_count: 2
       warnings:
-        - "parameter [copy_settings] is deprecated but was [false]"
+        - "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
 
   - do:
       cluster.health:
@@ -96,3 +93,15 @@
   - match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
   - is_false: no-copy-settings-target.settings.index.blocks.write
   - is_false: no-copy-settings-target.settings.index.routing.allocation.include._id
 
+  - do:
+      catch: /illegal_argument_exception/
+      indices.split:
+        index: "source"
+        target: "explicit-no-copy-settings-target"
+        wait_for_active_shards: 1
+        master_timeout: 10s
+        copy_settings: false
+        body:
+          settings:
+            index.number_of_replicas: 0
@@ -56,7 +56,7 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
     private CreateIndexRequest targetIndexRequest;
     private String sourceIndex;
     private ResizeType type = ResizeType.SHRINK;
-    private boolean copySettings = false;
+    private Boolean copySettings;
 
     ResizeRequest() {}
 
@@ -80,6 +80,7 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
         if (type == ResizeType.SPLIT && IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexRequest.settings()) == false) {
             validationException = addValidationError("index.number_of_shards is required for split operations", validationException);
         }
+        assert copySettings == null || copySettings;
         return validationException;
     }
 
@@ -98,10 +99,12 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
         } else {
             type = ResizeType.SHRINK; // BWC this used to be shrink only
         }
-        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
+        if (in.getVersion().before(Version.V_6_4_0)) {
+            copySettings = null;
+        } else if (in.getVersion().onOrAfter(Version.V_6_4_0) && in.getVersion().before(Version.V_7_0_0_alpha1)){
             copySettings = in.readBoolean();
         } else {
-            copySettings = false;
+            copySettings = in.readOptionalBoolean();
         }
     }
 
@@ -113,8 +116,12 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
         if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) {
             out.writeEnum(type);
         }
-        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
-            out.writeBoolean(copySettings);
+        if (out.getVersion().before(Version.V_6_4_0)) {
+
+        } else if (out.getVersion().onOrAfter(Version.V_6_4_0) && out.getVersion().before(Version.V_7_0_0_alpha1)) {
+            out.writeBoolean(copySettings == null ? false : copySettings);
+        } else {
+            out.writeOptionalBoolean(copySettings);
         }
     }
 
@@ -187,11 +194,14 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
         return type;
     }
 
-    public void setCopySettings(final boolean copySettings) {
+    public void setCopySettings(final Boolean copySettings) {
+        if (copySettings != null && copySettings == false) {
+            throw new IllegalArgumentException("[copySettings] can not be explicitly set to [false]");
+        }
         this.copySettings = copySettings;
     }
 
-    public boolean getCopySettings() {
+    public Boolean getCopySettings() {
         return copySettings;
     }
 
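Illustrative usage sketch (not part of the diff) of the tri-state copySettings introduced above: the flag may be left unset (null) or set to true, while an explicit false is rejected by the setter. The class name CopySettingsSketch is hypothetical.

import org.elasticsearch.action.admin.indices.shrink.ResizeRequest;

final class CopySettingsSketch {
    static ResizeRequest shrinkCopyingSettings() {
        ResizeRequest request = new ResizeRequest("no-copy-settings-target", "source");
        request.setCopySettings(true);      // allowed
        // request.setCopySettings(false);  // would throw IllegalArgumentException per the setter above
        // leaving it unset keeps copySettings == null, i.e. "not specified"
        return request;
    }
}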
@ -190,7 +190,7 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeReque
|
||||||
.waitForActiveShards(targetIndex.waitForActiveShards())
|
.waitForActiveShards(targetIndex.waitForActiveShards())
|
||||||
.recoverFrom(metaData.getIndex())
|
.recoverFrom(metaData.getIndex())
|
||||||
.resizeType(resizeRequest.getResizeType())
|
.resizeType(resizeRequest.getResizeType())
|
||||||
.copySettings(resizeRequest.getCopySettings());
|
.copySettings(resizeRequest.getCopySettings() == null ? false : resizeRequest.getCopySettings());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
|
|
@ -24,6 +24,7 @@ import org.apache.lucene.analysis.core.KeywordTokenizer;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
import org.elasticsearch.env.Environment;
|
import org.elasticsearch.env.Environment;
|
||||||
import org.elasticsearch.index.IndexSettings;
|
import org.elasticsearch.index.IndexSettings;
|
||||||
|
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
|
||||||
|
|
||||||
public class KeywordTokenizerFactory extends AbstractTokenizerFactory {
|
public class KeywordTokenizerFactory extends AbstractTokenizerFactory {
|
||||||
|
|
||||||
|
|
|
@ -39,11 +39,9 @@ import org.elasticsearch.index.analysis.CatalanAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.CharFilterFactory;
|
import org.elasticsearch.index.analysis.CharFilterFactory;
|
||||||
import org.elasticsearch.index.analysis.ChineseAnalyzerProvider;
|
import org.elasticsearch.index.analysis.ChineseAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.CjkAnalyzerProvider;
|
import org.elasticsearch.index.analysis.CjkAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.ClassicTokenizerFactory;
|
|
||||||
import org.elasticsearch.index.analysis.CzechAnalyzerProvider;
|
import org.elasticsearch.index.analysis.CzechAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.DanishAnalyzerProvider;
|
import org.elasticsearch.index.analysis.DanishAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.DutchAnalyzerProvider;
|
import org.elasticsearch.index.analysis.DutchAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory;
|
|
||||||
import org.elasticsearch.index.analysis.EnglishAnalyzerProvider;
|
import org.elasticsearch.index.analysis.EnglishAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.FingerprintAnalyzerProvider;
|
import org.elasticsearch.index.analysis.FingerprintAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.FinnishAnalyzerProvider;
|
import org.elasticsearch.index.analysis.FinnishAnalyzerProvider;
|
||||||
|
@ -60,14 +58,9 @@ import org.elasticsearch.index.analysis.ItalianAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.KeywordAnalyzerProvider;
|
import org.elasticsearch.index.analysis.KeywordAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.KeywordTokenizerFactory;
|
import org.elasticsearch.index.analysis.KeywordTokenizerFactory;
|
||||||
import org.elasticsearch.index.analysis.LatvianAnalyzerProvider;
|
import org.elasticsearch.index.analysis.LatvianAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.LetterTokenizerFactory;
|
|
||||||
import org.elasticsearch.index.analysis.LithuanianAnalyzerProvider;
|
import org.elasticsearch.index.analysis.LithuanianAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
|
|
||||||
import org.elasticsearch.index.analysis.NGramTokenizerFactory;
|
|
||||||
import org.elasticsearch.index.analysis.NorwegianAnalyzerProvider;
|
import org.elasticsearch.index.analysis.NorwegianAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.PathHierarchyTokenizerFactory;
|
|
||||||
import org.elasticsearch.index.analysis.PatternAnalyzerProvider;
|
import org.elasticsearch.index.analysis.PatternAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.PatternTokenizerFactory;
|
|
||||||
import org.elasticsearch.index.analysis.PersianAnalyzerProvider;
|
import org.elasticsearch.index.analysis.PersianAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.PortugueseAnalyzerProvider;
|
import org.elasticsearch.index.analysis.PortugueseAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.PreConfiguredCharFilter;
|
import org.elasticsearch.index.analysis.PreConfiguredCharFilter;
|
||||||
|
@ -88,13 +81,10 @@ import org.elasticsearch.index.analysis.StopAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.StopTokenFilterFactory;
|
import org.elasticsearch.index.analysis.StopTokenFilterFactory;
|
||||||
import org.elasticsearch.index.analysis.SwedishAnalyzerProvider;
|
import org.elasticsearch.index.analysis.SwedishAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.ThaiAnalyzerProvider;
|
import org.elasticsearch.index.analysis.ThaiAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.ThaiTokenizerFactory;
|
|
||||||
import org.elasticsearch.index.analysis.TokenFilterFactory;
|
import org.elasticsearch.index.analysis.TokenFilterFactory;
|
||||||
import org.elasticsearch.index.analysis.TokenizerFactory;
|
import org.elasticsearch.index.analysis.TokenizerFactory;
|
||||||
import org.elasticsearch.index.analysis.TurkishAnalyzerProvider;
|
import org.elasticsearch.index.analysis.TurkishAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.UAX29URLEmailTokenizerFactory;
|
|
||||||
import org.elasticsearch.index.analysis.WhitespaceAnalyzerProvider;
|
import org.elasticsearch.index.analysis.WhitespaceAnalyzerProvider;
|
||||||
import org.elasticsearch.index.analysis.WhitespaceTokenizerFactory;
|
|
||||||
import org.elasticsearch.plugins.AnalysisPlugin;
|
import org.elasticsearch.plugins.AnalysisPlugin;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
@@ -223,36 +213,19 @@ public final class AnalysisModule {
             }
             preConfiguredTokenizers.register(name, preConfigured);
         }
-        // Temporary shim for aliases. TODO deprecate after they are moved
-        preConfiguredTokenizers.register("nGram", preConfiguredTokenizers.getRegistry().get("ngram"));
-        preConfiguredTokenizers.register("edgeNGram", preConfiguredTokenizers.getRegistry().get("edge_ngram"));
-        preConfiguredTokenizers.register("PathHierarchy", preConfiguredTokenizers.getRegistry().get("path_hierarchy"));

         for (AnalysisPlugin plugin: plugins) {
             for (PreConfiguredTokenizer tokenizer : plugin.getPreConfiguredTokenizers()) {
                 preConfiguredTokenizers.register(tokenizer.getName(), tokenizer);
             }
         }

         return unmodifiableMap(preConfiguredTokenizers.getRegistry());
     }

     private NamedRegistry<AnalysisProvider<TokenizerFactory>> setupTokenizers(List<AnalysisPlugin> plugins) {
         NamedRegistry<AnalysisProvider<TokenizerFactory>> tokenizers = new NamedRegistry<>("tokenizer");
         tokenizers.register("standard", StandardTokenizerFactory::new);
-        tokenizers.register("uax_url_email", UAX29URLEmailTokenizerFactory::new);
-        tokenizers.register("path_hierarchy", PathHierarchyTokenizerFactory::new);
-        tokenizers.register("PathHierarchy", PathHierarchyTokenizerFactory::new);
         tokenizers.register("keyword", KeywordTokenizerFactory::new);
-        tokenizers.register("letter", LetterTokenizerFactory::new);
-        tokenizers.register("lowercase", LowerCaseTokenizerFactory::new);
-        tokenizers.register("whitespace", WhitespaceTokenizerFactory::new);
-        tokenizers.register("nGram", NGramTokenizerFactory::new);
-        tokenizers.register("ngram", NGramTokenizerFactory::new);
-        tokenizers.register("edgeNGram", EdgeNGramTokenizerFactory::new);
-        tokenizers.register("edge_ngram", EdgeNGramTokenizerFactory::new);
-        tokenizers.register("pattern", PatternTokenizerFactory::new);
-        tokenizers.register("classic", ClassicTokenizerFactory::new);
-        tokenizers.register("thai", ThaiTokenizerFactory::new);
         tokenizers.extractAndRegister(plugins, AnalysisPlugin::getTokenizers);
         return tokenizers;
     }
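With these built-in registrations dropped from core, tokenizers now reach the registry only through the AnalysisPlugin hooks that the surviving loop above iterates over. A minimal sketch of a plugin supplying one pre-configured tokenizer via getPreConfiguredTokenizers() (the plugin class and tokenizer name are hypothetical; the PreConfiguredTokenizer.singleton(name, supplier, multiTermComponent) call mirrors the one added in the term-vectors test further down in this diff):

import java.util.List;

import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.elasticsearch.index.analysis.PreConfiguredTokenizer;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.plugins.Plugin;

import static java.util.Collections.singletonList;

// Hypothetical plugin: exposes a single pre-configured tokenizer named "my_whitespace".
public class MyAnalysisPlugin extends Plugin implements AnalysisPlugin {

    @Override
    public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
        // name, Tokenizer supplier, and no separate multi-term-aware variant (null)
        return singletonList(PreConfiguredTokenizer.singleton("my_whitespace", WhitespaceTokenizer::new, null));
    }
}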
@@ -19,18 +19,8 @@
 package org.elasticsearch.indices.analysis;

 import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.LetterTokenizer;
-import org.apache.lucene.analysis.core.WhitespaceTokenizer;
-import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
-import org.apache.lucene.analysis.ngram.NGramTokenizer;
-import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
-import org.apache.lucene.analysis.pattern.PatternTokenizer;
-import org.apache.lucene.analysis.standard.ClassicTokenizer;
 import org.apache.lucene.analysis.standard.StandardTokenizer;
-import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
-import org.apache.lucene.analysis.th.ThaiTokenizer;
 import org.elasticsearch.Version;
-import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;

@@ -41,69 +31,6 @@ public enum PreBuiltTokenizers {
         protected Tokenizer create(Version version) {
             return new StandardTokenizer();
         }
-    },
-
-    CLASSIC(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new ClassicTokenizer();
-        }
-    },
-
-    UAX_URL_EMAIL(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new UAX29URLEmailTokenizer();
-        }
-    },
-
-    PATH_HIERARCHY(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new PathHierarchyTokenizer();
-        }
-    },
-
-    LETTER(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new LetterTokenizer();
-        }
-    },
-
-    WHITESPACE(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new WhitespaceTokenizer();
-        }
-    },
-
-    NGRAM(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new NGramTokenizer();
-        }
-    },
-
-    EDGE_NGRAM(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE);
-        }
-    },
-
-    PATTERN(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new PatternTokenizer(Regex.compile("\\W+", null), -1);
-        }
-    },
-
-    THAI(CachingStrategy.ONE) {
-        @Override
-        protected Tokenizer create(Version version) {
-            return new ThaiTokenizer();
-        }
-    }
     }

     ;
@@ -19,6 +19,7 @@
 package org.elasticsearch.indices.flush;

 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.Assertions;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
@@ -501,8 +502,18 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
         if (indexShard.routingEntry().primary() == false) {
             throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard");
         }
+        if (Assertions.ENABLED) {
+            if (logger.isTraceEnabled()) {
+                logger.trace("in flight operations {}, acquirers {}", indexShard.getActiveOperationsCount(), indexShard.getActiveOperations());
+            }
+        }
         int opCount = indexShard.getActiveOperationsCount();
-        logger.trace("{} in flight operations sampled at [{}]", request.shardId(), opCount);
+        // Need to snapshot the debug info twice as it's updated concurrently with the permit count.
+        if (Assertions.ENABLED) {
+            if (logger.isTraceEnabled()) {
+                logger.trace("in flight operations {}, acquirers {}", indexShard.getActiveOperationsCount(), indexShard.getActiveOperations());
+            }
+        }
         return new InFlightOpsResponse(opCount);
     }

@@ -950,6 +950,20 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp

         final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(snapshots);
         try {
+            // Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier
+            // attempt to write an index file with this generation failed mid-way after creating the temporary file.
+            for (final String blobName : blobs.keySet()) {
+                if (indexShardSnapshotsFormat.isTempBlobName(blobName)) {
+                    try {
+                        blobContainer.deleteBlobIgnoringIfNotExists(blobName);
+                    } catch (IOException e) {
+                        logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete index blob [{}] during finalization",
+                            snapshotId, shardId, blobName), e);
+                        throw e;
+                    }
+                }
+            }
+
             // If we deleted all snapshots, we don't need to create a new index file
             if (snapshots.size() > 0) {
                 indexShardSnapshotsFormat.writeAtomic(updatedSnapshots, blobContainer, indexGeneration);
@@ -957,7 +971,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp

                 // Delete old index files
                 for (final String blobName : blobs.keySet()) {
-                    if (indexShardSnapshotsFormat.isTempBlobName(blobName) || blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) {
+                    if (blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) {
                         try {
                             blobContainer.deleteBlobIgnoringIfNotExists(blobName);
                         } catch (IOException e) {
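Taken together, the two hunks above reorder shard-snapshot finalization: leftover temporary index blobs from a previously failed attempt are deleted first, the new generation is then written atomically, and only afterwards are the old generation files removed. A condensed restatement of the resulting flow, with the per-blob error handling from the diff omitted for brevity (all identifiers are taken from the hunks above; this is a sketch, not the method body itself):

// 1) remove temp blobs left over from an earlier failed write of this generation,
// 2) atomically write the new index-N generation (only if snapshots remain),
// 3) remove the previous index-N generation blobs.
for (final String blobName : blobs.keySet()) {
    if (indexShardSnapshotsFormat.isTempBlobName(blobName)) {
        blobContainer.deleteBlobIgnoringIfNotExists(blobName);      // step 1
    }
}
if (snapshots.size() > 0) {
    indexShardSnapshotsFormat.writeAtomic(updatedSnapshots, blobContainer, indexGeneration); // step 2
    for (final String blobName : blobs.keySet()) {
        if (blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) {
            blobContainer.deleteBlobIgnoringIfNotExists(blobName);  // step 3
        }
    }
}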
@@ -48,17 +48,22 @@ public abstract class RestResizeHandler extends BaseRestHandler {
         final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index"));
         resizeRequest.setResizeType(getResizeType());
         final String rawCopySettings = request.param("copy_settings");
-        final boolean copySettings;
+        final Boolean copySettings;
         if (rawCopySettings == null) {
             copySettings = resizeRequest.getCopySettings();
+        } else if (rawCopySettings.isEmpty()) {
+            copySettings = true;
         } else {
-            deprecationLogger.deprecated("parameter [copy_settings] is deprecated but was [" + rawCopySettings + "]");
-            if (rawCopySettings.length() == 0) {
-                copySettings = true;
-            } else {
-                copySettings = Booleans.parseBoolean(rawCopySettings);
+            copySettings = Booleans.parseBoolean(rawCopySettings);
+            if (copySettings == false) {
+                throw new IllegalArgumentException("parameter [copy_settings] can not be explicitly set to [false]");
             }
         }
+        if (copySettings == null) {
+            deprecationLogger.deprecated(
+                    "resize operations without copying settings is deprecated; "
+                            + "set parameter [copy_settings] to [true] for future default behavior");
+        }
         resizeRequest.setCopySettings(copySettings);
         request.applyContentParser(resizeRequest::fromXContent);
         resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout()));
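For reference, the hunk above maps the raw copy_settings parameter as follows: absent keeps the request default (possibly null, which triggers a deprecation warning), an empty value means true, "true" parses to true, and an explicit "false" is rejected. A condensed restatement as a standalone helper (parseCopySettings and the wrapper class are hypothetical, not part of RestResizeHandler):

import org.elasticsearch.common.Booleans;

// Hypothetical helper restating the parameter handling above.
final class CopySettingsParamSketch {

    static Boolean parseCopySettings(final String rawCopySettings, final Boolean requestDefault) {
        final Boolean copySettings;
        if (rawCopySettings == null) {
            copySettings = requestDefault;      // parameter absent: fall back to the request value (may be null)
        } else if (rawCopySettings.isEmpty()) {
            copySettings = true;                // "?copy_settings" given without a value means true
        } else {
            copySettings = Booleans.parseBoolean(rawCopySettings);
            if (copySettings == false) {
                throw new IllegalArgumentException("parameter [copy_settings] can not be explicitly set to [false]");
            }
        }
        return copySettings;                    // a null result triggers the deprecation warning shown above
    }
}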
@@ -287,7 +287,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
         e = expectThrows(IllegalArgumentException.class,
             () -> TransportAnalyzeAction.analyze(
                 new AnalyzeRequest()
-                    .tokenizer("whitespace")
+                    .tokenizer("standard")
                     .addTokenFilter("foobar")
                     .text("the qu1ck brown fox"),
                 "text", null, notGlobal ? indexAnalyzers : null, registry, environment, maxTokenCount));
@@ -300,7 +300,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
         e = expectThrows(IllegalArgumentException.class,
             () -> TransportAnalyzeAction.analyze(
                 new AnalyzeRequest()
-                    .tokenizer("whitespace")
+                    .tokenizer("standard")
                     .addTokenFilter("lowercase")
                     .addCharFilter("foobar")
                     .text("the qu1ck brown fox"),
@@ -322,7 +322,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {

     public void testNonPreBuildTokenFilter() throws IOException {
         AnalyzeRequest request = new AnalyzeRequest();
-        request.tokenizer("whitespace");
+        request.tokenizer("standard");
         request.addTokenFilter("stop"); // stop token filter is not prebuilt in AnalysisModule#setupPreConfiguredTokenFilters()
         request.text("the quick brown fox");
         AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, maxTokenCount);
@@ -23,6 +23,7 @@ import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedSetSelector;
 import org.apache.lucene.search.SortedSetSortField;
+import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
@@ -76,6 +77,7 @@ import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;

+@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30416")
 public class ShrinkIndexIT extends ESIntegTestCase {

     @Override
@@ -83,7 +85,6 @@ public class ShrinkIndexIT extends ESIntegTestCase {
         return Arrays.asList(InternalSettingsPlugin.class);
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30416")
     public void testCreateShrinkIndexToN() {
         int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}};
         int[] shardSplits = randomFrom(possibleShardSplits);
@@ -31,12 +31,34 @@ import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;

 import java.io.IOException;
+import java.util.function.Consumer;
+import java.util.function.Supplier;

 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
 import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.hasToString;

 public class ResizeRequestTests extends ESTestCase {

+    public void testCopySettingsValidation() {
+        runTestCopySettingsValidation(false, r -> {
+            final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, r::get);
+            assertThat(e, hasToString(containsString("[copySettings] can not be explicitly set to [false]")));
+        });
+
+        runTestCopySettingsValidation(null, r -> assertNull(r.get().getCopySettings()));
+        runTestCopySettingsValidation(true, r -> assertTrue(r.get().getCopySettings()));
+    }
+
+    private void runTestCopySettingsValidation(final Boolean copySettings, final Consumer<Supplier<ResizeRequest>> consumer) {
+        consumer.accept(() -> {
+            final ResizeRequest request = new ResizeRequest();
+            request.setCopySettings(copySettings);
+            return request;
+        });
+    }
+
     public void testToXContent() throws IOException {
         {
             ResizeRequest request = new ResizeRequest("target", "source");
@@ -188,7 +188,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
                 .addAlias(new Alias("alias"))
                 .setSettings(Settings.builder()
                         .put(indexSettings())
-                        .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+                        .put("index.analysis.analyzer.tv_test.tokenizer", "standard")
                         .putList("index.analysis.analyzer.tv_test.filter", "lowercase")));
         for (int i = 0; i < 10; i++) {
             client().prepareIndex("test", "type1", Integer.toString(i))
@@ -260,7 +260,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
                 .endObject().endObject();
         assertAcked(prepareCreate("test").addMapping("type1", mapping)
                 .setSettings(Settings.builder()
-                        .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+                        .put("index.analysis.analyzer.tv_test.tokenizer", "standard")
                         .putList("index.analysis.analyzer.tv_test.filter", "lowercase")));
         for (int i = 0; i < 10; i++) {
             client().prepareIndex("test", "type1", Integer.toString(i))
@@ -394,7 +394,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
                 .addMapping("type1", mapping)
                 .setSettings(Settings.builder()
                         .put(indexSettings())
-                        .put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
+                        .put("index.analysis.analyzer.tv_test.tokenizer", "standard")
                         .putList("index.analysis.analyzer.tv_test.filter", "lowercase")));

         ensureGreen();
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.action.termvectors;

+import org.apache.lucene.analysis.MockTokenizer;
 import org.apache.lucene.analysis.TokenFilter;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.payloads.FloatEncoder;
@@ -35,6 +36,7 @@ import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.analysis.PreConfiguredTokenizer;
 import org.elasticsearch.index.analysis.TokenFilterFactory;
 import org.elasticsearch.indices.analysis.AnalysisModule;
 import org.elasticsearch.plugins.AnalysisPlugin;
@@ -93,6 +95,12 @@ public class GetTermVectorsTests extends ESSingleNodeTestCase {
             });
         }

+        @Override
+        public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
+            return Collections.singletonList(PreConfiguredTokenizer.singleton("mock-whitespace",
+                    () -> new MockTokenizer(MockTokenizer.WHITESPACE, false), null));
+        }
+
         // Based on DelimitedPayloadTokenFilter:
         final class MockPayloadTokenFilter extends TokenFilter {
             private final char delimiter;
@@ -151,7 +159,7 @@ public class GetTermVectorsTests extends ESSingleNodeTestCase {
                 .startObject("field").field("type", "text").field("term_vector", "with_positions_offsets_payloads")
                 .field("analyzer", "payload_test").endObject().endObject().endObject().endObject();
         Settings setting = Settings.builder()
-                .put("index.analysis.analyzer.payload_test.tokenizer", "whitespace")
+                .put("index.analysis.analyzer.payload_test.tokenizer", "mock-whitespace")
                 .putList("index.analysis.analyzer.payload_test.filter", "my_delimited_payload")
                 .put("index.analysis.filter.my_delimited_payload.delimiter", delimiter)
                 .put("index.analysis.filter.my_delimited_payload.encoding", encodingString)
@@ -35,10 +35,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.startsWith;

-
 public class AnalyzeActionIT extends ESIntegTestCase {
     public void testSimpleAnalyzerTests() throws Exception {
         assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
@@ -333,14 +331,14 @@ public class AnalyzeActionIT extends ESIntegTestCase {
         AnalyzeResponse analyzeResponse = client().admin().indices()
             .prepareAnalyze()
             .setText("Foo buzz test")
-            .setTokenizer("whitespace")
+            .setTokenizer("standard")
             .addTokenFilter("lowercase")
             .addTokenFilter(stopFilterSettings)
             .setExplain(true)
             .get();

         //tokenizer
-        assertThat(analyzeResponse.detail().tokenizer().getName(), equalTo("whitespace"));
+        assertThat(analyzeResponse.detail().tokenizer().getName(), equalTo("standard"));
         assertThat(analyzeResponse.detail().tokenizer().getTokens().length, equalTo(3));
         assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getTerm(), equalTo("Foo"));
         assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getStartOffset(), equalTo(0));
@@ -393,41 +391,6 @@ public class AnalyzeActionIT extends ESIntegTestCase {
         assertThat(analyzeResponse.detail().tokenfilters()[1].getTokens()[0].getPositionLength(), equalTo(1));
     }

-    public void testCustomTokenizerInRequest() throws Exception {
-        Map<String, Object> tokenizerSettings = new HashMap<>();
-        tokenizerSettings.put("type", "nGram");
-        tokenizerSettings.put("min_gram", 2);
-        tokenizerSettings.put("max_gram", 2);
-
-        AnalyzeResponse analyzeResponse = client().admin().indices()
-            .prepareAnalyze()
-            .setText("good")
-            .setTokenizer(tokenizerSettings)
-            .setExplain(true)
-            .get();
-
-        //tokenizer
-        assertThat(analyzeResponse.detail().tokenizer().getName(), equalTo("_anonymous_tokenizer"));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens().length, equalTo(3));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getTerm(), equalTo("go"));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getStartOffset(), equalTo(0));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getEndOffset(), equalTo(2));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getPosition(), equalTo(0));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getPositionLength(), equalTo(1));
-
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getTerm(), equalTo("oo"));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getStartOffset(), equalTo(1));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getEndOffset(), equalTo(3));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getPosition(), equalTo(1));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getPositionLength(), equalTo(1));
-
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getTerm(), equalTo("od"));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getStartOffset(), equalTo(2));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getEndOffset(), equalTo(4));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getPosition(), equalTo(2));
-        assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getPositionLength(), equalTo(1));
-    }
-
     public void testAnalyzeKeywordField() throws IOException {
         assertAcked(prepareCreate("test").addAlias(new Alias("alias")).addMapping("test", "keyword", "type=keyword"));
         ensureGreen("test");
@@ -254,8 +254,7 @@ public class FlushIT extends ESIntegTestCase {
                 result.totalShards(), result.failed(), result.failureReason(), detail);
     }

-    @TestLogging("_root:DEBUG")
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29392")
+    @TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE")
     public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception {
         internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
         final int numberOfReplicas = internalCluster().numDataNodes() - 1;
@@ -297,8 +296,7 @@ public class FlushIT extends ESIntegTestCase {
         assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1));
     }

-    @TestLogging("_root:DEBUG")
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29392")
+    @TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE")
     public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception {
         internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
         final int numberOfReplicas = internalCluster().numDataNodes() - 1;
@@ -677,7 +677,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
                 " \"analysis\" : {\n" +
                 " \"analyzer\" : {\n" +
                 " \"custom_1\" : {\n" +
-                " \"tokenizer\" : \"whitespace\"\n" +
+                " \"tokenizer\" : \"standard\"\n" +
                 " }\n" +
                 " }\n" +
                 " }\n" +
@@ -20,15 +20,20 @@
 package org.elasticsearch.rest.action.admin.indices;

 import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.Booleans;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestHandler;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.rest.FakeRestRequest;

 import java.io.IOException;
 import java.util.Collections;
+import java.util.Locale;

+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.hasToString;
 import static org.mockito.Mockito.mock;

 public class RestResizeHandlerTests extends ESTestCase {
@@ -36,27 +41,41 @@ public class RestResizeHandlerTests extends ESTestCase {
     public void testShrinkCopySettingsDeprecated() throws IOException {
         final RestResizeHandler.RestShrinkIndexAction handler =
                 new RestResizeHandler.RestShrinkIndexAction(Settings.EMPTY, mock(RestController.class));
-        final String copySettings = randomFrom("true", "false");
-        final FakeRestRequest request =
-                new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY)
-                        .withParams(Collections.singletonMap("copy_settings", copySettings))
-                        .withPath("source/_shrink/target")
-                        .build();
-        handler.prepareRequest(request, mock(NodeClient.class));
-        assertWarnings("parameter [copy_settings] is deprecated but was [" + copySettings + "]");
+        for (final String copySettings : new String[]{null, "", "true", "false"}) {
+            runTestResizeCopySettingsDeprecated(handler, "shrink", copySettings);
+        }
     }

     public void testSplitCopySettingsDeprecated() throws IOException {
         final RestResizeHandler.RestSplitIndexAction handler =
                 new RestResizeHandler.RestSplitIndexAction(Settings.EMPTY, mock(RestController.class));
-        final String copySettings = randomFrom("true", "false");
-        final FakeRestRequest request =
+        for (final String copySettings : new String[]{null, "", "true", "false"}) {
+            runTestResizeCopySettingsDeprecated(handler, "split", copySettings);
+        }
+    }
+
+    private void runTestResizeCopySettingsDeprecated(
+            final RestResizeHandler handler, final String resizeOperation, final String copySettings) throws IOException {
+        final FakeRestRequest.Builder builder =
                 new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY)
                         .withParams(Collections.singletonMap("copy_settings", copySettings))
-                        .withPath("source/_split/target")
-                        .build();
-        handler.prepareRequest(request, mock(NodeClient.class));
-        assertWarnings("parameter [copy_settings] is deprecated but was [" + copySettings + "]");
+                        .withPath(String.format(Locale.ROOT, "source/_%s/target", resizeOperation));
+        if (copySettings != null) {
+            builder.withParams(Collections.singletonMap("copy_settings", copySettings));
+        }
+        final FakeRestRequest request = builder.build();
+        if ("false".equals(copySettings)) {
+            final IllegalArgumentException e =
+                    expectThrows(IllegalArgumentException.class, () -> handler.prepareRequest(request, mock(NodeClient.class)));
+            assertThat(e, hasToString(containsString("parameter [copy_settings] can not be explicitly set to [false]")));
+        } else {
+            handler.prepareRequest(request, mock(NodeClient.class));
+            if (copySettings == null) {
+                assertWarnings(
+                        "resize operations without copying settings is deprecated; "
+                                + "set parameter [copy_settings] to [true] for future default behavior");
+            }
+        }
     }

 }
@@ -1359,7 +1359,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
     public void testPhrasePrefix() throws IOException {
         Builder builder = Settings.builder()
                 .put(indexSettings())
-                .put("index.analysis.analyzer.synonym.tokenizer", "whitespace")
+                .put("index.analysis.analyzer.synonym.tokenizer", "standard")
                 .putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase")
                 .put("index.analysis.filter.synonym.type", "synonym")
                 .putList("index.analysis.filter.synonym.synonyms", "quick => fast");
@@ -2804,7 +2804,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
     public void testSynonyms() throws IOException {
         Builder builder = Settings.builder()
                 .put(indexSettings())
-                .put("index.analysis.analyzer.synonym.tokenizer", "whitespace")
+                .put("index.analysis.analyzer.synonym.tokenizer", "standard")
                 .putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase")
                 .put("index.analysis.filter.synonym.type", "synonym")
                 .putList("index.analysis.filter.synonym.synonyms", "fast,quick");
@@ -156,7 +156,7 @@ public class QueryRescorerIT extends ESIntegTestCase {

     public void testMoreDocs() throws Exception {
         Builder builder = Settings.builder();
-        builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
+        builder.put("index.analysis.analyzer.synonym.tokenizer", "standard");
         builder.putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
         builder.put("index.analysis.filter.synonym.type", "synonym");
         builder.putList("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
@@ -234,7 +234,7 @@ public class QueryRescorerIT extends ESIntegTestCase {
     // Tests a rescore window smaller than number of hits:
     public void testSmallRescoreWindow() throws Exception {
         Builder builder = Settings.builder();
-        builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
+        builder.put("index.analysis.analyzer.synonym.tokenizer", "standard");
         builder.putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
         builder.put("index.analysis.filter.synonym.type", "synonym");
         builder.putList("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
@@ -306,7 +306,7 @@ public class QueryRescorerIT extends ESIntegTestCase {
     // Tests a rescorer that penalizes the scores:
     public void testRescorerMadeScoresWorse() throws Exception {
         Builder builder = Settings.builder();
-        builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
+        builder.put("index.analysis.analyzer.synonym.tokenizer", "standard");
         builder.putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
         builder.put("index.analysis.filter.synonym.type", "synonym");
         builder.putList("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
@@ -82,7 +82,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
                 .put("index.analysis.analyzer.perfect_match.tokenizer", "keyword")
                 .put("index.analysis.analyzer.perfect_match.filter", "lowercase")
                 .put("index.analysis.analyzer.category.type", "custom")
-                .put("index.analysis.analyzer.category.tokenizer", "whitespace")
+                .put("index.analysis.analyzer.category.tokenizer", "standard")
                 .put("index.analysis.analyzer.category.filter", "lowercase")
         );
         assertAcked(builder.addMapping("test", createMapping()));
@@ -20,7 +20,6 @@
 package org.elasticsearch.search.query;

 import org.apache.lucene.util.English;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
@@ -30,7 +29,6 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
-import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.MatchQueryBuilder;
 import org.elasticsearch.index.query.MultiMatchQueryBuilder;
@@ -351,7 +349,7 @@ public class SearchQueryIT extends ESIntegTestCase {
                         .put(SETTING_NUMBER_OF_SHARDS,1)
                         .put("index.analysis.filter.syns.type","synonym")
                         .putList("index.analysis.filter.syns.synonyms","quick,fast")
-                        .put("index.analysis.analyzer.syns.tokenizer","whitespace")
+                        .put("index.analysis.analyzer.syns.tokenizer","standard")
                         .put("index.analysis.analyzer.syns.filter","syns")
                 )
                 .addMapping("type1", "field1", "type=text,analyzer=syns", "field2", "type=text,analyzer=syns"));
@@ -1764,56 +1762,6 @@ public class SearchQueryIT extends ESIntegTestCase {
         assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
     }

-    // see #5120
-    public void testNGramCopyField() {
-        CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(Settings.builder()
-                .put(indexSettings())
-                .put(IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey(), 9)
-                .put("index.analysis.analyzer.my_ngram_analyzer.type", "custom")
-                .put("index.analysis.analyzer.my_ngram_analyzer.tokenizer", "my_ngram_tokenizer")
-                .put("index.analysis.tokenizer.my_ngram_tokenizer.type", "nGram")
-                .put("index.analysis.tokenizer.my_ngram_tokenizer.min_gram", "1")
-                .put("index.analysis.tokenizer.my_ngram_tokenizer.max_gram", "10")
-                .putList("index.analysis.tokenizer.my_ngram_tokenizer.token_chars", new String[0]));
-        assertAcked(builder.addMapping("test", "origin", "type=text,copy_to=meta", "meta", "type=text,analyzer=my_ngram_analyzer"));
-        // we only have ngrams as the index analyzer so searches will get standard analyzer
-
-
-        client().prepareIndex("test", "test", "1").setSource("origin", "C.A1234.5678")
-            .setRefreshPolicy(IMMEDIATE)
-            .get();
-
-        SearchResponse searchResponse = client().prepareSearch("test")
-            .setQuery(matchQuery("meta", "1234"))
-            .get();
-        assertHitCount(searchResponse, 1L);
-
-        searchResponse = client().prepareSearch("test")
-            .setQuery(matchQuery("meta", "1234.56"))
-            .get();
-        assertHitCount(searchResponse, 1L);
-
-        searchResponse = client().prepareSearch("test")
-            .setQuery(termQuery("meta", "A1234"))
-            .get();
-        assertHitCount(searchResponse, 1L);
-
-        searchResponse = client().prepareSearch("test")
-            .setQuery(termQuery("meta", "a1234"))
-            .get();
-        assertHitCount(searchResponse, 0L); // it's upper case
-
-        searchResponse = client().prepareSearch("test")
-            .setQuery(matchQuery("meta", "A1234").analyzer("my_ngram_analyzer"))
-            .get(); // force ngram analyzer
-        assertHitCount(searchResponse, 1L);
-
-        searchResponse = client().prepareSearch("test")
-            .setQuery(matchQuery("meta", "a1234").analyzer("my_ngram_analyzer"))
-            .get(); // this one returns a hit since it's default operator is OR
-        assertHitCount(searchResponse, 1L);
-    }
-
     public void testMatchPhrasePrefixQuery() throws ExecutionException, InterruptedException {
         createIndex("test1");
         indexRandom(true, client().prepareIndex("test1", "type1", "1").setSource("field", "Johnnie Walker Black Label"),
@@ -427,7 +427,7 @@ public class SuggestSearchIT extends ESIntegTestCase {
     public void testStopwordsOnlyPhraseSuggest() throws IOException {
         assertAcked(prepareCreate("test").addMapping("typ1", "body", "type=text,analyzer=stopwd").setSettings(
                 Settings.builder()
-                        .put("index.analysis.analyzer.stopwd.tokenizer", "whitespace")
+                        .put("index.analysis.analyzer.stopwd.tokenizer", "standard")
                         .putList("index.analysis.analyzer.stopwd.filter", "stop")
         ));
         ensureGreen();
@@ -3094,7 +3094,6 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         assertEquals("IndexShardSnapshotFailedException[Aborted]", snapshotInfo.shardFailures().get(0).reason());
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30507")
     public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception {
         logger.info("--> creating repository");
         final Path repoPath = randomRepoPath();
@@ -22,18 +22,10 @@ package org.elasticsearch.indices.analysis;
 import org.apache.lucene.analysis.util.CharFilterFactory;
 import org.apache.lucene.analysis.util.TokenFilterFactory;
 import org.apache.lucene.analysis.util.TokenizerFactory;
-import org.elasticsearch.Version;
 import org.elasticsearch.common.collect.MapBuilder;
-import org.elasticsearch.index.analysis.ClassicTokenizerFactory;
-import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory;
 import org.elasticsearch.index.analysis.HunspellTokenFilterFactory;
 import org.elasticsearch.index.analysis.KeywordTokenizerFactory;
-import org.elasticsearch.index.analysis.LetterTokenizerFactory;
-import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
 import org.elasticsearch.index.analysis.MultiTermAwareComponent;
-import org.elasticsearch.index.analysis.NGramTokenizerFactory;
-import org.elasticsearch.index.analysis.PathHierarchyTokenizerFactory;
-import org.elasticsearch.index.analysis.PatternTokenizerFactory;
 import org.elasticsearch.index.analysis.PreConfiguredCharFilter;
 import org.elasticsearch.index.analysis.PreConfiguredTokenFilter;
 import org.elasticsearch.index.analysis.PreConfiguredTokenizer;
@@ -43,9 +35,6 @@ import org.elasticsearch.index.analysis.StandardTokenizerFactory;
 import org.elasticsearch.index.analysis.StopTokenFilterFactory;
 import org.elasticsearch.index.analysis.SynonymGraphTokenFilterFactory;
 import org.elasticsearch.index.analysis.SynonymTokenFilterFactory;
-import org.elasticsearch.index.analysis.ThaiTokenizerFactory;
-import org.elasticsearch.index.analysis.UAX29URLEmailTokenizerFactory;
-import org.elasticsearch.index.analysis.WhitespaceTokenizerFactory;
 import org.elasticsearch.plugins.AnalysisPlugin;
 import org.elasticsearch.test.ESTestCase;

@@ -88,20 +77,20 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase {

     static final Map<String,Class<?>> KNOWN_TOKENIZERS = new MapBuilder<String,Class<?>>()
         // exposed in ES
-        .put("classic", ClassicTokenizerFactory.class)
-        .put("edgengram", EdgeNGramTokenizerFactory.class)
+        .put("classic", MovedToAnalysisCommon.class)
+        .put("edgengram", MovedToAnalysisCommon.class)
         .put("keyword", KeywordTokenizerFactory.class)
-        .put("letter", LetterTokenizerFactory.class)
-        .put("lowercase", LowerCaseTokenizerFactory.class)
-        .put("ngram", NGramTokenizerFactory.class)
-        .put("pathhierarchy", PathHierarchyTokenizerFactory.class)
-        .put("pattern", PatternTokenizerFactory.class)
+        .put("letter", MovedToAnalysisCommon.class)
+        .put("lowercase", MovedToAnalysisCommon.class)
+        .put("ngram", MovedToAnalysisCommon.class)
+        .put("pathhierarchy", MovedToAnalysisCommon.class)
+        .put("pattern", MovedToAnalysisCommon.class)
         .put("simplepattern", MovedToAnalysisCommon.class)
         .put("simplepatternsplit", MovedToAnalysisCommon.class)
         .put("standard", StandardTokenizerFactory.class)
-        .put("thai", ThaiTokenizerFactory.class)
-        .put("uax29urlemail", UAX29URLEmailTokenizerFactory.class)
-        .put("whitespace", WhitespaceTokenizerFactory.class)
+        .put("thai", MovedToAnalysisCommon.class)
+        .put("uax29urlemail", MovedToAnalysisCommon.class)
+        .put("whitespace", MovedToAnalysisCommon.class)

         // this one "seems to mess up offsets". probably shouldn't be a tokenizer...
         .put("wikipedia", Void.class)
@@ -292,23 +281,8 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase {
         Map<String, Class<?>> tokenizers = new HashMap<>();
         // TODO drop this temporary shim when all the old style tokenizers have been migrated to new style
         for (PreBuiltTokenizers tokenizer : PreBuiltTokenizers.values()) {
-            final Class<?> luceneFactoryClazz;
-            switch (tokenizer) {
-            case UAX_URL_EMAIL:
-                luceneFactoryClazz = org.apache.lucene.analysis.standard.UAX29URLEmailTokenizerFactory.class;
-                break;
-            case PATH_HIERARCHY:
-                luceneFactoryClazz = Void.class;
-                break;
-            default:
-                luceneFactoryClazz = null;
-            }
-            tokenizers.put(tokenizer.name().toLowerCase(Locale.ROOT), luceneFactoryClazz);
+            tokenizers.put(tokenizer.name().toLowerCase(Locale.ROOT), null);
         }
-        // TODO drop aliases once they are moved to module
-        tokenizers.put("nGram", tokenizers.get("ngram"));
-        tokenizers.put("edgeNGram", tokenizers.get("edge_ngram"));
-        tokenizers.put("PathHierarchy", tokenizers.get("path_hierarchy"));
         return tokenizers;
     }

@@ -91,10 +91,10 @@ public class StateProcessor extends AbstractComponent {
     }

     void persist(String jobId, BytesReference bytes) throws IOException {
-        logger.trace("[{}] ES API CALL: bulk index", jobId);
         BulkRequest bulkRequest = new BulkRequest();
         bulkRequest.add(bytes, AnomalyDetectorsIndex.jobStateIndexName(), ElasticsearchMappings.DOC_TYPE, XContentType.JSON);
         if (bulkRequest.numberOfActions() > 0) {
+            logger.trace("[{}] Persisting job state document", jobId);
             try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
                 client.bulk(bulkRequest).actionGet();
             }
@ -6,6 +6,7 @@
|
||||||
package org.elasticsearch.xpack.sql.plan.logical.command.sys;
|
package org.elasticsearch.xpack.sql.plan.logical.command.sys;
|
||||||
|
|
||||||
import org.elasticsearch.action.ActionListener;
|
import org.elasticsearch.action.ActionListener;
|
||||||
|
import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexInfo;
|
||||||
import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType;
|
import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType;
|
||||||
import org.elasticsearch.xpack.sql.expression.Attribute;
|
import org.elasticsearch.xpack.sql.expression.Attribute;
|
||||||
import org.elasticsearch.xpack.sql.expression.regex.LikePattern;
|
import org.elasticsearch.xpack.sql.expression.regex.LikePattern;
|
||||||
|
@@ -18,6 +19,7 @@ import org.elasticsearch.xpack.sql.tree.NodeInfo;
|
||||||
import org.elasticsearch.xpack.sql.util.CollectionUtils;
|
import org.elasticsearch.xpack.sql.util.CollectionUtils;
|
||||||
|
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
|
import java.util.Comparator;
|
||||||
import java.util.EnumSet;
|
import java.util.EnumSet;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Objects;
|
import java.util.Objects;
|
||||||
|
@@ -93,6 +95,8 @@ public class SysTables extends Command {
|
||||||
enumeration[3] = type.toSql();
|
enumeration[3] = type.toSql();
|
||||||
values.add(asList(enumeration));
|
values.add(asList(enumeration));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
values.sort(Comparator.comparing(l -> l.get(3).toString()));
|
||||||
listener.onResponse(Rows.of(output(), values));
|
listener.onResponse(Rows.of(output(), values));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@@ -112,6 +116,9 @@ public class SysTables extends Command {
|
||||||
|
|
||||||
session.indexResolver().resolveNames(index, regex, types, ActionListener.wrap(result -> listener.onResponse(
|
session.indexResolver().resolveNames(index, regex, types, ActionListener.wrap(result -> listener.onResponse(
|
||||||
Rows.of(output(), result.stream()
|
Rows.of(output(), result.stream()
|
||||||
|
// sort by type (which might be legacy), then by name
|
||||||
|
.sorted(Comparator.<IndexInfo, String> comparing(i -> legacyName(i.type()))
|
||||||
|
.thenComparing(Comparator.comparing(i -> i.name())))
|
||||||
.map(t -> asList(cluster,
|
.map(t -> asList(cluster,
|
||||||
EMPTY,
|
EMPTY,
|
||||||
t.name(),
|
t.name(),
|
||||||
|
|
|
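Both SysTables hunks above add a deterministic ordering to the rows the command returns: the enumeration path sorts the collected rows by the value in column 3, and the index-resolver path sorts the resolved entries by their (possibly legacy) type name and then by index name before mapping them to rows. A small sketch of the same Comparator chaining, reusing the IndexInfo and legacyName names from the hunk purely for illustration:

    // 'result' stands for the collection of IndexInfo handed back by the
    // index resolver, as in the hunk above.
    List<IndexInfo> infos = new ArrayList<>(result);
    // Sort by legacy type name first, then break ties by index name.
    infos.sort(Comparator.<IndexInfo, String> comparing(i -> legacyName(i.type()))
            .thenComparing(Comparator.comparing(i -> i.name())));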
@@ -24,6 +24,7 @@ import org.elasticsearch.xpack.sql.type.DataTypes;
|
||||||
import org.elasticsearch.xpack.sql.type.EsField;
|
import org.elasticsearch.xpack.sql.type.EsField;
|
||||||
import org.elasticsearch.xpack.sql.type.TypesTests;
|
import org.elasticsearch.xpack.sql.type.TypesTests;
|
||||||
|
|
||||||
|
import java.util.Comparator;
|
||||||
import java.util.Iterator;
|
import java.util.Iterator;
|
||||||
import java.util.LinkedHashSet;
|
import java.util.LinkedHashSet;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
@@ -57,30 +58,30 @@ public class SysTablesTests extends ESTestCase {
|
||||||
|
|
||||||
public void testSysTablesNoTypes() throws Exception {
|
public void testSysTablesNoTypes() throws Exception {
|
||||||
executeCommand("SYS TABLES", r -> {
|
executeCommand("SYS TABLES", r -> {
|
||||||
|
assertEquals("alias", r.column(2));
|
||||||
|
assertTrue(r.advanceRow());
|
||||||
assertEquals(2, r.size());
|
assertEquals(2, r.size());
|
||||||
assertEquals("test", r.column(2));
|
assertEquals("test", r.column(2));
|
||||||
assertTrue(r.advanceRow());
|
|
||||||
assertEquals("alias", r.column(2));
|
|
||||||
}, index, alias);
|
}, index, alias);
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testSysTablesPattern() throws Exception {
|
public void testSysTablesPattern() throws Exception {
|
||||||
executeCommand("SYS TABLES LIKE '%'", r -> {
|
executeCommand("SYS TABLES LIKE '%'", r -> {
|
||||||
|
assertEquals("alias", r.column(2));
|
||||||
|
assertTrue(r.advanceRow());
|
||||||
assertEquals(2, r.size());
|
assertEquals(2, r.size());
|
||||||
assertEquals("test", r.column(2));
|
assertEquals("test", r.column(2));
|
||||||
assertTrue(r.advanceRow());
|
|
||||||
assertEquals("alias", r.column(2));
|
|
||||||
}, index, alias);
|
}, index, alias);
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testSysTablesPatternParameterized() throws Exception {
|
public void testSysTablesPatternParameterized() throws Exception {
|
||||||
List<SqlTypedParamValue> params = asList(param("%"));
|
List<SqlTypedParamValue> params = asList(param("%"));
|
||||||
executeCommand("SYS TABLES LIKE ?", params, r -> {
|
executeCommand("SYS TABLES LIKE ?", params, r -> {
|
||||||
|
assertEquals("alias", r.column(2));
|
||||||
|
assertTrue(r.advanceRow());
|
||||||
assertEquals(2, r.size());
|
assertEquals(2, r.size());
|
||||||
assertEquals("test", r.column(2));
|
assertEquals("test", r.column(2));
|
||||||
assertTrue(r.advanceRow());
|
}, alias, index);
|
||||||
assertEquals("alias", r.column(2));
|
|
||||||
}, index, alias);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testSysTablesOnlyAliases() throws Exception {
|
public void testSysTablesOnlyAliases() throws Exception {
|
||||||
|
@@ -131,32 +132,32 @@ public class SysTablesTests extends ESTestCase {
|
||||||
|
|
||||||
public void testSysTablesOnlyIndicesAndAliases() throws Exception {
|
public void testSysTablesOnlyIndicesAndAliases() throws Exception {
|
||||||
executeCommand("SYS TABLES LIKE 'test' TYPE 'ALIAS', 'BASE TABLE'", r -> {
|
executeCommand("SYS TABLES LIKE 'test' TYPE 'ALIAS', 'BASE TABLE'", r -> {
|
||||||
|
assertEquals("alias", r.column(2));
|
||||||
|
assertTrue(r.advanceRow());
|
||||||
assertEquals(2, r.size());
|
assertEquals(2, r.size());
|
||||||
assertEquals("test", r.column(2));
|
assertEquals("test", r.column(2));
|
||||||
assertTrue(r.advanceRow());
|
|
||||||
assertEquals("alias", r.column(2));
|
|
||||||
}, index, alias);
|
}, index, alias);
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testSysTablesOnlyIndicesAndAliasesParameterized() throws Exception {
|
public void testSysTablesOnlyIndicesAndAliasesParameterized() throws Exception {
|
||||||
List<SqlTypedParamValue> params = asList(param("ALIAS"), param("BASE TABLE"));
|
List<SqlTypedParamValue> params = asList(param("ALIAS"), param("BASE TABLE"));
|
||||||
executeCommand("SYS TABLES LIKE 'test' TYPE ?, ?", params, r -> {
|
executeCommand("SYS TABLES LIKE 'test' TYPE ?, ?", params, r -> {
|
||||||
|
assertEquals("alias", r.column(2));
|
||||||
|
assertTrue(r.advanceRow());
|
||||||
assertEquals(2, r.size());
|
assertEquals(2, r.size());
|
||||||
assertEquals("test", r.column(2));
|
assertEquals("test", r.column(2));
|
||||||
assertTrue(r.advanceRow());
|
|
||||||
assertEquals("alias", r.column(2));
|
|
||||||
}, index, alias);
|
}, index, alias);
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testSysTablesOnlyIndicesLegacyAndAliasesParameterized() throws Exception {
|
public void testSysTablesOnlyIndicesLegacyAndAliasesParameterized() throws Exception {
|
||||||
List<SqlTypedParamValue> params = asList(param("ALIAS"), param("TABLE"));
|
List<SqlTypedParamValue> params = asList(param("ALIAS"), param("TABLE"));
|
||||||
executeCommand("SYS TABLES LIKE 'test' TYPE ?, ?", params, r -> {
|
executeCommand("SYS TABLES LIKE 'test' TYPE ?, ?", params, r -> {
|
||||||
|
assertEquals("alias", r.column(2));
|
||||||
|
assertEquals("ALIAS", r.column(3));
|
||||||
|
assertTrue(r.advanceRow());
|
||||||
assertEquals(2, r.size());
|
assertEquals(2, r.size());
|
||||||
assertEquals("test", r.column(2));
|
assertEquals("test", r.column(2));
|
||||||
assertEquals("TABLE", r.column(3));
|
assertEquals("TABLE", r.column(3));
|
||||||
assertTrue(r.advanceRow());
|
|
||||||
assertEquals("alias", r.column(2));
|
|
||||||
assertEquals("ALIAS", r.column(3));
|
|
||||||
}, index, alias);
|
}, index, alias);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -188,7 +189,7 @@ public class SysTablesTests extends ESTestCase {
|
||||||
executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> {
|
executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> {
|
||||||
assertEquals(2, r.size());
|
assertEquals(2, r.size());
|
||||||
|
|
||||||
Iterator<IndexType> it = IndexType.VALID.iterator();
|
Iterator<IndexType> it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator();
|
||||||
|
|
||||||
for (int t = 0; t < r.size(); t++) {
|
for (int t = 0; t < r.size(); t++) {
|
||||||
assertEquals(it.next().toSql(), r.column(3));
|
assertEquals(it.next().toSql(), r.column(3));
|
||||||
|
@@ -209,7 +210,7 @@ public class SysTablesTests extends ESTestCase {
|
||||||
executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> {
|
executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> {
|
||||||
assertEquals(2, r.size());
|
assertEquals(2, r.size());
|
||||||
|
|
||||||
Iterator<IndexType> it = IndexType.VALID.iterator();
|
Iterator<IndexType> it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator();
|
||||||
|
|
||||||
for (int t = 0; t < r.size(); t++) {
|
for (int t = 0; t < r.size(); t++) {
|
||||||
assertEquals(it.next().toSql(), r.column(3));
|
assertEquals(it.next().toSql(), r.column(3));
|
||||||
|
|
|
@@ -141,7 +141,7 @@ subprojects {
|
||||||
configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
|
configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
|
||||||
dependsOn copyTestNodeKeystore
|
dependsOn copyTestNodeKeystore
|
||||||
if (version.before('6.3.0')) {
|
if (version.before('6.3.0')) {
|
||||||
plugin xpackProject('plugin').path
|
mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}"
|
||||||
}
|
}
|
||||||
bwcVersion = version
|
bwcVersion = version
|
||||||
numBwcNodes = 2
|
numBwcNodes = 2
|
||||||
|
|
|
@@ -82,7 +82,7 @@ for (Version version : bwcVersions.wireCompatible) {
|
||||||
|
|
||||||
configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
|
configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
|
||||||
if (version.before('6.3.0')) {
|
if (version.before('6.3.0')) {
|
||||||
plugin xpackProject('plugin').path
|
mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}"
|
||||||
}
|
}
|
||||||
bwcVersion = version
|
bwcVersion = version
|
||||||
numBwcNodes = 2
|
numBwcNodes = 2
|
||||||
|
|
|
@@ -123,7 +123,7 @@ subprojects {
|
||||||
configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
|
configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
|
||||||
dependsOn copyTestNodeKeystore
|
dependsOn copyTestNodeKeystore
|
||||||
if (version.before('6.3.0')) {
|
if (version.before('6.3.0')) {
|
||||||
plugin xpackProject('plugin').path
|
mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}"
|
||||||
}
|
}
|
||||||
String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users'
|
String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users'
|
||||||
setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'
|
setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'
|
||||||
|
|
|
@@ -8,6 +8,7 @@ package org.elasticsearch.xpack.qa.sql.multinode;
|
||||||
import org.apache.http.HttpHost;
|
import org.apache.http.HttpHost;
|
||||||
import org.apache.http.entity.ContentType;
|
import org.apache.http.entity.ContentType;
|
||||||
import org.apache.http.entity.StringEntity;
|
import org.apache.http.entity.StringEntity;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import org.elasticsearch.client.Response;
|
import org.elasticsearch.client.Response;
|
||||||
import org.elasticsearch.client.RestClient;
|
import org.elasticsearch.client.RestClient;
|
||||||
import org.elasticsearch.common.Strings;
|
import org.elasticsearch.common.Strings;
|
||||||
|
@@ -53,7 +54,7 @@ public class RestSqlMultinodeIT extends ESRestTestCase {
|
||||||
String firstHostName = null;
|
String firstHostName = null;
|
||||||
|
|
||||||
String match = firstHost.getHostName() + ":" + firstHost.getPort();
|
String match = firstHost.getHostName() + ":" + firstHost.getPort();
|
||||||
Map<String, Object> nodesInfo = responseToMap(client().performRequest("GET", "/_nodes"));
|
Map<String, Object> nodesInfo = responseToMap(client().performRequest(new Request("GET", "/_nodes")));
|
||||||
@SuppressWarnings("unchecked")
|
@SuppressWarnings("unchecked")
|
||||||
Map<String, Object> nodes = (Map<String, Object>) nodesInfo.get("nodes");
|
Map<String, Object> nodes = (Map<String, Object>) nodesInfo.get("nodes");
|
||||||
for (Map.Entry<String, Object> node : nodes.entrySet()) {
|
for (Map.Entry<String, Object> node : nodes.entrySet()) {
|
||||||
|
@@ -74,7 +75,9 @@ public class RestSqlMultinodeIT extends ESRestTestCase {
|
||||||
}
|
}
|
||||||
index.endObject();
|
index.endObject();
|
||||||
index.endObject();
|
index.endObject();
|
||||||
client().performRequest("PUT", "/test", emptyMap(), new StringEntity(Strings.toString(index), ContentType.APPLICATION_JSON));
|
Request request = new Request("PUT", "/test");
|
||||||
|
request.setJsonEntity(Strings.toString(index));
|
||||||
|
client().performRequest(request);
|
||||||
int documents = between(10, 100);
|
int documents = between(10, 100);
|
||||||
createTestData(documents);
|
createTestData(documents);
|
||||||
|
|
||||||
|
@@ -84,6 +87,9 @@ public class RestSqlMultinodeIT extends ESRestTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
private void createTestData(int documents) throws UnsupportedCharsetException, IOException {
|
private void createTestData(int documents) throws UnsupportedCharsetException, IOException {
|
||||||
|
Request request = new Request("PUT", "/test/test/_bulk");
|
||||||
|
request.addParameter("refresh", "true");
|
||||||
|
|
||||||
StringBuilder bulk = new StringBuilder();
|
StringBuilder bulk = new StringBuilder();
|
||||||
for (int i = 0; i < documents; i++) {
|
for (int i = 0; i < documents; i++) {
|
||||||
int a = 3 * i;
|
int a = 3 * i;
|
||||||
|
@@ -92,8 +98,9 @@ public class RestSqlMultinodeIT extends ESRestTestCase {
|
||||||
bulk.append("{\"index\":{\"_id\":\"" + i + "\"}\n");
|
bulk.append("{\"index\":{\"_id\":\"" + i + "\"}\n");
|
||||||
bulk.append("{\"a\": " + a + ", \"b\": " + b + ", \"c\": " + c + "}\n");
|
bulk.append("{\"a\": " + a + ", \"b\": " + b + ", \"c\": " + c + "}\n");
|
||||||
}
|
}
|
||||||
client().performRequest("PUT", "/test/test/_bulk", singletonMap("refresh", "true"),
|
request.setJsonEntity(bulk.toString());
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
|
||||||
|
client().performRequest(request);
|
||||||
}
|
}
|
||||||
|
|
||||||
private Map<String, Object> responseToMap(Response response) throws IOException {
|
private Map<String, Object> responseToMap(Response response) throws IOException {
|
||||||
|
@@ -108,14 +115,12 @@ public class RestSqlMultinodeIT extends ESRestTestCase {
|
||||||
expected.put("columns", singletonList(columnInfo(mode, "COUNT(1)", "long", JDBCType.BIGINT, 20)));
|
expected.put("columns", singletonList(columnInfo(mode, "COUNT(1)", "long", JDBCType.BIGINT, 20)));
|
||||||
expected.put("rows", singletonList(singletonList(count)));
|
expected.put("rows", singletonList(singletonList(count)));
|
||||||
|
|
||||||
Map<String, String> params = new TreeMap<>();
|
Request request = new Request("POST", "/_xpack/sql");
|
||||||
params.put("format", "json"); // JSON is easier to parse then a table
|
if (false == mode.isEmpty()) {
|
||||||
if (Strings.hasText(mode)) {
|
request.addParameter("mode", mode);
|
||||||
params.put("mode", mode); // JDBC or PLAIN mode
|
|
||||||
}
|
}
|
||||||
|
request.setJsonEntity("{\"query\": \"SELECT COUNT(*) FROM test\"}");
|
||||||
Map<String, Object> actual = responseToMap(client.performRequest("POST", "/_xpack/sql", params,
|
Map<String, Object> actual = responseToMap(client.performRequest(request));
|
||||||
new StringEntity("{\"query\": \"SELECT COUNT(*) FROM test\"}", ContentType.APPLICATION_JSON)));
|
|
||||||
|
|
||||||
if (false == expected.equals(actual)) {
|
if (false == expected.equals(actual)) {
|
||||||
NotEqualMessageBuilder message = new NotEqualMessageBuilder();
|
NotEqualMessageBuilder message = new NotEqualMessageBuilder();
|
||||||
|
|
|
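Every RestSqlMultinodeIT change above is the same mechanical migration to the low-level REST client's Request object: the HTTP method and endpoint go into the constructor, query parameters are added with addParameter, and the JSON body is set with setJsonEntity instead of hand-building a StringEntity and a parameter map. A minimal sketch of the new calling convention; the endpoint, parameter and body here are illustrative, not taken from the tests:

    // Build the request up front, then hand the whole thing to the client.
    Request request = new Request("PUT", "/example-index");
    request.addParameter("refresh", "true");   // query string parameter
    request.setJsonEntity("{\"a\": 1}");       // sets the entity and its JSON content type
    Response response = client().performRequest(request);

Compared with the old performRequest(method, endpoint, params, entity) overloads, this keeps every aspect of the call on one object, which is why the hunks above can drop the parameter maps and the explicit ContentType handling.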
@@ -10,6 +10,7 @@ import org.apache.http.HttpEntity;
|
||||||
import org.apache.http.entity.ContentType;
|
import org.apache.http.entity.ContentType;
|
||||||
import org.apache.http.entity.StringEntity;
|
import org.apache.http.entity.StringEntity;
|
||||||
import org.apache.http.message.BasicHeader;
|
import org.apache.http.message.BasicHeader;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import org.elasticsearch.client.Response;
|
import org.elasticsearch.client.Response;
|
||||||
import org.elasticsearch.client.ResponseException;
|
import org.elasticsearch.client.ResponseException;
|
||||||
import org.elasticsearch.common.Nullable;
|
import org.elasticsearch.common.Nullable;
|
||||||
|
@@ -176,14 +177,15 @@ public class RestSqlSecurityIT extends SqlSecurityTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
private static Map<String, Object> runSql(@Nullable String asUser, String mode, HttpEntity entity) throws IOException {
|
private static Map<String, Object> runSql(@Nullable String asUser, String mode, HttpEntity entity) throws IOException {
|
||||||
Map<String, String> params = new TreeMap<>();
|
Request request = new Request("POST", "/_xpack/sql");
|
||||||
params.put("format", "json"); // JSON is easier to parse then a table
|
if (false == mode.isEmpty()) {
|
||||||
if (Strings.hasText(mode)) {
|
request.addParameter("mode", mode);
|
||||||
params.put("mode", mode); // JDBC or PLAIN mode
|
|
||||||
}
|
}
|
||||||
Header[] headers = asUser == null ? new Header[0] : new Header[] {new BasicHeader("es-security-runas-user", asUser)};
|
if (asUser != null) {
|
||||||
Response response = client().performRequest("POST", "/_xpack/sql", params, entity, headers);
|
request.setHeaders(new BasicHeader("es-security-runas-user", asUser));
|
||||||
return toMap(response);
|
}
|
||||||
|
request.setEntity(entity);
|
||||||
|
return toMap(client().performRequest(request));
|
||||||
}
|
}
|
||||||
|
|
||||||
private static void assertResponse(Map<String, Object> expected, Map<String, Object> actual) {
|
private static void assertResponse(Map<String, Object> expected, Map<String, Object> actual) {
|
||||||
|
|
|
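In the runSql change above the security test stops building a Header array up front: the es-security-runas-user header is attached only when a user is actually being impersonated, and the parameters, headers and entity all live on the same Request. A hedged sketch of that conditional run-as call, using the same 6.x-era setHeaders method the hunk uses; asUser and entity are assumed to come from the enclosing method:

    Request request = new Request("POST", "/_xpack/sql");
    if (asUser != null) {
        // Execute the statement as another user without re-authenticating as them.
        request.setHeaders(new BasicHeader("es-security-runas-user", asUser));
    }
    request.setEntity(entity);
    Map<String, Object> result = toMap(client().performRequest(request));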
@@ -11,6 +11,7 @@ import org.apache.lucene.util.SuppressForbidden;
|
||||||
import org.elasticsearch.SpecialPermission;
|
import org.elasticsearch.SpecialPermission;
|
||||||
import org.elasticsearch.action.admin.indices.get.GetIndexAction;
|
import org.elasticsearch.action.admin.indices.get.GetIndexAction;
|
||||||
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
|
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import org.elasticsearch.client.ResponseException;
|
import org.elasticsearch.client.ResponseException;
|
||||||
import org.elasticsearch.common.Strings;
|
import org.elasticsearch.common.Strings;
|
||||||
import org.elasticsearch.common.settings.Settings;
|
import org.elasticsearch.common.settings.Settings;
|
||||||
|
@@ -41,7 +42,6 @@ import java.util.TreeMap;
|
||||||
import java.util.function.Function;
|
import java.util.function.Function;
|
||||||
import java.util.regex.Pattern;
|
import java.util.regex.Pattern;
|
||||||
|
|
||||||
import static java.util.Collections.emptyMap;
|
|
||||||
import static java.util.Collections.singletonMap;
|
import static java.util.Collections.singletonMap;
|
||||||
import static org.hamcrest.Matchers.contains;
|
import static org.hamcrest.Matchers.contains;
|
||||||
import static org.hamcrest.Matchers.empty;
|
import static org.hamcrest.Matchers.empty;
|
||||||
|
@@ -135,6 +135,9 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase {
|
||||||
* write the test data once. */
|
* write the test data once. */
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
Request request = new Request("PUT", "/_bulk");
|
||||||
|
request.addParameter("refresh", "true");
|
||||||
|
|
||||||
StringBuilder bulk = new StringBuilder();
|
StringBuilder bulk = new StringBuilder();
|
||||||
bulk.append("{\"index\":{\"_index\": \"test\", \"_type\": \"doc\", \"_id\":\"1\"}\n");
|
bulk.append("{\"index\":{\"_index\": \"test\", \"_type\": \"doc\", \"_id\":\"1\"}\n");
|
||||||
bulk.append("{\"a\": 1, \"b\": 2, \"c\": 3}\n");
|
bulk.append("{\"a\": 1, \"b\": 2, \"c\": 3}\n");
|
||||||
|
@@ -142,8 +145,8 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase {
|
||||||
bulk.append("{\"a\": 4, \"b\": 5, \"c\": 6}\n");
|
bulk.append("{\"a\": 4, \"b\": 5, \"c\": 6}\n");
|
||||||
bulk.append("{\"index\":{\"_index\": \"bort\", \"_type\": \"doc\", \"_id\":\"1\"}\n");
|
bulk.append("{\"index\":{\"_index\": \"bort\", \"_type\": \"doc\", \"_id\":\"1\"}\n");
|
||||||
bulk.append("{\"a\": \"test\"}\n");
|
bulk.append("{\"a\": \"test\"}\n");
|
||||||
client().performRequest("PUT", "/_bulk", singletonMap("refresh", "true"),
|
request.setJsonEntity(bulk.toString());
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
client().performRequest(request);
|
||||||
oneTimeSetup = true;
|
oneTimeSetup = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
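The one-time setup above writes its fixture documents with a single _bulk call: the body is newline-delimited JSON in which each action line is followed by its source line, and refresh=true makes the documents visible before the first test query runs. A hedged sketch of a request in that shape; the index, type, ids and fields are illustrative only:

    Request bulk = new Request("PUT", "/_bulk");
    bulk.addParameter("refresh", "true");  // make the docs searchable before the tests query them
    StringBuilder body = new StringBuilder();
    body.append("{\"index\":{\"_index\": \"test\", \"_type\": \"doc\", \"_id\": \"1\"}}\n"); // action line
    body.append("{\"a\": 1, \"b\": 2, \"c\": 3}\n");                                         // source line
    body.append("{\"index\":{\"_index\": \"test\", \"_type\": \"doc\", \"_id\": \"2\"}}\n");
    body.append("{\"a\": 4, \"b\": 5, \"c\": 6}\n");
    bulk.setJsonEntity(body.toString());
    client().performRequest(bulk);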
@@ -173,7 +176,7 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase {
|
||||||
@AfterClass
|
@AfterClass
|
||||||
public static void wipeIndicesAfterTests() throws IOException {
|
public static void wipeIndicesAfterTests() throws IOException {
|
||||||
try {
|
try {
|
||||||
adminClient().performRequest("DELETE", "*");
|
adminClient().performRequest(new Request("DELETE", "*"));
|
||||||
} catch (ResponseException e) {
|
} catch (ResponseException e) {
|
||||||
// 404 here just means we had no indexes
|
// 404 here just means we had no indexes
|
||||||
if (e.getResponse().getStatusLine().getStatusCode() != 404) {
|
if (e.getResponse().getStatusLine().getStatusCode() != 404) {
|
||||||
|
@@ -472,13 +475,15 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
protected static void createUser(String name, String role) throws IOException {
|
protected static void createUser(String name, String role) throws IOException {
|
||||||
XContentBuilder user = JsonXContent.contentBuilder().prettyPrint().startObject(); {
|
Request request = new Request("PUT", "/_xpack/security/user/" + name);
|
||||||
|
XContentBuilder user = JsonXContent.contentBuilder().prettyPrint();
|
||||||
|
user.startObject(); {
|
||||||
user.field("password", "testpass");
|
user.field("password", "testpass");
|
||||||
user.field("roles", role);
|
user.field("roles", role);
|
||||||
}
|
}
|
||||||
user.endObject();
|
user.endObject();
|
||||||
client().performRequest("PUT", "/_xpack/security/user/" + name, emptyMap(),
|
request.setJsonEntity(Strings.toString(user));
|
||||||
new StringEntity(Strings.toString(user), ContentType.APPLICATION_JSON));
|
client().performRequest(request);
|
||||||
}
|
}
|
||||||
|
|
||||||
protected AuditLogAsserter createAuditLogAsserter() {
|
protected AuditLogAsserter createAuditLogAsserter() {
|
||||||
|
|
|
@@ -5,9 +5,9 @@
|
||||||
*/
|
*/
|
||||||
package org.elasticsearch.xpack.qa.sql.cli;
|
package org.elasticsearch.xpack.qa.sql.cli;
|
||||||
|
|
||||||
import org.apache.http.HttpEntity;
|
|
||||||
import org.apache.http.entity.ContentType;
|
import org.apache.http.entity.ContentType;
|
||||||
import org.apache.http.entity.StringEntity;
|
import org.apache.http.entity.StringEntity;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import org.elasticsearch.common.CheckedConsumer;
|
import org.elasticsearch.common.CheckedConsumer;
|
||||||
import org.elasticsearch.common.Strings;
|
import org.elasticsearch.common.Strings;
|
||||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||||
|
@@ -19,7 +19,6 @@ import org.junit.Before;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
|
||||||
import static java.util.Collections.singletonMap;
|
|
||||||
import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts;
|
import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts;
|
||||||
|
|
||||||
public abstract class CliIntegrationTestCase extends ESRestTestCase {
|
public abstract class CliIntegrationTestCase extends ESRestTestCase {
|
||||||
|
@@ -60,11 +59,13 @@ public abstract class CliIntegrationTestCase extends ESRestTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
protected void index(String index, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
|
protected void index(String index, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
|
||||||
|
Request request = new Request("PUT", "/" + index + "/doc/1");
|
||||||
|
request.addParameter("refresh", "true");
|
||||||
XContentBuilder builder = JsonXContent.contentBuilder().startObject();
|
XContentBuilder builder = JsonXContent.contentBuilder().startObject();
|
||||||
body.accept(builder);
|
body.accept(builder);
|
||||||
builder.endObject();
|
builder.endObject();
|
||||||
HttpEntity doc = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
|
request.setJsonEntity(Strings.toString(builder));
|
||||||
client().performRequest("PUT", "/" + index + "/doc/1", singletonMap("refresh", "true"), doc);
|
client().performRequest(request);
|
||||||
}
|
}
|
||||||
|
|
||||||
public String command(String command) throws IOException {
|
public String command(String command) throws IOException {
|
||||||
|
|
|
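The index helper above is the same migration applied to a single-document write: the caller still fills in an XContentBuilder, but the serialized JSON now goes onto the Request via setJsonEntity and the refresh flag becomes a request parameter. A sketch of that write pattern, assuming the usual test-class imports; the index name and field are placeholders:

    Request request = new Request("PUT", "/library/doc/1");
    request.addParameter("refresh", "true");
    XContentBuilder builder = JsonXContent.contentBuilder().startObject();
    builder.field("title", "example document");   // caller-provided fields
    builder.endObject();
    request.setJsonEntity(Strings.toString(builder));
    client().performRequest(request);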
@@ -8,8 +8,7 @@ package org.elasticsearch.xpack.qa.sql.cli;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import org.apache.http.entity.ContentType;
|
import org.apache.http.entity.ContentType;
|
||||||
import org.apache.http.entity.StringEntity;
|
import org.apache.http.entity.StringEntity;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import static java.util.Collections.emptyMap;
|
|
||||||
|
|
||||||
import static org.hamcrest.Matchers.startsWith;
|
import static org.hamcrest.Matchers.startsWith;
|
||||||
|
|
||||||
|
@@ -41,7 +40,9 @@ public abstract class ErrorsTestCase extends CliIntegrationTestCase implements o
|
||||||
@Override
|
@Override
|
||||||
public void testSelectFromIndexWithoutTypes() throws Exception {
|
public void testSelectFromIndexWithoutTypes() throws Exception {
|
||||||
// Create an index without any types
|
// Create an index without any types
|
||||||
client().performRequest("PUT", "/test", emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON));
|
Request request = new Request("PUT", "/test");
|
||||||
|
request.setJsonEntity("{}");
|
||||||
|
client().performRequest(request);
|
||||||
|
|
||||||
assertFoundOneProblem(command("SELECT * FROM test"));
|
assertFoundOneProblem(command("SELECT * FROM test"));
|
||||||
assertEquals("line 1:15: [test] doesn't have any types so it is incompatible with sql" + END, readLine());
|
assertEquals("line 1:15: [test] doesn't have any types so it is incompatible with sql" + END, readLine());
|
||||||
|
|
|
@@ -7,10 +7,10 @@ package org.elasticsearch.xpack.qa.sql.cli;
|
||||||
|
|
||||||
import org.apache.http.entity.ContentType;
|
import org.apache.http.entity.ContentType;
|
||||||
import org.apache.http.entity.StringEntity;
|
import org.apache.http.entity.StringEntity;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
|
||||||
import static java.util.Collections.singletonMap;
|
|
||||||
import static org.hamcrest.Matchers.containsString;
|
import static org.hamcrest.Matchers.containsString;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@@ -18,13 +18,16 @@ import static org.hamcrest.Matchers.containsString;
|
||||||
*/
|
*/
|
||||||
public abstract class FetchSizeTestCase extends CliIntegrationTestCase {
|
public abstract class FetchSizeTestCase extends CliIntegrationTestCase {
|
||||||
public void testSelect() throws IOException {
|
public void testSelect() throws IOException {
|
||||||
|
Request request = new Request("PUT", "/test/doc/_bulk");
|
||||||
|
request.addParameter("refresh", "true");
|
||||||
StringBuilder bulk = new StringBuilder();
|
StringBuilder bulk = new StringBuilder();
|
||||||
for (int i = 0; i < 20; i++) {
|
for (int i = 0; i < 20; i++) {
|
||||||
bulk.append("{\"index\":{}}\n");
|
bulk.append("{\"index\":{}}\n");
|
||||||
bulk.append("{\"test_field\":" + i + "}\n");
|
bulk.append("{\"test_field\":" + i + "}\n");
|
||||||
}
|
}
|
||||||
client().performRequest("PUT", "/test/doc/_bulk", singletonMap("refresh", "true"),
|
request.setJsonEntity(bulk.toString());
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
client().performRequest(request);
|
||||||
|
|
||||||
assertEquals("[?1l>[?1000l[?2004lfetch size set to [90m4[0m", command("fetch size = 4"));
|
assertEquals("[?1l>[?1000l[?2004lfetch size set to [90m4[0m", command("fetch size = 4"));
|
||||||
assertEquals("[?1l>[?1000l[?2004lfetch separator set to \"[90m -- fetch sep -- [0m\"",
|
assertEquals("[?1l>[?1000l[?2004lfetch separator set to \"[90m -- fetch sep -- [0m\"",
|
||||||
command("fetch separator = \" -- fetch sep -- \""));
|
command("fetch separator = \" -- fetch sep -- \""));
|
||||||
|
|
|
@@ -8,6 +8,7 @@ package org.elasticsearch.xpack.qa.sql.jdbc;
|
||||||
import org.apache.http.HttpHost;
|
import org.apache.http.HttpHost;
|
||||||
import org.apache.http.entity.ContentType;
|
import org.apache.http.entity.ContentType;
|
||||||
import org.apache.http.entity.StringEntity;
|
import org.apache.http.entity.StringEntity;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import org.elasticsearch.client.RestClient;
|
import org.elasticsearch.client.RestClient;
|
||||||
import org.elasticsearch.common.CheckedBiConsumer;
|
import org.elasticsearch.common.CheckedBiConsumer;
|
||||||
import org.elasticsearch.common.Strings;
|
import org.elasticsearch.common.Strings;
|
||||||
|
@@ -55,6 +56,7 @@ public class DataLoader {
|
||||||
.endObject();
|
.endObject();
|
||||||
}
|
}
|
||||||
protected static void loadDatasetIntoEs(RestClient client, String index) throws Exception {
|
protected static void loadDatasetIntoEs(RestClient client, String index) throws Exception {
|
||||||
|
Request request = new Request("PUT", "/" + index);
|
||||||
XContentBuilder createIndex = JsonXContent.contentBuilder().startObject();
|
XContentBuilder createIndex = JsonXContent.contentBuilder().startObject();
|
||||||
createIndex.startObject("settings");
|
createIndex.startObject("settings");
|
||||||
{
|
{
|
||||||
|
@@ -91,10 +93,8 @@ public class DataLoader {
|
||||||
createIndex.endObject();
|
createIndex.endObject();
|
||||||
}
|
}
|
||||||
createIndex.endObject().endObject();
|
createIndex.endObject().endObject();
|
||||||
|
request.setJsonEntity(Strings.toString(createIndex));
|
||||||
client.performRequest("PUT", "/" + index, emptyMap(), new StringEntity(Strings.toString(createIndex),
|
client.performRequest(request);
|
||||||
ContentType.APPLICATION_JSON));
|
|
||||||
|
|
||||||
|
|
||||||
Map<String, String> deps = new LinkedHashMap<>();
|
Map<String, String> deps = new LinkedHashMap<>();
|
||||||
csvToLines("departments", (titles, fields) -> deps.put(fields.get(0), fields.get(1)));
|
csvToLines("departments", (titles, fields) -> deps.put(fields.get(0), fields.get(1)));
|
||||||
|
@@ -119,6 +119,8 @@ public class DataLoader {
|
||||||
list.add(dep);
|
list.add(dep);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
request = new Request("POST", "/" + index + "/emp/_bulk");
|
||||||
|
request.addParameter("refresh", "true");
|
||||||
StringBuilder bulk = new StringBuilder();
|
StringBuilder bulk = new StringBuilder();
|
||||||
csvToLines("employees", (titles, fields) -> {
|
csvToLines("employees", (titles, fields) -> {
|
||||||
bulk.append("{\"index\":{}}\n");
|
bulk.append("{\"index\":{}}\n");
|
||||||
|
@@ -149,14 +151,13 @@ public class DataLoader {
|
||||||
|
|
||||||
bulk.append("}\n");
|
bulk.append("}\n");
|
||||||
});
|
});
|
||||||
|
request.setJsonEntity(bulk.toString());
|
||||||
client.performRequest("POST", "/" + index + "/emp/_bulk", singletonMap("refresh", "true"),
|
client.performRequest(request);
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
protected static void makeAlias(RestClient client, String aliasName, String... indices) throws Exception {
|
protected static void makeAlias(RestClient client, String aliasName, String... indices) throws Exception {
|
||||||
for (String index : indices) {
|
for (String index : indices) {
|
||||||
client.performRequest("POST", "/" + index + "/_alias/" + aliasName);
|
client.performRequest(new Request("POST", "/" + index + "/_alias/" + aliasName));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -9,8 +9,7 @@ import java.sql.Connection;
|
||||||
import java.sql.SQLException;
|
import java.sql.SQLException;
|
||||||
import org.apache.http.entity.ContentType;
|
import org.apache.http.entity.ContentType;
|
||||||
import org.apache.http.entity.StringEntity;
|
import org.apache.http.entity.StringEntity;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import static java.util.Collections.emptyMap;
|
|
||||||
|
|
||||||
import static org.hamcrest.Matchers.startsWith;
|
import static org.hamcrest.Matchers.startsWith;
|
||||||
|
|
||||||
|
@@ -37,7 +36,9 @@ public class ErrorsTestCase extends JdbcIntegrationTestCase implements org.elast
|
||||||
@Override
|
@Override
|
||||||
public void testSelectFromIndexWithoutTypes() throws Exception {
|
public void testSelectFromIndexWithoutTypes() throws Exception {
|
||||||
// Create an index without any types
|
// Create an index without any types
|
||||||
client().performRequest("PUT", "/test", emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON));
|
Request request = new Request("PUT", "/test");
|
||||||
|
request.setJsonEntity("{}");
|
||||||
|
client().performRequest(request);
|
||||||
|
|
||||||
try (Connection c = esJdbc()) {
|
try (Connection c = esJdbc()) {
|
||||||
SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT * FROM test").executeQuery());
|
SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT * FROM test").executeQuery());
|
||||||
|
|
|
@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.qa.sql.jdbc;
|
||||||
|
|
||||||
import org.apache.http.entity.ContentType;
|
import org.apache.http.entity.ContentType;
|
||||||
import org.apache.http.entity.StringEntity;
|
import org.apache.http.entity.StringEntity;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
@@ -15,7 +16,6 @@ import java.sql.ResultSet;
|
||||||
import java.sql.SQLException;
|
import java.sql.SQLException;
|
||||||
import java.sql.Statement;
|
import java.sql.Statement;
|
||||||
|
|
||||||
import static java.util.Collections.singletonMap;
|
|
||||||
import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts;
|
import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@@ -25,13 +25,15 @@ import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearch
|
||||||
public class FetchSizeTestCase extends JdbcIntegrationTestCase {
|
public class FetchSizeTestCase extends JdbcIntegrationTestCase {
|
||||||
@Before
|
@Before
|
||||||
public void createTestIndex() throws IOException {
|
public void createTestIndex() throws IOException {
|
||||||
|
Request request = new Request("PUT", "/test/doc/_bulk");
|
||||||
|
request.addParameter("refresh", "true");
|
||||||
StringBuilder bulk = new StringBuilder();
|
StringBuilder bulk = new StringBuilder();
|
||||||
for (int i = 0; i < 20; i++) {
|
for (int i = 0; i < 20; i++) {
|
||||||
bulk.append("{\"index\":{}}\n");
|
bulk.append("{\"index\":{}}\n");
|
||||||
bulk.append("{\"test_field\":" + i + "}\n");
|
bulk.append("{\"test_field\":" + i + "}\n");
|
||||||
}
|
}
|
||||||
client().performRequest("PUT", "/test/doc/_bulk", singletonMap("refresh", "true"),
|
request.setJsonEntity(bulk.toString());
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
client().performRequest(request);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@@ -9,6 +9,7 @@ import org.apache.http.HttpEntity;
|
||||||
import org.apache.http.entity.ContentType;
|
import org.apache.http.entity.ContentType;
|
||||||
import org.apache.http.entity.StringEntity;
|
import org.apache.http.entity.StringEntity;
|
||||||
import org.apache.http.util.EntityUtils;
|
import org.apache.http.util.EntityUtils;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import org.elasticsearch.common.CheckedConsumer;
|
import org.elasticsearch.common.CheckedConsumer;
|
||||||
import org.elasticsearch.common.Strings;
|
import org.elasticsearch.common.Strings;
|
||||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||||
|
@@ -85,16 +86,18 @@ public abstract class JdbcIntegrationTestCase extends ESRestTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
public static void index(String index, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
|
public static void index(String index, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
|
||||||
|
Request request = new Request("PUT", "/" + index + "/doc/1");
|
||||||
|
request.addParameter("refresh", "true");
|
||||||
XContentBuilder builder = JsonXContent.contentBuilder().startObject();
|
XContentBuilder builder = JsonXContent.contentBuilder().startObject();
|
||||||
body.accept(builder);
|
body.accept(builder);
|
||||||
builder.endObject();
|
builder.endObject();
|
||||||
HttpEntity doc = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
|
request.setJsonEntity(Strings.toString(builder));
|
||||||
client().performRequest("PUT", "/" + index + "/doc/1", singletonMap("refresh", "true"), doc);
|
client().performRequest(request);
|
||||||
}
|
}
|
||||||
|
|
||||||
protected String clusterName() {
|
protected String clusterName() {
|
||||||
try {
|
try {
|
||||||
String response = EntityUtils.toString(client().performRequest("GET", "/").getEntity());
|
String response = EntityUtils.toString(client().performRequest(new Request("GET", "/")).getEntity());
|
||||||
return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false).get("cluster_name").toString();
|
return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false).get("cluster_name").toString();
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
throw new RuntimeException(e);
|
throw new RuntimeException(e);
|
||||||
|
|
|
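clusterName() in the hunk above is a small example of reading a single field back out of a response with the new API: perform a GET on the root endpoint, turn the entity into a string, and pull one key out of the parsed map. A sketch under the same assumptions, with client(), EntityUtils and XContentHelper imported as in the test class:

    String response = EntityUtils.toString(
            client().performRequest(new Request("GET", "/")).getEntity());
    String clusterName = XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false)
            .get("cluster_name").toString();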
@@ -6,6 +6,7 @@
|
||||||
package org.elasticsearch.xpack.qa.sql.jdbc;
|
package org.elasticsearch.xpack.qa.sql.jdbc;
|
||||||
|
|
||||||
import org.apache.logging.log4j.Logger;
|
import org.apache.logging.log4j.Logger;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import org.elasticsearch.client.ResponseException;
|
import org.elasticsearch.client.ResponseException;
|
||||||
import org.elasticsearch.common.Strings;
|
import org.elasticsearch.common.Strings;
|
||||||
import org.elasticsearch.common.SuppressForbidden;
|
import org.elasticsearch.common.SuppressForbidden;
|
||||||
|
@@ -49,7 +50,7 @@ public abstract class SpecBaseIntegrationTestCase extends JdbcIntegrationTestCas
|
||||||
|
|
||||||
@Before
|
@Before
|
||||||
public void setupTestDataIfNeeded() throws Exception {
|
public void setupTestDataIfNeeded() throws Exception {
|
||||||
if (client().performRequest("HEAD", "/test_emp").getStatusLine().getStatusCode() == 404) {
|
if (client().performRequest(new Request("HEAD", "/test_emp")).getStatusLine().getStatusCode() == 404) {
|
||||||
DataLoader.loadDatasetIntoEs(client());
|
DataLoader.loadDatasetIntoEs(client());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
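setupTestDataIfNeeded above loads the employee dataset only when the test_emp index is missing, using a HEAD request and checking the status code rather than parsing a body. A sketch of that existence probe; the index name comes from the hunk and the loader call stands in for DataLoader.loadDatasetIntoEs:

    // HEAD returns no body, so the status code alone tells us whether the index exists.
    if (client().performRequest(new Request("HEAD", "/test_emp")).getStatusLine().getStatusCode() == 404) {
        DataLoader.loadDatasetIntoEs(client());
    }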
@@ -62,7 +63,7 @@ public abstract class SpecBaseIntegrationTestCase extends JdbcIntegrationTestCas
|
||||||
@AfterClass
|
@AfterClass
|
||||||
public static void wipeTestData() throws IOException {
|
public static void wipeTestData() throws IOException {
|
||||||
try {
|
try {
|
||||||
adminClient().performRequest("DELETE", "/*");
|
adminClient().performRequest(new Request("DELETE", "/*"));
|
||||||
} catch (ResponseException e) {
|
} catch (ResponseException e) {
|
||||||
// 404 here just means we had no indexes
|
// 404 here just means we had no indexes
|
||||||
if (e.getResponse().getStatusLine().getStatusCode() != 404) {
|
if (e.getResponse().getStatusLine().getStatusCode() != 404) {
|
||||||
|
|
|
@@ -12,6 +12,7 @@ import org.apache.http.HttpEntity;
|
||||||
import org.apache.http.entity.ContentType;
|
import org.apache.http.entity.ContentType;
|
||||||
import org.apache.http.entity.StringEntity;
|
import org.apache.http.entity.StringEntity;
|
||||||
import org.apache.http.message.BasicHeader;
|
import org.apache.http.message.BasicHeader;
|
||||||
|
import org.elasticsearch.client.Request;
|
||||||
import org.elasticsearch.client.Response;
|
import org.elasticsearch.client.Response;
|
||||||
import org.elasticsearch.client.ResponseException;
|
import org.elasticsearch.client.ResponseException;
|
||||||
import org.elasticsearch.common.CheckedSupplier;
|
import org.elasticsearch.common.CheckedSupplier;
|
||||||
|
@@ -74,16 +75,19 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testNextPage() throws IOException {
|
public void testNextPage() throws IOException {
|
||||||
|
Request request = new Request("POST", "/test/test/_bulk");
|
||||||
|
request.addParameter("refresh", "true");
|
||||||
String mode = randomMode();
|
String mode = randomMode();
|
||||||
StringBuilder bulk = new StringBuilder();
|
StringBuilder bulk = new StringBuilder();
|
||||||
for (int i = 0; i < 20; i++) {
|
for (int i = 0; i < 20; i++) {
|
||||||
bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n");
|
bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n");
|
||||||
bulk.append("{\"text\":\"text" + i + "\", \"number\":" + i + "}\n");
|
bulk.append("{\"text\":\"text" + i + "\", \"number\":" + i + "}\n");
|
||||||
}
|
}
|
||||||
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
|
request.setJsonEntity(bulk.toString());
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
client().performRequest(request);
|
||||||
|
|
||||||
String request = "{\"query\":\""
|
String sqlRequest =
|
||||||
|
"{\"query\":\""
|
||||||
+ " SELECT text, number, SQRT(number) AS s, SCORE()"
|
+ " SELECT text, number, SQRT(number) AS s, SCORE()"
|
||||||
+ " FROM test"
|
+ " FROM test"
|
||||||
+ " ORDER BY number, SCORE()\", "
|
+ " ORDER BY number, SCORE()\", "
|
||||||
|
@@ -94,7 +98,7 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
|
||||||
for (int i = 0; i < 20; i += 2) {
|
for (int i = 0; i < 20; i += 2) {
|
||||||
Map<String, Object> response;
|
Map<String, Object> response;
|
||||||
if (i == 0) {
|
if (i == 0) {
|
||||||
response = runSql(mode, new StringEntity(request, ContentType.APPLICATION_JSON));
|
response = runSql(mode, new StringEntity(sqlRequest, ContentType.APPLICATION_JSON));
|
||||||
} else {
|
} else {
|
||||||
response = runSql(mode, new StringEntity("{\"cursor\":\"" + cursor + "\"}",
|
response = runSql(mode, new StringEntity("{\"cursor\":\"" + cursor + "\"}",
|
||||||
ContentType.APPLICATION_JSON));
|
ContentType.APPLICATION_JSON));
|
||||||
|
@@ -138,12 +142,14 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testScoreWithFieldNamedScore() throws IOException {
|
public void testScoreWithFieldNamedScore() throws IOException {
|
||||||
|
Request request = new Request("POST", "/test/test/_bulk");
|
||||||
|
request.addParameter("refresh", "true");
|
||||||
String mode = randomMode();
|
String mode = randomMode();
|
||||||
StringBuilder bulk = new StringBuilder();
|
StringBuilder bulk = new StringBuilder();
|
||||||
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
|
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
|
||||||
bulk.append("{\"name\":\"test\", \"score\":10}\n");
|
bulk.append("{\"name\":\"test\", \"score\":10}\n");
|
||||||
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
|
request.setJsonEntity(bulk.toString());
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
client().performRequest(request);
|
||||||
|
|
||||||
Map<String, Object> expected = new HashMap<>();
|
Map<String, Object> expected = new HashMap<>();
|
||||||
expected.put("columns", Arrays.asList(
|
expected.put("columns", Arrays.asList(
|
||||||
|
@@ -209,7 +215,9 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
|
||||||
@Override
|
@Override
|
||||||
public void testSelectFromIndexWithoutTypes() throws Exception {
|
public void testSelectFromIndexWithoutTypes() throws Exception {
|
||||||
// Create an index without any types
|
// Create an index without any types
|
||||||
client().performRequest("PUT", "/test", emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON));
|
Request request = new Request("PUT", "/test");
|
||||||
|
request.setJsonEntity("{}");
|
||||||
|
client().performRequest(request);
|
||||||
String mode = randomFrom("jdbc", "plain");
|
String mode = randomFrom("jdbc", "plain");
|
||||||
expectBadRequest(() -> runSql(mode, "SELECT * FROM test"),
|
expectBadRequest(() -> runSql(mode, "SELECT * FROM test"),
|
||||||
containsString("1:15: [test] doesn't have any types so it is incompatible with sql"));
|
containsString("1:15: [test] doesn't have any types so it is incompatible with sql"));
|
||||||
|
@ -229,24 +237,9 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
|
||||||
containsString("1:8: Unknown function [missing]"));
|
containsString("1:8: Unknown function [missing]"));
|
||||||
}
|
}
|
||||||
|
|
||||||
private void index(String... docs) throws IOException {
|
|
||||||
StringBuilder bulk = new StringBuilder();
|
|
||||||
for (String doc : docs) {
|
|
||||||
bulk.append("{\"index\":{}\n");
|
|
||||||
bulk.append(doc + "\n");
|
|
||||||
}
|
|
||||||
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
|
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void testSelectProjectScoreInAggContext() throws Exception {
|
public void testSelectProjectScoreInAggContext() throws Exception {
|
||||||
StringBuilder bulk = new StringBuilder();
|
index("{\"foo\":1}");
|
||||||
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
|
|
||||||
bulk.append("{\"foo\":1}\n");
|
|
||||||
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
|
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
|
||||||
|
|
||||||
expectBadRequest(() -> runSql(randomMode(),
|
expectBadRequest(() -> runSql(randomMode(),
|
||||||
" SELECT foo, SCORE(), COUNT(*)"
|
" SELECT foo, SCORE(), COUNT(*)"
|
||||||
+ " FROM test"
|
+ " FROM test"
|
||||||
|
@@ -256,12 +249,7 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void testSelectOrderByScoreInAggContext() throws Exception {
|
public void testSelectOrderByScoreInAggContext() throws Exception {
|
||||||
StringBuilder bulk = new StringBuilder();
|
index("{\"foo\":1}");
|
||||||
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
|
|
||||||
bulk.append("{\"foo\":1}\n");
|
|
||||||
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
|
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
|
||||||
|
|
||||||
expectBadRequest(() -> runSql(randomMode(),
|
expectBadRequest(() -> runSql(randomMode(),
|
||||||
" SELECT foo, COUNT(*)"
|
" SELECT foo, COUNT(*)"
|
||||||
+ " FROM test"
|
+ " FROM test"
|
||||||
|
@@ -272,36 +260,21 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void testSelectGroupByScore() throws Exception {
|
public void testSelectGroupByScore() throws Exception {
|
||||||
StringBuilder bulk = new StringBuilder();
|
index("{\"foo\":1}");
|
||||||
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
|
|
||||||
bulk.append("{\"foo\":1}\n");
|
|
||||||
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
|
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
|
||||||
|
|
||||||
expectBadRequest(() -> runSql(randomMode(), "SELECT COUNT(*) FROM test GROUP BY SCORE()"),
|
expectBadRequest(() -> runSql(randomMode(), "SELECT COUNT(*) FROM test GROUP BY SCORE()"),
|
||||||
containsString("Cannot use [SCORE()] for grouping"));
|
containsString("Cannot use [SCORE()] for grouping"));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void testSelectScoreSubField() throws Exception {
|
public void testSelectScoreSubField() throws Exception {
|
||||||
StringBuilder bulk = new StringBuilder();
|
index("{\"foo\":1}");
|
||||||
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
|
|
||||||
bulk.append("{\"foo\":1}\n");
|
|
||||||
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
|
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
|
||||||
|
|
||||||
expectBadRequest(() -> runSql(randomMode(), "SELECT SCORE().bar FROM test"),
|
expectBadRequest(() -> runSql(randomMode(), "SELECT SCORE().bar FROM test"),
|
||||||
containsString("line 1:15: extraneous input '.' expecting {<EOF>, ','"));
|
containsString("line 1:15: extraneous input '.' expecting {<EOF>, ','"));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void testSelectScoreInScalar() throws Exception {
|
public void testSelectScoreInScalar() throws Exception {
|
||||||
StringBuilder bulk = new StringBuilder();
|
index("{\"foo\":1}");
|
||||||
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
|
|
||||||
bulk.append("{\"foo\":1}\n");
|
|
||||||
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
|
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
|
||||||
|
|
||||||
expectBadRequest(() -> runSql(randomMode(), "SELECT SIN(SCORE()) FROM test"),
|
expectBadRequest(() -> runSql(randomMode(), "SELECT SIN(SCORE()) FROM test"),
|
||||||
containsString("line 1:12: [SCORE()] cannot be an argument to a function"));
|
containsString("line 1:12: [SCORE()] cannot be an argument to a function"));
|
||||||
}
|
}
|
||||||
|
@@ -340,37 +313,32 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
|
||||||
}
|
}
|
||||||
|
|
||||||
private Map<String, Object> runSql(String mode, HttpEntity sql, String suffix) throws IOException {
|
private Map<String, Object> runSql(String mode, HttpEntity sql, String suffix) throws IOException {
|
||||||
Map<String, String> params = new TreeMap<>();
|
Request request = new Request("POST", "/_xpack/sql" + suffix);
|
||||||
params.put("error_trace", "true"); // Helps with debugging in case something crazy happens on the server.
|
request.addParameter("error_trace", "true"); // Helps with debugging in case something crazy happens on the server.
|
||||||
params.put("pretty", "true"); // Improves error reporting readability
|
request.addParameter("pretty", "true"); // Improves error reporting readability
|
||||||
if (randomBoolean()) {
|
if (randomBoolean()) {
|
||||||
// We default to JSON but we force it randomly for extra coverage
|
// We default to JSON but we force it randomly for extra coverage
|
||||||
params.put("format", "json");
|
request.addParameter("format", "json");
|
||||||
}
|
}
|
||||||
if (Strings.hasText(mode)) {
|
if (false == mode.isEmpty()) {
|
||||||
params.put("mode", mode); // JDBC or PLAIN mode
|
request.addParameter("mode", mode); // JDBC or PLAIN mode
|
||||||
}
|
}
|
||||||
Header[] headers = randomFrom(
|
request.setHeaders(randomFrom(
|
||||||
new Header[] {},
|
new Header[] {},
|
||||||
new Header[] {new BasicHeader("Accept", "*/*")},
|
new Header[] {new BasicHeader("Accept", "*/*")},
|
||||||
new Header[] {new BasicHeader("Accpet", "application/json")});
|
new Header[] {new BasicHeader("Accpet", "application/json")}));
|
||||||
Response response = client().performRequest("POST", "/_xpack/sql" + suffix, params, sql);
|
request.setEntity(sql);
|
||||||
|
Response response = client().performRequest(request);
|
||||||
try (InputStream content = response.getEntity().getContent()) {
|
try (InputStream content = response.getEntity().getContent()) {
|
||||||
return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
|
return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testBasicTranslateQuery() throws IOException {
|
public void testBasicTranslateQuery() throws IOException {
|
||||||
StringBuilder bulk = new StringBuilder();
|
index("{\"test\":\"test\"}", "{\"test\":\"test\"}");
|
||||||
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
|
|
||||||
bulk.append("{\"test\":\"test\"}\n");
|
|
||||||
bulk.append("{\"index\":{\"_id\":\"2\"}}\n");
|
|
||||||
bulk.append("{\"test\":\"test\"}\n");
|
|
||||||
client().performRequest("POST", "/test_translate/test/_bulk", singletonMap("refresh", "true"),
|
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
|
||||||
|
|
||||||
Map<String, Object> response = runSql(randomMode(), "SELECT * FROM test_translate", "/translate/");
|
Map<String, Object> response = runSql(randomMode(), "SELECT * FROM test", "/translate/");
|
||||||
assertEquals(response.get("size"), 1000);
|
assertEquals(1000, response.get("size"));
|
||||||
@SuppressWarnings("unchecked")
|
@SuppressWarnings("unchecked")
|
||||||
Map<String, Object> source = (Map<String, Object>) response.get("_source");
|
Map<String, Object> source = (Map<String, Object>) response.get("_source");
|
||||||
assertNotNull(source);
|
assertNotNull(source);
|
||||||
|
@@ -459,13 +427,12 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
|
||||||
}
|
}
|
||||||
|
|
||||||
public void testNextPageText() throws IOException {
|
public void testNextPageText() throws IOException {
|
||||||
StringBuilder bulk = new StringBuilder();
|
int size = 20;
|
||||||
for (int i = 0; i < 20; i++) {
|
String[] docs = new String[size];
|
||||||
bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n");
|
for (int i = 0; i < size; i++) {
|
||||||
bulk.append("{\"text\":\"text" + i + "\", \"number\":" + i + "}\n");
|
docs[i] = "{\"text\":\"text" + i + "\", \"number\":" + i + "}\n";
|
||||||
}
|
}
|
||||||
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
|
index(docs);
|
||||||
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
|
|
||||||
|
|
||||||
String request = "{\"query\":\"SELECT text, number, number + 5 AS sum FROM test ORDER BY number\", \"fetch_size\":2}";
|
String request = "{\"query\":\"SELECT text, number, number + 5 AS sum FROM test ORDER BY number\", \"fetch_size\":2}";
|
||||||
|
|
||||||
|
@@ -563,23 +530,33 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
|
||||||
return runSqlAsText("", new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON), accept);
|
return runSqlAsText("", new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON), accept);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Run SQL as text using the {@code Accept} header to specify the format
|
||||||
|
* rather than the {@code format} parameter.
|
||||||
|
*/
|
||||||
private Tuple<String, String> runSqlAsText(String suffix, HttpEntity entity, String accept) throws IOException {
|
private Tuple<String, String> runSqlAsText(String suffix, HttpEntity entity, String accept) throws IOException {
|
||||||
Response response = client().performRequest("POST", "/_xpack/sql" + suffix, singletonMap("error_trace", "true"),
|
Request request = new Request("POST", "/_xpack/sql" + suffix);
|
||||||
entity, new BasicHeader("Accept", accept));
|
request.addParameter("error_trace", "true");
|
||||||
|
request.setEntity(entity);
|
||||||
|
request.setHeaders(new BasicHeader("Accept", accept));
|
||||||
|
Response response = client().performRequest(request);
|
||||||
return new Tuple<>(
|
return new Tuple<>(
|
||||||
Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)),
|
Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)),
|
||||||
response.getHeader("Cursor")
|
response.getHeader("Cursor")
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Run SQL as text using the {@code format} parameter to specify the format
|
||||||
|
* rather than an {@code Accept} header.
|
||||||
|
*/
|
||||||
private Tuple<String, String> runSqlAsTextFormat(String sql, String format) throws IOException {
|
private Tuple<String, String> runSqlAsTextFormat(String sql, String format) throws IOException {
|
||||||
StringEntity entity = new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON);
|
Request request = new Request("POST", "/_xpack/sql");
|
||||||
|
request.addParameter("error_trace", "true");
|
||||||
|
request.addParameter("format", format);
|
||||||
|
request.setJsonEntity("{\"query\":\"" + sql + "\"}");
|
||||||
|
|
||||||
Map<String, String> params = new HashMap<>();
|
Response response = client().performRequest(request);
|
||||||
params.put("error_trace", "true");
|
|
||||||
params.put("format", format);
|
|
||||||
|
|
||||||
Response response = client().performRequest("POST", "/_xpack/sql", params, entity);
|
|
||||||
return new Tuple<>(
|
return new Tuple<>(
|
||||||
Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)),
|
Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)),
|
||||||
response.getHeader("Cursor")
|
response.getHeader("Cursor")
|
||||||
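The two new javadoc comments distinguish choosing the response format via the Accept header from choosing it via the format request parameter. A small sketch of both, under the assumption that text/csv and format=csv are accepted by the /_xpack/sql endpoint (SqlFormatSketch is an illustrative name, not part of this diff):

    import org.apache.http.message.BasicHeader;
    import org.elasticsearch.client.Request;

    class SqlFormatSketch {
        // Same query, format selected through the Accept header.
        static Request csvViaAcceptHeader() {
            Request request = new Request("POST", "/_xpack/sql");
            request.setJsonEntity("{\"query\":\"SELECT 1\"}");
            request.setHeaders(new BasicHeader("Accept", "text/csv"));
            return request;
        }

        // Same query, format selected through the format query parameter.
        static Request csvViaFormatParam() {
            Request request = new Request("POST", "/_xpack/sql");
            request.setJsonEntity("{\"query\":\"SELECT 1\"}");
            request.addParameter("format", "csv");
            return request;
        }
    }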
@@ -595,23 +572,14 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
     }
 
     public static int getNumberOfSearchContexts(String index) throws IOException {
-        Response response = client().performRequest("GET", "/_stats/search");
-        Map<String, Object> stats;
-        try (InputStream content = response.getEntity().getContent()) {
-            stats = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
-        }
-        return getOpenContexts(stats, index);
+        return getOpenContexts(searchStats(), index);
     }
 
     public static void assertNoSearchContexts() throws IOException {
-        Response response = client().performRequest("GET", "/_stats/search");
-        Map<String, Object> stats;
-        try (InputStream content = response.getEntity().getContent()) {
-            stats = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
-        }
+        Map<String, Object> stats = searchStats();
         @SuppressWarnings("unchecked")
-        Map<String, Object> indexStats = (Map<String, Object>) stats.get("indices");
-        for (String index : indexStats.keySet()) {
+        Map<String, Object> indicesStats = (Map<String, Object>) stats.get("indices");
+        for (String index : indicesStats.keySet()) {
             if (index.startsWith(".") == false) { // We are not interested in internal indices
                 assertEquals(index + " should have no search contexts", 0, getOpenContexts(stats, index));
             }
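The deeply nested casts removed in the next hunk navigate the GET /_stats/search body along indices -> &lt;index&gt; -> total -> search -> open_contexts. A sketch of that same walk with one named local per level (SearchStatsSketch and openContexts are illustrative names, not part of this diff):

    import java.util.Map;

    class SearchStatsSketch {
        // Same path the rewritten getOpenContexts takes, spelled out level by level.
        @SuppressWarnings("unchecked")
        static int openContexts(Map<String, Object> statsResponse, String index) {
            Map<String, Object> indices = (Map<String, Object>) statsResponse.get("indices");
            Map<String, Object> indexStats = (Map<String, Object>) indices.get(index);
            Map<String, Object> total = (Map<String, Object>) indexStats.get("total");
            Map<String, Object> search = (Map<String, Object>) total.get("search");
            return (Integer) search.get("open_contexts");
        }
    }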
@@ -619,12 +587,34 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
     }
 
     @SuppressWarnings("unchecked")
-    public static int getOpenContexts(Map<String, Object> indexStats, String index) {
-        return (int) ((Map<String, Object>) ((Map<String, Object>) ((Map<String, Object>) ((Map<String, Object>)
-                indexStats.get("indices")).get(index)).get("total")).get("search")).get("open_contexts");
+    private static int getOpenContexts(Map<String, Object> stats, String index) {
+        stats = (Map<String, Object>) stats.get("indices");
+        stats = (Map<String, Object>) stats.get(index);
+        stats = (Map<String, Object>) stats.get("total");
+        stats = (Map<String, Object>) stats.get("search");
+        return (Integer) stats.get("open_contexts");
+    }
+
+    private static Map<String, Object> searchStats() throws IOException {
+        Response response = client().performRequest(new Request("GET", "/_stats/search"));
+        try (InputStream content = response.getEntity().getContent()) {
+            return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
+        }
     }
 
     public static String randomMode() {
         return randomFrom("", "jdbc", "plain");
     }
+
+    private void index(String... docs) throws IOException {
+        Request request = new Request("POST", "/test/test/_bulk");
+        request.addParameter("refresh", "true");
+        StringBuilder bulk = new StringBuilder();
+        for (String doc : docs) {
+            bulk.append("{\"index\":{}\n");
+            bulk.append(doc + "\n");
+        }
+        request.setJsonEntity(bulk.toString());
+        client().performRequest(request);
+    }
 }
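With searchStats, getOpenContexts, and the index(...) bulk helper in place, a test in this class can seed documents and query them without assembling HttpEntity or header plumbing by hand. A minimal sketch of such a test (hypothetical, not part of this diff; it assumes it lives inside RestSqlTestCase and reuses the class's existing imports):

    public void testSumOverSeededDocs() throws IOException {
        // Seed two documents through the bulk helper added in this commit.
        index("{\"number\": 1}", "{\"number\": 2}");

        // Query them through the Request-based plumbing.
        Request request = new Request("POST", "/_xpack/sql");
        request.addParameter("format", "txt");
        request.setJsonEntity("{\"query\":\"SELECT SUM(number) FROM test\"}");
        Response response = client().performRequest(request);
        String text = Streams.copyToString(
                new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8));
        assertTrue(text.contains("3"));
    }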