Merge branch 'master' into index-lifecycle
commit b08d7c872b
@@ -147,7 +147,7 @@ class ClusterConfiguration {
     // map from destination path, to source file
     Map<String, Object> extraConfigFiles = new HashMap<>()
 
-    LinkedHashMap<String, Project> plugins = new LinkedHashMap<>()
+    LinkedHashMap<String, Object> plugins = new LinkedHashMap<>()
 
     List<Project> modules = new ArrayList<>()

@@ -185,6 +185,11 @@ class ClusterConfiguration {
         plugins.put(pluginProject.name, pluginProject)
     }
 
+    @Input
+    void mavenPlugin(String name, String mavenCoords) {
+        plugins.put(name, mavenCoords)
+    }
+
     /** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */
     @Input
     void module(Project moduleProject) {
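The new `mavenPlugin` method lets a test cluster install a plugin resolved from a Maven repository instead of one built from a sibling project. A minimal sketch of how a build script might use it; the extension name and the coordinates below are illustrative assumptions, not taken from this commit:

    // hypothetical build.gradle snippet
    integTestCluster {
        // plugin built from a project in this repository (existing behavior)
        plugin project(':plugins:analysis-icu')
        // plugin resolved from Maven coordinates (new in this change)
        mavenPlugin 'my-plugin', 'org.example:my-plugin:6.4.0'
    }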
@@ -99,8 +99,8 @@ class ClusterFormationTasks {
             // from mirrors using gradles built-in mechanism etc.
 
             configureDistributionDependency(project, config.distribution, bwcDistro, config.bwcVersion)
-            for (Map.Entry<String, Project> entry : config.plugins.entrySet()) {
-                configureBwcPluginDependency("${prefix}_elasticsearchBwcPlugins", project, entry.getValue(), bwcPlugins, config.bwcVersion)
+            for (Map.Entry<String, Object> entry : config.plugins.entrySet()) {
+                configureBwcPluginDependency(project, entry.getValue(), bwcPlugins, config.bwcVersion)
             }
             bwcDistro.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS)
             bwcPlugins.resolutionStrategy.cacheChangingModulesFor(0, TimeUnit.SECONDS)

@@ -150,10 +150,15 @@ class ClusterFormationTasks {
     }
 
     /** Adds a dependency on a different version of the given plugin, which will be retrieved using gradle's dependency resolution */
-    static void configureBwcPluginDependency(String name, Project project, Project pluginProject, Configuration configuration, Version elasticsearchVersion) {
-        verifyProjectHasBuildPlugin(name, elasticsearchVersion, project, pluginProject)
+    static void configureBwcPluginDependency(Project project, Object plugin, Configuration configuration, Version elasticsearchVersion) {
+        if (plugin instanceof Project) {
+            Project pluginProject = (Project)plugin
+            verifyProjectHasBuildPlugin(configuration.name, elasticsearchVersion, project, pluginProject)
             final String pluginName = findPluginName(pluginProject)
             project.dependencies.add(configuration.name, "org.elasticsearch.plugin:${pluginName}:${elasticsearchVersion}@zip")
+        } else {
+            project.dependencies.add(configuration.name, "${plugin}@zip")
+        }
     }
 
     /**
@@ -210,9 +215,9 @@ class ClusterFormationTasks {
         }
 
         // install plugins
-        for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
-            String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
-            setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, plugin.getValue(), prefix)
+        for (String pluginName : node.config.plugins.keySet()) {
+            String actionName = pluginTaskName('install', pluginName, 'Plugin')
+            setup = configureInstallPluginTask(taskName(prefix, node, actionName), project, setup, node, pluginName, prefix)
         }
 
         // sets up any extra config files that need to be copied over to the ES instance;
@@ -444,15 +449,18 @@ class ClusterFormationTasks {
         Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup)
 
         List<FileCollection> pluginFiles = []
-        for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
+        for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
 
-            Project pluginProject = plugin.getValue()
-            verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
-            String configurationName = pluginConfigurationName(prefix, pluginProject)
+            String configurationName = pluginConfigurationName(prefix, plugin.key)
             Configuration configuration = project.configurations.findByName(configurationName)
             if (configuration == null) {
                 configuration = project.configurations.create(configurationName)
             }
 
+            if (plugin.getValue() instanceof Project) {
+                Project pluginProject = plugin.getValue()
+                verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
+
                 project.dependencies.add(configurationName, project.dependencies.project(path: pluginProject.path, configuration: 'zip'))
                 setup.dependsOn(pluginProject.tasks.bundlePlugin)

@@ -469,6 +477,12 @@ class ClusterFormationTasks {
                 }
                 copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
             }
+            } else {
+                project.dependencies.add(configurationName, "${plugin.getValue()}@zip")
+            }
 
             pluginFiles.add(configuration)
         }
@@ -477,32 +491,37 @@ class ClusterFormationTasks {
         return copyPlugins
     }
 
-    private static String pluginConfigurationName(final String prefix, final Project project) {
-        return "_plugin_${prefix}_${project.path}".replace(':', '_')
+    private static String pluginConfigurationName(final String prefix, final String name) {
+        return "_plugin_${prefix}_${name}".replace(':', '_')
     }
 
-    private static String pluginBwcConfigurationName(final String prefix, final Project project) {
-        return "_plugin_bwc_${prefix}_${project.path}".replace(':', '_')
+    private static String pluginBwcConfigurationName(final String prefix, final String name) {
+        return "_plugin_bwc_${prefix}_${name}".replace(':', '_')
     }
 
     /** Configures task to copy a plugin based on a zip file resolved using dependencies for an older version */
     static Task configureCopyBwcPluginsTask(String name, Project project, Task setup, NodeInfo node, String prefix) {
         Configuration bwcPlugins = project.configurations.getByName("${prefix}_elasticsearchBwcPlugins")
-        for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
-            Project pluginProject = plugin.getValue()
-            verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
-            String configurationName = pluginBwcConfigurationName(prefix, pluginProject)
+        for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
+            String configurationName = pluginBwcConfigurationName(prefix, plugin.key)
             Configuration configuration = project.configurations.findByName(configurationName)
             if (configuration == null) {
                 configuration = project.configurations.create(configurationName)
             }
 
+            if (plugin.getValue() instanceof Project) {
+                Project pluginProject = plugin.getValue()
+                verifyProjectHasBuildPlugin(name, node.nodeVersion, project, pluginProject)
+
                 final String depName = findPluginName(pluginProject)
 
                 Dependency dep = bwcPlugins.dependencies.find {
                     it.name == depName
                 }
                 configuration.dependencies.add(dep)
+            } else {
+                project.dependencies.add(configurationName, "${plugin.getValue()}@zip")
+            }
         }
 
         Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup) {
@@ -527,12 +546,12 @@ class ClusterFormationTasks {
         return installModule
     }
 
-    static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Project plugin, String prefix) {
+    static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, String pluginName, String prefix) {
         final FileCollection pluginZip;
         if (node.nodeVersion != VersionProperties.elasticsearch) {
-            pluginZip = project.configurations.getByName(pluginBwcConfigurationName(prefix, plugin))
+            pluginZip = project.configurations.getByName(pluginBwcConfigurationName(prefix, pluginName))
         } else {
-            pluginZip = project.configurations.getByName(pluginConfigurationName(prefix, plugin))
+            pluginZip = project.configurations.getByName(pluginConfigurationName(prefix, pluginName))
         }
         // delay reading the file location until execution time by wrapping in a closure within a GString
         final Object file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
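The "delay reading the file location" comment above relies on a Groovy idiom: a GString that embeds a closure (`"${-> ...}"`) is re-evaluated each time the string is rendered, so the plugin zip location is only resolved at task execution time rather than at configuration time. A small standalone sketch of the idiom, illustrative only and not part of the commit:

    def counter = 0
    String eager = "count is ${counter}"     // value captured when the string is created
    GString lazy = "count is ${-> counter}"  // closure runs on each toString()
    counter = 42
    assert eager == 'count is 0'
    assert lazy.toString() == 'count is 42'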
@@ -163,7 +163,8 @@ analysis module. ({pull}30397[#30397])
 [float]
 === Enhancements
 
-{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255])
+{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow
+copying source settings on index resize operations] ({pull}30255[#30255], {pull}30404[#30404])
 
 Added new "Request" object flavored request methods in the RestClient. Prefer
 these instead of the multi-argument versions. ({pull}29623[#29623])
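The release note above refers to the low-level REST client's `Request` class, which replaces the older multi-argument `performRequest` overloads. A hedged sketch of the new style; the host, endpoint, and parameter are illustrative only:

    import org.apache.http.HttpHost
    import org.apache.http.util.EntityUtils
    import org.elasticsearch.client.Request
    import org.elasticsearch.client.Response
    import org.elasticsearch.client.RestClient

    // build a client, describe the call as a Request object, then execute it
    RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()
    Request request = new Request("GET", "/_cluster/health")
    request.addParameter("wait_for_status", "yellow")
    Response response = client.performRequest(request)
    println EntityUtils.toString(response.getEntity())
    client.close()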
@@ -62,11 +62,20 @@ the following request:
 
 [source,js]
 --------------------------------------------------
-POST my_source_index/_shrink/my_target_index
+POST my_source_index/_shrink/my_target_index?copy_settings=true
+{
+  "settings": {
+    "index.routing.allocation.require._name": null, <1>
+    "index.blocks.write": null <2>
+  }
+}
 --------------------------------------------------
 // CONSOLE
 // TEST[continued]
 
+<1> Clear the allocation requirement copied from the source index.
+<2> Clear the index write block copied from the source index.
+
 The above request returns immediately once the target index has been added to
 the cluster state -- it doesn't wait for the shrink operation to start.

@@ -97,7 +106,7 @@ and accepts `settings` and `aliases` parameters for the target index:
 
 [source,js]
 --------------------------------------------------
-POST my_source_index/_shrink/my_target_index
+POST my_source_index/_shrink/my_target_index?copy_settings=true
 {
   "settings": {
     "index.number_of_replicas": 1,

@@ -125,9 +134,11 @@ NOTE: By default, with the exception of `index.analysis`, `index.similarity`,
 and `index.sort` settings, index settings on the source index are not copied
 during a shrink operation. With the exception of non-copyable settings, settings
 from the source index can be copied to the target index by adding the URL
-parameter `copy_settings=true` to the request.
+parameter `copy_settings=true` to the request. Note that `copy_settings` can not
+be set to `false`. The parameter `copy_settings` will be removed in 8.0.0.
 
-deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0]
+deprecated[6.4.0, not copying settings is deprecated, copying settings will be
+the default behavior in 7.x]
 
 [float]
 === Monitoring the shrink process
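The `copy_settings` behaviour documented in the shrink section above can be exercised through the same Request-flavored client API shown earlier. A hedged sketch that reuses the documentation's index names; the settings body is illustrative:

    // shrink the index and copy its source settings; copy_settings=true is the
    // non-deprecated choice, and the parameter cannot be set to false
    RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()
    Request shrink = new Request("POST", "/my_source_index/_shrink/my_target_index")
    shrink.addParameter("copy_settings", "true")
    shrink.setJsonEntity('{"settings": {"index.number_of_replicas": 1}}')
    Response response = client.performRequest(shrink)
    client.close()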
@@ -123,7 +123,7 @@ the following request:
 
 [source,js]
 --------------------------------------------------
-POST my_source_index/_split/my_target_index
+POST my_source_index/_split/my_target_index?copy_settings=true
 {
   "settings": {
     "index.number_of_shards": 2

@@ -158,7 +158,7 @@ and accepts `settings` and `aliases` parameters for the target index:
 
 [source,js]
 --------------------------------------------------
-POST my_source_index/_split/my_target_index
+POST my_source_index/_split/my_target_index?copy_settings=true
 {
   "settings": {
     "index.number_of_shards": 5 <1>

@@ -181,9 +181,11 @@ NOTE: By default, with the exception of `index.analysis`, `index.similarity`,
 and `index.sort` settings, index settings on the source index are not copied
 during a split operation. With the exception of non-copyable settings, settings
 from the source index can be copied to the target index by adding the URL
-parameter `copy_settings=true` to the request.
+parameter `copy_settings=true` to the request. Note that `copy_settings` can not
+be set to `false`. The parameter `copy_settings` will be removed in 8.0.0.
 
-deprecated[6.4.0, `copy_settings` will default to `true` in 8.x and will be removed in 9.0.0]
+deprecated[6.4.0, not copying settings is deprecated, copying settings will be
+the default behavior in 7.x]
 
 [float]
 === Monitoring the split process
@@ -1,2 +1,2 @@
 org.gradle.daemon=false
-org.gradle.jvmargs=-Xmx1792m
+org.gradle.jvmargs=-Xmx2g
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.standard.ClassicTokenizer;
|
||||
|
@ -25,6 +25,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
|
||||
|
||||
/**
|
||||
* Factory for {@link ClassicTokenizer}
|
||||
|
@ -33,7 +34,7 @@ public class ClassicTokenizerFactory extends AbstractTokenizerFactory {
|
|||
|
||||
private final int maxTokenLength;
|
||||
|
||||
public ClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
ClassicTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
super(indexSettings, name, settings);
|
||||
maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
|
||||
}
|
|
@ -34,9 +34,11 @@ import org.apache.lucene.analysis.ckb.SoraniNormalizationFilter;
|
|||
import org.apache.lucene.analysis.commongrams.CommonGramsFilter;
|
||||
import org.apache.lucene.analysis.core.DecimalDigitFilter;
|
||||
import org.apache.lucene.analysis.core.KeywordTokenizer;
|
||||
import org.apache.lucene.analysis.core.LetterTokenizer;
|
||||
import org.apache.lucene.analysis.core.LowerCaseTokenizer;
|
||||
import org.apache.lucene.analysis.core.StopAnalyzer;
|
||||
import org.apache.lucene.analysis.core.UpperCaseFilter;
|
||||
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
|
||||
import org.apache.lucene.analysis.cz.CzechStemFilter;
|
||||
import org.apache.lucene.analysis.de.GermanNormalizationFilter;
|
||||
import org.apache.lucene.analysis.de.GermanStemFilter;
|
||||
|
@ -58,17 +60,25 @@ import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter;
|
|||
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
|
||||
import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;
|
||||
import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
|
||||
import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
|
||||
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
|
||||
import org.apache.lucene.analysis.ngram.NGramTokenizer;
|
||||
import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
|
||||
import org.apache.lucene.analysis.pattern.PatternTokenizer;
|
||||
import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter;
|
||||
import org.apache.lucene.analysis.payloads.TypeAsPayloadTokenFilter;
|
||||
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
|
||||
import org.apache.lucene.analysis.shingle.ShingleFilter;
|
||||
import org.apache.lucene.analysis.snowball.SnowballFilter;
|
||||
import org.apache.lucene.analysis.standard.ClassicFilter;
|
||||
import org.apache.lucene.analysis.standard.ClassicTokenizer;
|
||||
import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
|
||||
import org.apache.lucene.analysis.th.ThaiTokenizer;
|
||||
import org.apache.lucene.analysis.tr.ApostropheFilter;
|
||||
import org.apache.lucene.analysis.util.ElisionFilter;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.index.analysis.CharFilterFactory;
|
||||
import org.elasticsearch.index.analysis.PreConfiguredCharFilter;
|
||||
import org.elasticsearch.index.analysis.PreConfiguredTokenFilter;
|
||||
|
@ -169,6 +179,19 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
|
|||
Map<String, AnalysisProvider<TokenizerFactory>> tokenizers = new TreeMap<>();
|
||||
tokenizers.put("simple_pattern", SimplePatternTokenizerFactory::new);
|
||||
tokenizers.put("simple_pattern_split", SimplePatternSplitTokenizerFactory::new);
|
||||
tokenizers.put("thai", ThaiTokenizerFactory::new);
|
||||
tokenizers.put("nGram", NGramTokenizerFactory::new);
|
||||
tokenizers.put("ngram", NGramTokenizerFactory::new);
|
||||
tokenizers.put("edgeNGram", EdgeNGramTokenizerFactory::new);
|
||||
tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new);
|
||||
tokenizers.put("classic", ClassicTokenizerFactory::new);
|
||||
tokenizers.put("letter", LetterTokenizerFactory::new);
|
||||
tokenizers.put("lowercase", LowerCaseTokenizerFactory::new);
|
||||
tokenizers.put("path_hierarchy", PathHierarchyTokenizerFactory::new);
|
||||
tokenizers.put("PathHierarchy", PathHierarchyTokenizerFactory::new);
|
||||
tokenizers.put("pattern", PatternTokenizerFactory::new);
|
||||
tokenizers.put("uax_url_email", UAX29URLEmailTokenizerFactory::new);
|
||||
tokenizers.put("whitespace", WhitespaceTokenizerFactory::new);
|
||||
return tokenizers;
|
||||
}
|
||||
|
||||
|
@ -283,6 +306,16 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
|
|||
public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
|
||||
List<PreConfiguredTokenizer> tokenizers = new ArrayList<>();
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("keyword", KeywordTokenizer::new, null));
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("classic", ClassicTokenizer::new, null));
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("uax_url_email", UAX29URLEmailTokenizer::new, null));
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("path_hierarchy", PathHierarchyTokenizer::new, null));
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new, null));
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new, null));
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new, null));
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("edge_ngram",
|
||||
() -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null));
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1), null));
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new, null));
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", LowerCaseTokenizer::new, () -> new TokenFilterFactory() {
|
||||
@Override
|
||||
public String name() {
|
||||
|
@ -294,6 +327,13 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
|
|||
return new LowerCaseFilter(tokenStream);
|
||||
}
|
||||
}));
|
||||
|
||||
// Temporary shim for aliases. TODO deprecate after they are moved
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("nGram", NGramTokenizer::new, null));
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("edgeNGram",
|
||||
() -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null));
|
||||
tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new, null));
|
||||
|
||||
return tokenizers;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
|
||||
|
@ -25,19 +25,17 @@ import org.apache.lucene.analysis.ngram.NGramTokenizer;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
|
||||
|
||||
import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenChars;
|
||||
import static org.elasticsearch.analysis.common.NGramTokenizerFactory.parseTokenChars;
|
||||
|
||||
public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory {
|
||||
|
||||
private final int minGram;
|
||||
|
||||
private final int maxGram;
|
||||
|
||||
private final CharMatcher matcher;
|
||||
|
||||
|
||||
public EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
EdgeNGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
super(indexSettings, name, settings);
|
||||
this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
|
||||
this.maxGram = settings.getAsInt("max_gram", NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE);
|
|
@ -17,17 +17,18 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.core.LetterTokenizer;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
|
||||
|
||||
public class LetterTokenizerFactory extends AbstractTokenizerFactory {
|
||||
|
||||
public LetterTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
LetterTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
super(indexSettings, name, settings);
|
||||
}
|
||||
|
|
@ -17,17 +17,19 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.core.LowerCaseTokenizer;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.MultiTermAwareComponent;
|
||||
|
||||
public class LowerCaseTokenizerFactory extends AbstractTokenizerFactory implements MultiTermAwareComponent {
|
||||
|
||||
public LowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
LowerCaseTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
super(indexSettings, name, settings);
|
||||
}
|
||||
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.ngram.NGramTokenizer;
|
||||
|
@ -25,6 +25,7 @@ import org.elasticsearch.Version;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.lang.reflect.Modifier;
|
||||
|
@ -83,7 +84,7 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory {
|
|||
return builder.build();
|
||||
}
|
||||
|
||||
public NGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
NGramTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
super(indexSettings, name, settings);
|
||||
int maxAllowedNgramDiff = indexSettings.getMaxNgramDiff();
|
||||
this.minGram = settings.getAsInt("min_gram", NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE);
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
|
||||
|
@ -25,6 +25,7 @@ import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
|
||||
|
||||
public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
|
||||
|
||||
|
@ -35,7 +36,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory {
|
|||
private final int skip;
|
||||
private final boolean reverse;
|
||||
|
||||
public PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
super(indexSettings, name, settings);
|
||||
bufferSize = settings.getAsInt("buffer_size", 1024);
|
||||
String delimiter = settings.get("delimiter");
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.pattern.PatternTokenizer;
|
||||
|
@ -25,6 +25,7 @@ import org.elasticsearch.common.regex.Regex;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
|
||||
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
|
@ -33,7 +34,7 @@ public class PatternTokenizerFactory extends AbstractTokenizerFactory {
|
|||
private final Pattern pattern;
|
||||
private final int group;
|
||||
|
||||
public PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
PatternTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
super(indexSettings, name, settings);
|
||||
|
||||
String sPattern = settings.get("pattern", "\\W+" /*PatternAnalyzer.NON_WORD_PATTERN*/);
|
|
@ -17,20 +17,21 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.th.ThaiTokenizer;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
|
||||
|
||||
/**
|
||||
* Factory for {@link ThaiTokenizer}
|
||||
*/
|
||||
public class ThaiTokenizerFactory extends AbstractTokenizerFactory {
|
||||
|
||||
public ThaiTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
ThaiTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
super(indexSettings, name, settings);
|
||||
}
|
||||
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||
|
@ -25,12 +25,13 @@ import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
|
||||
|
||||
public class UAX29URLEmailTokenizerFactory extends AbstractTokenizerFactory {
|
||||
|
||||
private final int maxTokenLength;
|
||||
|
||||
public UAX29URLEmailTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
UAX29URLEmailTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
super(indexSettings, name, settings);
|
||||
maxTokenLength = settings.getAsInt("max_token_length", StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
|
||||
}
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
|
@ -26,13 +26,14 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
|
||||
|
||||
public class WhitespaceTokenizerFactory extends AbstractTokenizerFactory {
|
||||
|
||||
static final String MAX_TOKEN_LENGTH = "max_token_length";
|
||||
private Integer maxTokenLength;
|
||||
|
||||
public WhitespaceTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
WhitespaceTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
|
||||
super(indexSettings, name, settings);
|
||||
maxTokenLength = settings.getAsInt(MAX_TOKEN_LENGTH, StandardAnalyzer.DEFAULT_MAX_TOKEN_LENGTH);
|
||||
}
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
|
@ -24,6 +24,7 @@ import org.apache.lucene.analysis.en.PorterStemFilterFactory;
|
|||
import org.apache.lucene.analysis.miscellaneous.LimitTokenCountFilterFactory;
|
||||
import org.apache.lucene.analysis.reverse.ReverseStringFilterFactory;
|
||||
import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory;
|
||||
import org.elasticsearch.index.analysis.KeywordTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.SoraniNormalizationFilterFactory;
|
||||
import org.elasticsearch.index.analysis.SynonymTokenFilterFactory;
|
||||
import org.elasticsearch.indices.analysis.AnalysisFactoryTestCase;
|
||||
|
@ -45,6 +46,16 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase {
|
|||
Map<String, Class<?>> tokenizers = new TreeMap<>(super.getTokenizers());
|
||||
tokenizers.put("simplepattern", SimplePatternTokenizerFactory.class);
|
||||
tokenizers.put("simplepatternsplit", SimplePatternSplitTokenizerFactory.class);
|
||||
tokenizers.put("thai", ThaiTokenizerFactory.class);
|
||||
tokenizers.put("ngram", NGramTokenizerFactory.class);
|
||||
tokenizers.put("edgengram", EdgeNGramTokenizerFactory.class);
|
||||
tokenizers.put("classic", ClassicTokenizerFactory.class);
|
||||
tokenizers.put("letter", LetterTokenizerFactory.class);
|
||||
tokenizers.put("lowercase", LowerCaseTokenizerFactory.class);
|
||||
tokenizers.put("pathhierarchy", PathHierarchyTokenizerFactory.class);
|
||||
tokenizers.put("pattern", PatternTokenizerFactory.class);
|
||||
tokenizers.put("uax29urlemail", UAX29URLEmailTokenizerFactory.class);
|
||||
tokenizers.put("whitespace", WhitespaceTokenizerFactory.class);
|
||||
return tokenizers;
|
||||
}
|
||||
|
||||
|
@ -211,10 +222,25 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase {
|
|||
|
||||
@Override
|
||||
protected Map<String, Class<?>> getPreConfiguredTokenizers() {
|
||||
Map<String, Class<?>> filters = new TreeMap<>(super.getPreConfiguredTokenizers());
|
||||
filters.put("keyword", null);
|
||||
filters.put("lowercase", null);
|
||||
return filters;
|
||||
Map<String, Class<?>> tokenizers = new TreeMap<>(super.getPreConfiguredTokenizers());
|
||||
tokenizers.put("keyword", null);
|
||||
tokenizers.put("lowercase", null);
|
||||
tokenizers.put("classic", null);
|
||||
tokenizers.put("uax_url_email", org.apache.lucene.analysis.standard.UAX29URLEmailTokenizerFactory.class);
|
||||
tokenizers.put("path_hierarchy", null);
|
||||
tokenizers.put("letter", null);
|
||||
tokenizers.put("whitespace", null);
|
||||
tokenizers.put("ngram", null);
|
||||
tokenizers.put("edge_ngram", null);
|
||||
tokenizers.put("pattern", null);
|
||||
tokenizers.put("thai", null);
|
||||
|
||||
// TODO drop aliases once they are moved to module
|
||||
tokenizers.put("nGram", tokenizers.get("ngram"));
|
||||
tokenizers.put("edgeNGram", tokenizers.get("edge_ngram"));
|
||||
tokenizers.put("PathHierarchy", tokenizers.get("path_hierarchy"));
|
||||
|
||||
return tokenizers;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -45,7 +45,7 @@ public class CommonGramsTokenFilterFactoryTests extends ESTokenStreamTestCase {
|
|||
.build();
|
||||
|
||||
try {
|
||||
AnalysisTestsHelper.createTestAnalysisFromSettings(settings);
|
||||
AnalysisTestsHelper.createTestAnalysisFromSettings(settings, new CommonAnalysisPlugin());
|
||||
Assert.fail("[common_words] or [common_words_path] is set");
|
||||
} catch (IllegalArgumentException e) {
|
||||
} catch (IOException e) {
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.query;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.Query;
|
||||
|
@ -29,12 +29,22 @@ import org.apache.lucene.search.PhraseQuery;
|
|||
import org.apache.lucene.search.MultiPhraseQuery;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.query.MatchPhraseQueryBuilder;
|
||||
import org.elasticsearch.index.query.MatchQueryBuilder;
|
||||
import org.elasticsearch.index.query.MultiMatchQueryBuilder;
|
||||
import org.elasticsearch.index.query.QueryShardContext;
|
||||
import org.elasticsearch.index.query.QueryStringQueryBuilder;
|
||||
import org.elasticsearch.index.query.SimpleQueryStringBuilder;
|
||||
import org.elasticsearch.index.query.SimpleQueryStringFlag;
|
||||
import org.elasticsearch.index.search.MatchQuery;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
|
@ -49,6 +59,11 @@ public class DisableGraphQueryTests extends ESSingleNodeTestCase {
|
|||
private static Query expectedQueryWithUnigram;
|
||||
private static Query expectedPhraseQueryWithUnigram;
|
||||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> getPlugins() {
|
||||
return Collections.singleton(CommonAnalysisPlugin.class);
|
||||
}
|
||||
|
||||
@Before
|
||||
public void setup() {
|
||||
Settings settings = Settings.builder()
|
||||
|
@ -150,42 +165,42 @@ public class DisableGraphQueryTests extends ESSingleNodeTestCase {
|
|||
public void testMatchPhraseQuery() throws IOException {
|
||||
MatchPhraseQueryBuilder builder =
|
||||
new MatchPhraseQueryBuilder("text_shingle_unigram", "foo bar baz");
|
||||
Query query = builder.doToQuery(shardContext);
|
||||
Query query = builder.toQuery(shardContext);
|
||||
assertThat(expectedPhraseQueryWithUnigram, equalTo(query));
|
||||
|
||||
builder =
|
||||
new MatchPhraseQueryBuilder("text_shingle", "foo bar baz biz");
|
||||
query = builder.doToQuery(shardContext);
|
||||
query = builder.toQuery(shardContext);
|
||||
assertThat(expectedPhraseQuery, equalTo(query));
|
||||
}
|
||||
|
||||
public void testMatchQuery() throws IOException {
|
||||
MatchQueryBuilder builder =
|
||||
new MatchQueryBuilder("text_shingle_unigram", "foo bar baz");
|
||||
Query query = builder.doToQuery(shardContext);
|
||||
Query query = builder.toQuery(shardContext);
|
||||
assertThat(expectedQueryWithUnigram, equalTo(query));
|
||||
|
||||
builder = new MatchQueryBuilder("text_shingle", "foo bar baz biz");
|
||||
query = builder.doToQuery(shardContext);
|
||||
query = builder.toQuery(shardContext);
|
||||
assertThat(expectedQuery, equalTo(query));
|
||||
}
|
||||
|
||||
public void testMultiMatchQuery() throws IOException {
|
||||
MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder("foo bar baz",
|
||||
"text_shingle_unigram");
|
||||
Query query = builder.doToQuery(shardContext);
|
||||
Query query = builder.toQuery(shardContext);
|
||||
assertThat(expectedQueryWithUnigram, equalTo(query));
|
||||
|
||||
builder.type(MatchQuery.Type.PHRASE);
|
||||
query = builder.doToQuery(shardContext);
|
||||
query = builder.toQuery(shardContext);
|
||||
assertThat(expectedPhraseQueryWithUnigram, equalTo(query));
|
||||
|
||||
builder = new MultiMatchQueryBuilder("foo bar baz biz", "text_shingle");
|
||||
query = builder.doToQuery(shardContext);
|
||||
query = builder.toQuery(shardContext);
|
||||
assertThat(expectedQuery, equalTo(query));
|
||||
|
||||
builder.type(MatchQuery.Type.PHRASE);
|
||||
query = builder.doToQuery(shardContext);
|
||||
query = builder.toQuery(shardContext);
|
||||
assertThat(expectedPhraseQuery, equalTo(query));
|
||||
}
|
||||
|
||||
|
@ -193,47 +208,47 @@ public class DisableGraphQueryTests extends ESSingleNodeTestCase {
|
|||
SimpleQueryStringBuilder builder = new SimpleQueryStringBuilder("foo bar baz");
|
||||
builder.field("text_shingle_unigram");
|
||||
builder.flags(SimpleQueryStringFlag.NONE);
|
||||
Query query = builder.doToQuery(shardContext);
|
||||
Query query = builder.toQuery(shardContext);
|
||||
assertThat(expectedQueryWithUnigram, equalTo(query));
|
||||
|
||||
builder = new SimpleQueryStringBuilder("\"foo bar baz\"");
|
||||
builder.field("text_shingle_unigram");
|
||||
builder.flags(SimpleQueryStringFlag.PHRASE);
|
||||
query = builder.doToQuery(shardContext);
|
||||
query = builder.toQuery(shardContext);
|
||||
assertThat(expectedPhraseQueryWithUnigram, equalTo(query));
|
||||
|
||||
builder = new SimpleQueryStringBuilder("foo bar baz biz");
|
||||
builder.field("text_shingle");
|
||||
builder.flags(SimpleQueryStringFlag.NONE);
|
||||
query = builder.doToQuery(shardContext);
|
||||
query = builder.toQuery(shardContext);
|
||||
assertThat(expectedQuery, equalTo(query));
|
||||
|
||||
builder = new SimpleQueryStringBuilder("\"foo bar baz biz\"");
|
||||
builder.field("text_shingle");
|
||||
builder.flags(SimpleQueryStringFlag.PHRASE);
|
||||
query = builder.doToQuery(shardContext);
|
||||
query = builder.toQuery(shardContext);
|
||||
assertThat(expectedPhraseQuery, equalTo(query));
|
||||
}
|
||||
|
||||
public void testQueryString() throws IOException {
|
||||
QueryStringQueryBuilder builder = new QueryStringQueryBuilder("foo bar baz");
|
||||
builder.field("text_shingle_unigram");
|
||||
Query query = builder.doToQuery(shardContext);
|
||||
Query query = builder.toQuery(shardContext);
|
||||
assertThat(expectedQueryWithUnigram, equalTo(query));
|
||||
|
||||
builder = new QueryStringQueryBuilder("\"foo bar baz\"");
|
||||
builder.field("text_shingle_unigram");
|
||||
query = builder.doToQuery(shardContext);
|
||||
query = builder.toQuery(shardContext);
|
||||
assertThat(expectedPhraseQueryWithUnigram, equalTo(query));
|
||||
|
||||
builder = new QueryStringQueryBuilder("foo bar baz biz");
|
||||
builder.field("text_shingle");
|
||||
query = builder.doToQuery(shardContext);
|
||||
query = builder.toQuery(shardContext);
|
||||
assertThat(expectedQuery, equalTo(query));
|
||||
|
||||
builder = new QueryStringQueryBuilder("\"foo bar baz biz\"");
|
||||
builder.field("text_shingle");
|
||||
query = builder.doToQuery(shardContext);
|
||||
query = builder.toQuery(shardContext);
|
||||
assertThat(expectedPhraseQuery, equalTo(query));
|
||||
}
|
||||
}
|
|
@ -30,8 +30,6 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.settings.Settings.Builder;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.NGramTokenizerFactory;
|
||||
import org.elasticsearch.test.ESTokenStreamTestCase;
|
||||
import org.elasticsearch.test.IndexSettingsModule;
|
||||
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
|
||||
|
|
@ -17,15 +17,13 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis.synonyms;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
|
@ -44,7 +42,6 @@ import static org.hamcrest.Matchers.instanceOf;
|
|||
import static org.hamcrest.Matchers.startsWith;
|
||||
|
||||
public class SynonymsAnalysisTests extends ESTestCase {
|
||||
protected final Logger logger = Loggers.getLogger(getClass());
|
||||
private IndexAnalyzers indexAnalyzers;
|
||||
|
||||
public void testSynonymsAnalysis() throws IOException {
|
||||
|
@ -56,14 +53,14 @@ public class SynonymsAnalysisTests extends ESTestCase {
|
|||
Files.copy(synonyms, config.resolve("synonyms.txt"));
|
||||
Files.copy(synonymsWordnet, config.resolve("synonyms_wordnet.txt"));
|
||||
|
||||
String json = "/org/elasticsearch/index/analysis/synonyms/synonyms.json";
|
||||
String json = "/org/elasticsearch/analysis/common/synonyms.json";
|
||||
Settings settings = Settings.builder().
|
||||
loadFromStream(json, getClass().getResourceAsStream(json), false)
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), home)
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
|
||||
|
||||
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
|
||||
indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
|
||||
indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers;
|
||||
|
||||
match("synonymAnalyzer", "kimchy is the dude abides", "shay is the elasticsearch man!");
|
||||
match("synonymAnalyzer_file", "kimchy is the dude abides", "shay is the elasticsearch man!");
|
||||
|
@ -91,7 +88,7 @@ public class SynonymsAnalysisTests extends ESTestCase {
|
|||
.build();
|
||||
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
|
||||
try {
|
||||
indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
|
||||
indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers;
|
||||
fail("fail! due to synonym word deleted by analyzer");
|
||||
} catch (Exception e) {
|
||||
assertThat(e, instanceOf(IllegalArgumentException.class));
|
||||
|
@ -112,7 +109,7 @@ public class SynonymsAnalysisTests extends ESTestCase {
|
|||
.build();
|
||||
IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
|
||||
try {
|
||||
indexAnalyzers = createTestAnalysis(idxSettings, settings).indexAnalyzers;
|
||||
indexAnalyzers = createTestAnalysis(idxSettings, settings, new CommonAnalysisPlugin()).indexAnalyzers;
|
||||
fail("fail! due to synonym word deleted by analyzer");
|
||||
} catch (Exception e) {
|
||||
assertThat(e, instanceOf(IllegalArgumentException.class));
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.analysis;
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
|
||||
|
|
@ -70,3 +70,374 @@
|
|||
- match: { detail.tokenizer.name: _anonymous_tokenizer }
|
||||
- match: { detail.tokenizer.tokens.0.token: foo }
|
||||
- match: { detail.tokenizer.tokens.1.token: bar }
|
||||
|
||||
---
|
||||
"thai_tokenizer":
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "ภาษาไทย"
|
||||
explain: true
|
||||
tokenizer:
|
||||
type: thai
|
||||
- length: { detail.tokenizer.tokens: 2 }
|
||||
- match: { detail.tokenizer.name: _anonymous_tokenizer }
|
||||
- match: { detail.tokenizer.tokens.0.token: ภาษา }
|
||||
- match: { detail.tokenizer.tokens.1.token: ไทย }
|
||||
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "ภาษาไทย"
|
||||
explain: true
|
||||
tokenizer: thai
|
||||
- length: { detail.tokenizer.tokens: 2 }
|
||||
- match: { detail.tokenizer.name: thai }
|
||||
- match: { detail.tokenizer.tokens.0.token: ภาษา }
|
||||
- match: { detail.tokenizer.tokens.1.token: ไทย }
|
||||
|
||||
---
|
||||
"ngram":
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "foobar"
|
||||
explain: true
|
||||
tokenizer:
|
||||
type: ngram
|
||||
min_gram: 3
|
||||
max_gram: 3
|
||||
- length: { detail.tokenizer.tokens: 4 }
|
||||
- match: { detail.tokenizer.name: _anonymous_tokenizer }
|
||||
- match: { detail.tokenizer.tokens.0.token: foo }
|
||||
- match: { detail.tokenizer.tokens.1.token: oob }
|
||||
- match: { detail.tokenizer.tokens.2.token: oba }
|
||||
- match: { detail.tokenizer.tokens.3.token: bar }
|
||||
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "foobar"
|
||||
explain: true
|
||||
tokenizer:
|
||||
type: nGram
|
||||
min_gram: 3
|
||||
max_gram: 3
|
||||
- length: { detail.tokenizer.tokens: 4 }
|
||||
- match: { detail.tokenizer.name: _anonymous_tokenizer }
|
||||
- match: { detail.tokenizer.tokens.0.token: foo }
|
||||
- match: { detail.tokenizer.tokens.1.token: oob }
|
||||
- match: { detail.tokenizer.tokens.2.token: oba }
|
||||
- match: { detail.tokenizer.tokens.3.token: bar }
|
||||
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "foo"
|
||||
explain: true
|
||||
tokenizer: ngram
|
||||
- length: { detail.tokenizer.tokens: 5 }
|
||||
- match: { detail.tokenizer.name: ngram }
|
||||
- match: { detail.tokenizer.tokens.0.token: f }
|
||||
- match: { detail.tokenizer.tokens.1.token: fo }
|
||||
- match: { detail.tokenizer.tokens.2.token: o }
|
||||
- match: { detail.tokenizer.tokens.3.token: oo }
|
||||
- match: { detail.tokenizer.tokens.4.token: o }
|
||||
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "foo"
|
||||
explain: true
|
||||
tokenizer: nGram
|
||||
- length: { detail.tokenizer.tokens: 5 }
|
||||
- match: { detail.tokenizer.name: nGram }
|
||||
- match: { detail.tokenizer.tokens.0.token: f }
|
||||
- match: { detail.tokenizer.tokens.1.token: fo }
|
||||
- match: { detail.tokenizer.tokens.2.token: o }
|
||||
- match: { detail.tokenizer.tokens.3.token: oo }
|
||||
- match: { detail.tokenizer.tokens.4.token: o }
|
||||
|
||||
---
|
||||
"edge_ngram":
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "foo"
|
||||
explain: true
|
||||
tokenizer:
|
||||
type: edge_ngram
|
||||
min_gram: 1
|
||||
max_gram: 3
|
||||
- length: { detail.tokenizer.tokens: 3 }
|
||||
- match: { detail.tokenizer.name: _anonymous_tokenizer }
|
||||
- match: { detail.tokenizer.tokens.0.token: f }
|
||||
- match: { detail.tokenizer.tokens.1.token: fo }
|
||||
- match: { detail.tokenizer.tokens.2.token: foo }
|
||||
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "foo"
|
||||
explain: true
|
||||
tokenizer:
|
||||
type: edgeNGram
|
||||
min_gram: 1
|
||||
max_gram: 3
|
||||
- length: { detail.tokenizer.tokens: 3 }
|
||||
- match: { detail.tokenizer.name: _anonymous_tokenizer }
|
||||
- match: { detail.tokenizer.tokens.0.token: f }
|
||||
- match: { detail.tokenizer.tokens.1.token: fo }
|
||||
- match: { detail.tokenizer.tokens.2.token: foo }
|
||||
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "foo"
|
||||
explain: true
|
||||
tokenizer: edge_ngram
|
||||
- length: { detail.tokenizer.tokens: 2 }
|
||||
- match: { detail.tokenizer.name: edge_ngram }
|
||||
- match: { detail.tokenizer.tokens.0.token: f }
|
||||
- match: { detail.tokenizer.tokens.1.token: fo }
|
||||
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "foo"
|
||||
explain: true
|
||||
tokenizer: edgeNGram
|
||||
- length: { detail.tokenizer.tokens: 2 }
|
||||
- match: { detail.tokenizer.name: edgeNGram }
|
||||
- match: { detail.tokenizer.tokens.0.token: f }
|
||||
- match: { detail.tokenizer.tokens.1.token: fo }
|
||||
|
||||
---
|
||||
"classic":
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "Brown-Foxes don't jump."
|
||||
explain: true
|
||||
tokenizer:
|
||||
type: classic
|
||||
- length: { detail.tokenizer.tokens: 4 }
|
||||
- match: { detail.tokenizer.name: _anonymous_tokenizer }
|
||||
- match: { detail.tokenizer.tokens.0.token: Brown }
|
||||
- match: { detail.tokenizer.tokens.1.token: Foxes }
|
||||
- match: { detail.tokenizer.tokens.2.token: don't }
|
||||
- match: { detail.tokenizer.tokens.3.token: jump }
|
||||
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "Brown-Foxes don't jump."
|
||||
explain: true
|
||||
tokenizer: classic
|
||||
- length: { detail.tokenizer.tokens: 4 }
|
||||
- match: { detail.tokenizer.name: classic }
|
||||
- match: { detail.tokenizer.tokens.0.token: Brown }
|
||||
- match: { detail.tokenizer.tokens.1.token: Foxes }
|
||||
- match: { detail.tokenizer.tokens.2.token: don't }
|
||||
- match: { detail.tokenizer.tokens.3.token: jump }
|
||||
|
||||
---
|
||||
"letter":
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "Brown-Foxes don't jump."
|
||||
explain: true
|
||||
tokenizer:
|
||||
type: letter
|
||||
- length: { detail.tokenizer.tokens: 5 }
|
||||
- match: { detail.tokenizer.name: _anonymous_tokenizer }
|
||||
- match: { detail.tokenizer.tokens.0.token: Brown }
|
||||
- match: { detail.tokenizer.tokens.1.token: Foxes }
|
||||
- match: { detail.tokenizer.tokens.2.token: don }
|
||||
- match: { detail.tokenizer.tokens.3.token: t }
|
||||
- match: { detail.tokenizer.tokens.4.token: jump }
|
||||
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "Brown-Foxes don't jump."
|
||||
explain: true
|
||||
tokenizer: letter
|
||||
- length: { detail.tokenizer.tokens: 5 }
|
||||
- match: { detail.tokenizer.name: letter }
|
||||
- match: { detail.tokenizer.tokens.0.token: Brown }
|
||||
- match: { detail.tokenizer.tokens.1.token: Foxes }
|
||||
- match: { detail.tokenizer.tokens.2.token: don }
|
||||
- match: { detail.tokenizer.tokens.3.token: t }
|
||||
- match: { detail.tokenizer.tokens.4.token: jump }
|
||||
|
||||
---
|
||||
"lowercase":
|
||||
- do:
|
||||
indices.analyze:
|
||||
body:
|
||||
text: "Brown-Foxes don't jump."
|
||||
explain: true
|
||||
tokenizer:
|
||||
type: lowercase
|
||||
- length: { detail.tokenizer.tokens: 5 }
|
||||
- match: { detail.tokenizer.name: _anonymous_tokenizer }
|
||||
- match: { detail.tokenizer.tokens.0.token: brown }
|
||||
- match: { detail.tokenizer.tokens.1.token: foxes }
|
||||
- match: { detail.tokenizer.tokens.2.token: don }
|
||||
- match: { detail.tokenizer.tokens.3.token: t }
|
||||
- match: { detail.tokenizer.tokens.4.token: jump }
|
||||
|
    - do:
        indices.analyze:
          body:
            text: "Brown-Foxes don't jump."
            explain: true
            tokenizer: lowercase
    - length: { detail.tokenizer.tokens: 5 }
    - match: { detail.tokenizer.name: lowercase }
    - match: { detail.tokenizer.tokens.0.token: brown }
    - match: { detail.tokenizer.tokens.1.token: foxes }
    - match: { detail.tokenizer.tokens.2.token: don }
    - match: { detail.tokenizer.tokens.3.token: t }
    - match: { detail.tokenizer.tokens.4.token: jump }

---
"path_hierarchy":
    - do:
        indices.analyze:
          body:
            text: "a/b/c"
            explain: true
            tokenizer:
              type: path_hierarchy
    - length: { detail.tokenizer.tokens: 3 }
    - match: { detail.tokenizer.name: _anonymous_tokenizer }
    - match: { detail.tokenizer.tokens.0.token: a }
    - match: { detail.tokenizer.tokens.1.token: a/b }
    - match: { detail.tokenizer.tokens.2.token: a/b/c }

    - do:
        indices.analyze:
          body:
            text: "a/b/c"
            explain: true
            tokenizer:
              type: PathHierarchy
    - length: { detail.tokenizer.tokens: 3 }
    - match: { detail.tokenizer.name: _anonymous_tokenizer }
    - match: { detail.tokenizer.tokens.0.token: a }
    - match: { detail.tokenizer.tokens.1.token: a/b }
    - match: { detail.tokenizer.tokens.2.token: a/b/c }

    - do:
        indices.analyze:
          body:
            text: "a/b/c"
            explain: true
            tokenizer: path_hierarchy
    - length: { detail.tokenizer.tokens: 3 }
    - match: { detail.tokenizer.name: path_hierarchy }
    - match: { detail.tokenizer.tokens.0.token: a }
    - match: { detail.tokenizer.tokens.1.token: a/b }
    - match: { detail.tokenizer.tokens.2.token: a/b/c }

    - do:
        indices.analyze:
          body:
            text: "a/b/c"
            explain: true
            tokenizer: PathHierarchy
    - length: { detail.tokenizer.tokens: 3 }
    - match: { detail.tokenizer.name: PathHierarchy }
    - match: { detail.tokenizer.tokens.0.token: a }
    - match: { detail.tokenizer.tokens.1.token: a/b }
    - match: { detail.tokenizer.tokens.2.token: a/b/c }

---
"pattern":
    - do:
        indices.analyze:
          body:
            text: "split by whitespace by default"
            explain: true
            tokenizer:
              type: pattern
    - length: { detail.tokenizer.tokens: 5 }
    - match: { detail.tokenizer.name: _anonymous_tokenizer }
    - match: { detail.tokenizer.tokens.0.token: split }
    - match: { detail.tokenizer.tokens.1.token: by }
    - match: { detail.tokenizer.tokens.2.token: whitespace }
    - match: { detail.tokenizer.tokens.3.token: by }
    - match: { detail.tokenizer.tokens.4.token: default }

    - do:
        indices.analyze:
          body:
            text: "split by whitespace by default"
            explain: true
            tokenizer: pattern
    - length: { detail.tokenizer.tokens: 5 }
    - match: { detail.tokenizer.name: pattern }
    - match: { detail.tokenizer.tokens.0.token: split }
    - match: { detail.tokenizer.tokens.1.token: by }
    - match: { detail.tokenizer.tokens.2.token: whitespace }
    - match: { detail.tokenizer.tokens.3.token: by }
    - match: { detail.tokenizer.tokens.4.token: default }

---
"uax_url_email":
    - do:
        indices.analyze:
          body:
            text: "Email me at john.smith@global-international.com"
            explain: true
            tokenizer:
              type: uax_url_email
    - length: { detail.tokenizer.tokens: 4 }
    - match: { detail.tokenizer.name: _anonymous_tokenizer }
    - match: { detail.tokenizer.tokens.0.token: Email }
    - match: { detail.tokenizer.tokens.1.token: me }
    - match: { detail.tokenizer.tokens.2.token: at }
    - match: { detail.tokenizer.tokens.3.token: john.smith@global-international.com }

    - do:
        indices.analyze:
          body:
            text: "Email me at john.smith@global-international.com"
            explain: true
            tokenizer: uax_url_email
    - length: { detail.tokenizer.tokens: 4 }
    - match: { detail.tokenizer.name: uax_url_email }
    - match: { detail.tokenizer.tokens.0.token: Email }
    - match: { detail.tokenizer.tokens.1.token: me }
    - match: { detail.tokenizer.tokens.2.token: at }
    - match: { detail.tokenizer.tokens.3.token: john.smith@global-international.com }

---
"whitespace":
    - do:
        indices.analyze:
          body:
            text: "split by whitespace"
            explain: true
            tokenizer:
              type: whitespace
    - length: { detail.tokenizer.tokens: 3 }
    - match: { detail.tokenizer.name: _anonymous_tokenizer }
    - match: { detail.tokenizer.tokens.0.token: split }
    - match: { detail.tokenizer.tokens.1.token: by }
    - match: { detail.tokenizer.tokens.2.token: whitespace }

    - do:
        indices.analyze:
          body:
            text: "split by whitespace"
            explain: true
            tokenizer: whitespace
    - length: { detail.tokenizer.tokens: 3 }
    - match: { detail.tokenizer.name: whitespace }
    - match: { detail.tokenizer.tokens.0.token: split }
    - match: { detail.tokenizer.tokens.1.token: by }
    - match: { detail.tokenizer.tokens.2.token: whitespace }
@@ -67,3 +67,33 @@
            text: "<html>foo</html>"
    - length: { tokens: 1 }
    - match: { tokens.0.token: "\nfoo\n" }

---
"Synonym filter with tokenizer":
    - do:
        indices.create:
          index: test_synonym
          body:
            settings:
              index:
                analysis:
                  tokenizer:
                    trigram:
                      type: nGram
                      min_gram: 3
                      max_gram: 3
                  filter:
                    synonym:
                      type: synonym
                      synonyms: ["kimchy => shay"]

    - do:
        indices.analyze:
          index: test_synonym
          body:
            tokenizer: trigram
            filter: [synonym]
            text: kimchy
    - length: { tokens: 2 }
    - match: { tokens.0.token: sha }
    - match: { tokens.1.token: hay }
@@ -39,3 +39,97 @@
            text:
              query: foa
    - match: {hits.total: 1}

---
"testNGramCopyField":
    - do:
        indices.create:
          index: test
          body:
            settings:
              number_of_shards: 1
              number_of_replicas: 0
              max_ngram_diff: 9
              analysis:
                analyzer:
                  my_ngram_analyzer:
                    tokenizer: my_ngram_tokenizer
                tokenizer:
                  my_ngram_tokenizer:
                    type: ngram
                    min: 1,
                    max: 10
                    token_chars: []
            mappings:
              doc:
                properties:
                  origin:
                    type: text
                    copy_to: meta
                  meta:
                    type: text
                    analyzer: my_ngram_analyzer

    - do:
        index:
          index: test
          type: doc
          id: 1
          body: { "origin": "C.A1234.5678" }
          refresh: true

    - do:
        search:
          body:
            query:
              match:
                meta:
                  query: 1234
    - match: {hits.total: 1}

    - do:
        search:
          body:
            query:
              match:
                meta:
                  query: 1234.56
    - match: {hits.total: 1}

    - do:
        search:
          body:
            query:
              match:
                meta:
                  query: A1234
    - match: {hits.total: 1}

    - do:
        search:
          body:
            query:
              term:
                meta:
                  value: a1234
    - match: {hits.total: 0}

    - do:
        search:
          body:
            query:
              match:
                meta:
                  query: A1234
                  analyzer: my_ngram_analyzer
    - match: {hits.total: 1}

    - do:
        search:
          body:
            query:
              match:
                meta:
                  query: a1234
                  analyzer: my_ngram_analyzer
    - match: {hits.total: 1}
@@ -230,6 +230,11 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) {
    fixtureSupported = true
  }

  boolean legalPath = rootProject.rootDir.toString().contains(" ") == false
  if (legalPath == false) {
    fixtureSupported = false
  }

  // Always ignore HA integration tests in the normal integration test runner, they are included below as
  // part of their own HA-specific integration test tasks.
  integTestRunner.exclude('**/Ha*TestSuiteIT.class')

@@ -248,7 +253,12 @@ if (fixtureSupported) {
  // Only include the HA integration tests for the HA test task
  integTestHaRunner.patternSet.setIncludes(['**/Ha*TestSuiteIT.class'])
} else {
  if (legalPath) {
    logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH")
  } else {
    logger.warn("hdfsFixture unsupported since there are spaces in the path: '" + rootProject.rootDir.toString() + "'")
  }

  // The normal integration test runner will just test that the plugin loads
  integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository/10_basic'
  // HA fixture is unsupported. Don't run them.
@@ -76,36 +76,6 @@
    - match: { detail.tokenfilters.0.name: "_anonymous_tokenfilter" }
    - match: { detail.tokenfilters.0.tokens.0.token: bar }

---
"Synonym filter with tokenizer":
    - do:
        indices.create:
          index: test_synonym
          body:
            settings:
              index:
                analysis:
                  tokenizer:
                    trigram:
                      type: nGram
                      min_gram: 3
                      max_gram: 3
                  filter:
                    synonym:
                      type: synonym
                      synonyms: ["kimchy => shay"]

    - do:
        indices.analyze:
          index: test_synonym
          body:
            tokenizer: trigram
            filter: [synonym]
            text: kimchy
    - length: { tokens: 2 }
    - match: { tokens.0.token: sha }
    - match: { tokens.1.token: hay }

---
"Custom normalizer in request":
    - do:
@ -1,5 +1,9 @@
|
|||
---
|
||||
"Shrink index via API":
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: expects warnings that pre-7.0.0 will not send
|
||||
features: "warnings"
|
||||
# creates an index with one document solely allocated on the master node
|
||||
# and shrinks it into a new index with a single shard
|
||||
# we don't do the relocation to a single node after the index is created
|
||||
|
@ -62,6 +66,8 @@
|
|||
body:
|
||||
settings:
|
||||
index.number_of_replicas: 0
|
||||
warnings:
|
||||
- "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
|
|
|
@ -1,5 +1,10 @@
|
|||
---
|
||||
"Shrink index ignores target template mapping":
|
||||
- skip:
|
||||
version: " - 6.99.99"
|
||||
reason: expects warnings that pre-7.0.0 will not send
|
||||
features: "warnings"
|
||||
|
||||
- do:
|
||||
cluster.state: {}
|
||||
# Get master node id
|
||||
|
@ -65,6 +70,8 @@
|
|||
body:
|
||||
settings:
|
||||
index.number_of_replicas: 0
|
||||
warnings:
|
||||
- "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
---
|
||||
"Copy settings during shrink index":
|
||||
- skip:
|
||||
version: " - 6.3.99"
|
||||
reason: copy_settings did not exist prior to 6.4.0
|
||||
version: " - 6.99.99"
|
||||
reason: expects warnings that pre-7.0.0 will not send
|
||||
features: "warnings"
|
||||
|
||||
- do:
|
||||
|
@ -47,8 +47,6 @@
|
|||
settings:
|
||||
index.number_of_replicas: 0
|
||||
index.merge.scheduler.max_thread_count: 2
|
||||
warnings:
|
||||
- "parameter [copy_settings] is deprecated but was [true]"
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
|
@ -64,20 +62,19 @@
|
|||
- match: { copy-settings-target.settings.index.blocks.write: "true" }
|
||||
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
|
||||
|
||||
# now we do a actual shrink and do not copy settings
|
||||
# now we do a actual shrink and do not copy settings (by default)
|
||||
- do:
|
||||
indices.shrink:
|
||||
index: "source"
|
||||
target: "no-copy-settings-target"
|
||||
wait_for_active_shards: 1
|
||||
master_timeout: 10s
|
||||
copy_settings: false
|
||||
body:
|
||||
settings:
|
||||
index.number_of_replicas: 0
|
||||
index.merge.scheduler.max_thread_count: 2
|
||||
warnings:
|
||||
- "parameter [copy_settings] is deprecated but was [false]"
|
||||
- "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
|
@ -92,3 +89,16 @@
|
|||
- match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
||||
- is_false: no-copy-settings-target.settings.index.blocks.write
|
||||
- is_false: no-copy-settings-target.settings.index.routing.allocation.include._id
|
||||
|
||||
# now we do a actual shrink and try to set no copy settings
|
||||
- do:
|
||||
catch: /illegal_argument_exception/
|
||||
indices.shrink:
|
||||
index: "source"
|
||||
target: "explicit-no-copy-settings-target"
|
||||
wait_for_active_shards: 1
|
||||
master_timeout: 10s
|
||||
copy_settings: false
|
||||
body:
|
||||
settings:
|
||||
index.number_of_replicas: 0
|
||||
|
|
|
@ -33,8 +33,9 @@ setup:
|
|||
---
|
||||
"Split index via API":
|
||||
- skip:
|
||||
version: " - 6.0.99"
|
||||
reason: Added in 6.1.0
|
||||
version: " - 6.99.99"
|
||||
reason: expects warnings that pre-7.0.0 will not send
|
||||
features: "warnings"
|
||||
|
||||
# make it read-only
|
||||
- do:
|
||||
|
@ -60,6 +61,8 @@ setup:
|
|||
settings:
|
||||
index.number_of_replicas: 0
|
||||
index.number_of_shards: 4
|
||||
warnings:
|
||||
- "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
|
@ -103,13 +106,13 @@ setup:
|
|||
|
||||
---
|
||||
"Split from 1 to N":
|
||||
# - skip:
|
||||
# version: " - 6.99.99"
|
||||
# reason: Added in 7.0.0
|
||||
# uncomment once AwaitsFix is resolved
|
||||
- skip:
|
||||
# when re-enabling uncomment the below skips
|
||||
version: "all"
|
||||
reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503"
|
||||
# version: " - 6.99.99"
|
||||
# reason: expects warnings that pre-7.0.0 will not send
|
||||
features: "warnings"
|
||||
- do:
|
||||
indices.create:
|
||||
index: source_one_shard
|
||||
|
@ -163,6 +166,8 @@ setup:
|
|||
settings:
|
||||
index.number_of_replicas: 0
|
||||
index.number_of_shards: 5
|
||||
warnings:
|
||||
- "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
|
@ -208,8 +213,9 @@ setup:
|
|||
---
|
||||
"Create illegal split indices":
|
||||
- skip:
|
||||
version: " - 6.0.99"
|
||||
reason: Added in 6.1.0
|
||||
version: " - 6.99.99"
|
||||
reason: expects warnings that pre-7.0.0 will not send
|
||||
features: "warnings"
|
||||
|
||||
# try to do an illegal split with number_of_routing_shards set
|
||||
- do:
|
||||
|
@ -224,6 +230,8 @@ setup:
|
|||
index.number_of_replicas: 0
|
||||
index.number_of_shards: 4
|
||||
index.number_of_routing_shards: 8
|
||||
warnings:
|
||||
- "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
|
||||
|
||||
# try to do an illegal split with illegal number_of_shards
|
||||
- do:
|
||||
|
@ -237,3 +245,5 @@ setup:
|
|||
settings:
|
||||
index.number_of_replicas: 0
|
||||
index.number_of_shards: 6
|
||||
warnings:
|
||||
- "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
---
|
||||
"Split index ignores target template mapping":
|
||||
# - skip:
|
||||
# version: " - 6.0.99"
|
||||
# reason: Added in 6.1.0
|
||||
# uncomment once AwaitsFix is resolved
|
||||
- skip:
|
||||
# when re-enabling uncomment the below skips
|
||||
version: "all"
|
||||
reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503"
|
||||
# version: " - 6.99.99"
|
||||
# reason: expects warnings that pre-7.0.0 will not send
|
||||
features: "warnings"
|
||||
|
||||
# create index
|
||||
- do:
|
||||
|
@ -68,6 +68,8 @@
|
|||
settings:
|
||||
index.number_of_shards: 2
|
||||
index.number_of_replicas: 0
|
||||
warnings:
|
||||
- "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
---
|
||||
"Copy settings during split index":
|
||||
- skip:
|
||||
version: " - 6.3.99"
|
||||
reason: copy_settings did not exist prior to 6.4.0
|
||||
version: " - 6.99.99"
|
||||
reason: expects warnings that pre-7.0.0 will not send
|
||||
features: "warnings"
|
||||
|
||||
- do:
|
||||
|
@ -50,8 +50,6 @@
|
|||
index.number_of_replicas: 0
|
||||
index.number_of_shards: 2
|
||||
index.merge.scheduler.max_thread_count: 2
|
||||
warnings:
|
||||
- "parameter [copy_settings] is deprecated but was [true]"
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
|
@ -67,21 +65,20 @@
|
|||
- match: { copy-settings-target.settings.index.blocks.write: "true" }
|
||||
- match: { copy-settings-target.settings.index.routing.allocation.include._id: $master }
|
||||
|
||||
# now we do a actual shrink and do not copy settings
|
||||
# now we do a actual shrink and do not copy settings (by default)
|
||||
- do:
|
||||
indices.split:
|
||||
index: "source"
|
||||
target: "no-copy-settings-target"
|
||||
wait_for_active_shards: 1
|
||||
master_timeout: 10s
|
||||
copy_settings: false
|
||||
body:
|
||||
settings:
|
||||
index.number_of_replicas: 0
|
||||
index.number_of_shards: 2
|
||||
index.merge.scheduler.max_thread_count: 2
|
||||
warnings:
|
||||
- "parameter [copy_settings] is deprecated but was [false]"
|
||||
- "resize operations without copying settings is deprecated; set parameter [copy_settings] to [true] for future default behavior"
|
||||
|
||||
- do:
|
||||
cluster.health:
|
||||
|
@ -96,3 +93,15 @@
|
|||
- match: { no-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" }
|
||||
- is_false: no-copy-settings-target.settings.index.blocks.write
|
||||
- is_false: no-copy-settings-target.settings.index.routing.allocation.include._id
|
||||
|
||||
- do:
|
||||
catch: /illegal_argument_exception/
|
||||
indices.split:
|
||||
index: "source"
|
||||
target: "explicit-no-copy-settings-target"
|
||||
wait_for_active_shards: 1
|
||||
master_timeout: 10s
|
||||
copy_settings: false
|
||||
body:
|
||||
settings:
|
||||
index.number_of_replicas: 0
|
||||
|
|
|
@@ -56,7 +56,7 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
    private CreateIndexRequest targetIndexRequest;
    private String sourceIndex;
    private ResizeType type = ResizeType.SHRINK;
    private boolean copySettings = false;
    private Boolean copySettings;

    ResizeRequest() {}

@@ -80,6 +80,7 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
        if (type == ResizeType.SPLIT && IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexRequest.settings()) == false) {
            validationException = addValidationError("index.number_of_shards is required for split operations", validationException);
        }
        assert copySettings == null || copySettings;
        return validationException;
    }

@@ -98,10 +99,12 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
        } else {
            type = ResizeType.SHRINK; // BWC this used to be shrink only
        }
        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
        if (in.getVersion().before(Version.V_6_4_0)) {
            copySettings = null;
        } else if (in.getVersion().onOrAfter(Version.V_6_4_0) && in.getVersion().before(Version.V_7_0_0_alpha1)){
            copySettings = in.readBoolean();
        } else {
            copySettings = false;
            copySettings = in.readOptionalBoolean();
        }
    }

@@ -113,8 +116,12 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
        if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) {
            out.writeEnum(type);
        }
        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
            out.writeBoolean(copySettings);
        if (out.getVersion().before(Version.V_6_4_0)) {

        } else if (out.getVersion().onOrAfter(Version.V_6_4_0) && out.getVersion().before(Version.V_7_0_0_alpha1)) {
            out.writeBoolean(copySettings == null ? false : copySettings);
        } else {
            out.writeOptionalBoolean(copySettings);
        }
    }

@@ -187,11 +194,14 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
        return type;
    }

    public void setCopySettings(final boolean copySettings) {
    public void setCopySettings(final Boolean copySettings) {
        if (copySettings != null && copySettings == false) {
            throw new IllegalArgumentException("[copySettings] can not be explicitly set to [false]");
        }
        this.copySettings = copySettings;
    }

    public boolean getCopySettings() {
    public Boolean getCopySettings() {
        return copySettings;
    }

@@ -190,7 +190,7 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeReque
                .waitForActiveShards(targetIndex.waitForActiveShards())
                .recoverFrom(metaData.getIndex())
                .resizeType(resizeRequest.getResizeType())
                .copySettings(resizeRequest.getCopySettings());
                .copySettings(resizeRequest.getCopySettings() == null ? false : resizeRequest.getCopySettings());
    }

    @Override
@ -24,6 +24,7 @@ import org.apache.lucene.analysis.core.KeywordTokenizer;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;
|
||||
|
||||
public class KeywordTokenizerFactory extends AbstractTokenizerFactory {
|
||||
|
||||
|
|
|
@ -39,11 +39,9 @@ import org.elasticsearch.index.analysis.CatalanAnalyzerProvider;
|
|||
import org.elasticsearch.index.analysis.CharFilterFactory;
|
||||
import org.elasticsearch.index.analysis.ChineseAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.CjkAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.ClassicTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.CzechAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.DanishAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.DutchAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.EnglishAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.FingerprintAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.FinnishAnalyzerProvider;
|
||||
|
@ -60,14 +58,9 @@ import org.elasticsearch.index.analysis.ItalianAnalyzerProvider;
|
|||
import org.elasticsearch.index.analysis.KeywordAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.KeywordTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.LatvianAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.LetterTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.LithuanianAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.NGramTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.NorwegianAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.PathHierarchyTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.PatternAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.PatternTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.PersianAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.PortugueseAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.PreConfiguredCharFilter;
|
||||
|
@ -88,13 +81,10 @@ import org.elasticsearch.index.analysis.StopAnalyzerProvider;
|
|||
import org.elasticsearch.index.analysis.StopTokenFilterFactory;
|
||||
import org.elasticsearch.index.analysis.SwedishAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.ThaiAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.ThaiTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.TokenFilterFactory;
|
||||
import org.elasticsearch.index.analysis.TokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.TurkishAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.UAX29URLEmailTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.WhitespaceAnalyzerProvider;
|
||||
import org.elasticsearch.index.analysis.WhitespaceTokenizerFactory;
|
||||
import org.elasticsearch.plugins.AnalysisPlugin;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -223,36 +213,19 @@ public final class AnalysisModule {
|
|||
}
|
||||
preConfiguredTokenizers.register(name, preConfigured);
|
||||
}
|
||||
// Temporary shim for aliases. TODO deprecate after they are moved
|
||||
preConfiguredTokenizers.register("nGram", preConfiguredTokenizers.getRegistry().get("ngram"));
|
||||
preConfiguredTokenizers.register("edgeNGram", preConfiguredTokenizers.getRegistry().get("edge_ngram"));
|
||||
preConfiguredTokenizers.register("PathHierarchy", preConfiguredTokenizers.getRegistry().get("path_hierarchy"));
|
||||
|
||||
for (AnalysisPlugin plugin: plugins) {
|
||||
for (PreConfiguredTokenizer tokenizer : plugin.getPreConfiguredTokenizers()) {
|
||||
preConfiguredTokenizers.register(tokenizer.getName(), tokenizer);
|
||||
}
|
||||
}
|
||||
|
||||
return unmodifiableMap(preConfiguredTokenizers.getRegistry());
|
||||
}
|
||||
|
||||
private NamedRegistry<AnalysisProvider<TokenizerFactory>> setupTokenizers(List<AnalysisPlugin> plugins) {
|
||||
NamedRegistry<AnalysisProvider<TokenizerFactory>> tokenizers = new NamedRegistry<>("tokenizer");
|
||||
tokenizers.register("standard", StandardTokenizerFactory::new);
|
||||
tokenizers.register("uax_url_email", UAX29URLEmailTokenizerFactory::new);
|
||||
tokenizers.register("path_hierarchy", PathHierarchyTokenizerFactory::new);
|
||||
tokenizers.register("PathHierarchy", PathHierarchyTokenizerFactory::new);
|
||||
tokenizers.register("keyword", KeywordTokenizerFactory::new);
|
||||
tokenizers.register("letter", LetterTokenizerFactory::new);
|
||||
tokenizers.register("lowercase", LowerCaseTokenizerFactory::new);
|
||||
tokenizers.register("whitespace", WhitespaceTokenizerFactory::new);
|
||||
tokenizers.register("nGram", NGramTokenizerFactory::new);
|
||||
tokenizers.register("ngram", NGramTokenizerFactory::new);
|
||||
tokenizers.register("edgeNGram", EdgeNGramTokenizerFactory::new);
|
||||
tokenizers.register("edge_ngram", EdgeNGramTokenizerFactory::new);
|
||||
tokenizers.register("pattern", PatternTokenizerFactory::new);
|
||||
tokenizers.register("classic", ClassicTokenizerFactory::new);
|
||||
tokenizers.register("thai", ThaiTokenizerFactory::new);
|
||||
tokenizers.extractAndRegister(plugins, AnalysisPlugin::getTokenizers);
|
||||
return tokenizers;
|
||||
}
|
||||
|
|
|
@ -19,18 +19,8 @@
|
|||
package org.elasticsearch.indices.analysis;
|
||||
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.core.LetterTokenizer;
|
||||
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
|
||||
import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
|
||||
import org.apache.lucene.analysis.ngram.NGramTokenizer;
|
||||
import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
|
||||
import org.apache.lucene.analysis.pattern.PatternTokenizer;
|
||||
import org.apache.lucene.analysis.standard.ClassicTokenizer;
|
||||
import org.apache.lucene.analysis.standard.StandardTokenizer;
|
||||
import org.apache.lucene.analysis.standard.UAX29URLEmailTokenizer;
|
||||
import org.apache.lucene.analysis.th.ThaiTokenizer;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.index.analysis.TokenFilterFactory;
|
||||
import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
|
||||
|
||||
|
@ -41,69 +31,6 @@ public enum PreBuiltTokenizers {
|
|||
protected Tokenizer create(Version version) {
|
||||
return new StandardTokenizer();
|
||||
}
|
||||
},
|
||||
|
||||
CLASSIC(CachingStrategy.ONE) {
|
||||
@Override
|
||||
protected Tokenizer create(Version version) {
|
||||
return new ClassicTokenizer();
|
||||
}
|
||||
},
|
||||
|
||||
UAX_URL_EMAIL(CachingStrategy.ONE) {
|
||||
@Override
|
||||
protected Tokenizer create(Version version) {
|
||||
return new UAX29URLEmailTokenizer();
|
||||
}
|
||||
},
|
||||
|
||||
PATH_HIERARCHY(CachingStrategy.ONE) {
|
||||
@Override
|
||||
protected Tokenizer create(Version version) {
|
||||
return new PathHierarchyTokenizer();
|
||||
}
|
||||
},
|
||||
|
||||
LETTER(CachingStrategy.ONE) {
|
||||
@Override
|
||||
protected Tokenizer create(Version version) {
|
||||
return new LetterTokenizer();
|
||||
}
|
||||
},
|
||||
|
||||
WHITESPACE(CachingStrategy.ONE) {
|
||||
@Override
|
||||
protected Tokenizer create(Version version) {
|
||||
return new WhitespaceTokenizer();
|
||||
}
|
||||
},
|
||||
|
||||
NGRAM(CachingStrategy.ONE) {
|
||||
@Override
|
||||
protected Tokenizer create(Version version) {
|
||||
return new NGramTokenizer();
|
||||
}
|
||||
},
|
||||
|
||||
EDGE_NGRAM(CachingStrategy.ONE) {
|
||||
@Override
|
||||
protected Tokenizer create(Version version) {
|
||||
return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE);
|
||||
}
|
||||
},
|
||||
|
||||
PATTERN(CachingStrategy.ONE) {
|
||||
@Override
|
||||
protected Tokenizer create(Version version) {
|
||||
return new PatternTokenizer(Regex.compile("\\W+", null), -1);
|
||||
}
|
||||
},
|
||||
|
||||
THAI(CachingStrategy.ONE) {
|
||||
@Override
|
||||
protected Tokenizer create(Version version) {
|
||||
return new ThaiTokenizer();
|
||||
}
|
||||
}
|
||||
|
||||
;
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
package org.elasticsearch.indices.flush;
|
||||
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.elasticsearch.Assertions;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
|
@ -501,8 +502,18 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
|
|||
if (indexShard.routingEntry().primary() == false) {
|
||||
throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard");
|
||||
}
|
||||
if (Assertions.ENABLED) {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("in flight operations {}, acquirers {}", indexShard.getActiveOperationsCount(), indexShard.getActiveOperations());
|
||||
}
|
||||
}
|
||||
int opCount = indexShard.getActiveOperationsCount();
|
||||
logger.trace("{} in flight operations sampled at [{}]", request.shardId(), opCount);
|
||||
// Need to snapshot the debug info twice as it's updated concurrently with the permit count.
|
||||
if (Assertions.ENABLED) {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("in flight operations {}, acquirers {}", indexShard.getActiveOperationsCount(), indexShard.getActiveOperations());
|
||||
}
|
||||
}
|
||||
return new InFlightOpsResponse(opCount);
|
||||
}
|
||||
|
||||
|
|
|
@ -950,6 +950,20 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
|||
|
||||
final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(snapshots);
|
||||
try {
|
||||
// Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier
|
||||
// attempt to write an index file with this generation failed mid-way after creating the temporary file.
|
||||
for (final String blobName : blobs.keySet()) {
|
||||
if (indexShardSnapshotsFormat.isTempBlobName(blobName)) {
|
||||
try {
|
||||
blobContainer.deleteBlobIgnoringIfNotExists(blobName);
|
||||
} catch (IOException e) {
|
||||
logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete index blob [{}] during finalization",
|
||||
snapshotId, shardId, blobName), e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we deleted all snapshots, we don't need to create a new index file
|
||||
if (snapshots.size() > 0) {
|
||||
indexShardSnapshotsFormat.writeAtomic(updatedSnapshots, blobContainer, indexGeneration);
|
||||
|
@ -957,7 +971,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
|||
|
||||
// Delete old index files
|
||||
for (final String blobName : blobs.keySet()) {
|
||||
if (indexShardSnapshotsFormat.isTempBlobName(blobName) || blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) {
|
||||
if (blobName.startsWith(SNAPSHOT_INDEX_PREFIX)) {
|
||||
try {
|
||||
blobContainer.deleteBlobIgnoringIfNotExists(blobName);
|
||||
} catch (IOException e) {
|
||||
|
|
|
@@ -48,17 +48,22 @@ public abstract class RestResizeHandler extends BaseRestHandler {
        final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index"));
        resizeRequest.setResizeType(getResizeType());
        final String rawCopySettings = request.param("copy_settings");
        final boolean copySettings;
        final Boolean copySettings;
        if (rawCopySettings == null) {
            copySettings = resizeRequest.getCopySettings();
        } else {
            deprecationLogger.deprecated("parameter [copy_settings] is deprecated but was [" + rawCopySettings + "]");
            if (rawCopySettings.length() == 0) {
        } else if (rawCopySettings.isEmpty()) {
            copySettings = true;
        } else {
            copySettings = Booleans.parseBoolean(rawCopySettings);
            if (copySettings == false) {
                throw new IllegalArgumentException("parameter [copy_settings] can not be explicitly set to [false]");
            }
        }
        if (copySettings == null) {
            deprecationLogger.deprecated(
                    "resize operations without copying settings is deprecated; "
                            + "set parameter [copy_settings] to [true] for future default behavior");
        }
        resizeRequest.setCopySettings(copySettings);
        request.applyContentParser(resizeRequest::fromXContent);
        resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout()));
@ -287,7 +287,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
|
|||
e = expectThrows(IllegalArgumentException.class,
|
||||
() -> TransportAnalyzeAction.analyze(
|
||||
new AnalyzeRequest()
|
||||
.tokenizer("whitespace")
|
||||
.tokenizer("standard")
|
||||
.addTokenFilter("foobar")
|
||||
.text("the qu1ck brown fox"),
|
||||
"text", null, notGlobal ? indexAnalyzers : null, registry, environment, maxTokenCount));
|
||||
|
@ -300,7 +300,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
|
|||
e = expectThrows(IllegalArgumentException.class,
|
||||
() -> TransportAnalyzeAction.analyze(
|
||||
new AnalyzeRequest()
|
||||
.tokenizer("whitespace")
|
||||
.tokenizer("standard")
|
||||
.addTokenFilter("lowercase")
|
||||
.addCharFilter("foobar")
|
||||
.text("the qu1ck brown fox"),
|
||||
|
@ -322,7 +322,7 @@ public class TransportAnalyzeActionTests extends ESTestCase {
|
|||
|
||||
public void testNonPreBuildTokenFilter() throws IOException {
|
||||
AnalyzeRequest request = new AnalyzeRequest();
|
||||
request.tokenizer("whitespace");
|
||||
request.tokenizer("standard");
|
||||
request.addTokenFilter("stop"); // stop token filter is not prebuilt in AnalysisModule#setupPreConfiguredTokenFilters()
|
||||
request.text("the quick brown fox");
|
||||
AnalyzeResponse analyze = TransportAnalyzeAction.analyze(request, "text", null, indexAnalyzers, registry, environment, maxTokenCount);
|
||||
|
|
|
@ -23,6 +23,7 @@ import org.apache.lucene.search.Sort;
|
|||
import org.apache.lucene.search.SortField;
|
||||
import org.apache.lucene.search.SortedSetSelector;
|
||||
import org.apache.lucene.search.SortedSetSortField;
|
||||
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
|
||||
|
@ -76,6 +77,7 @@ import static org.hamcrest.Matchers.containsString;
|
|||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30416")
|
||||
public class ShrinkIndexIT extends ESIntegTestCase {
|
||||
|
||||
@Override
|
||||
|
@ -83,7 +85,6 @@ public class ShrinkIndexIT extends ESIntegTestCase {
|
|||
return Arrays.asList(InternalSettingsPlugin.class);
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30416")
|
||||
public void testCreateShrinkIndexToN() {
|
||||
int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}};
|
||||
int[] shardSplits = randomFrom(possibleShardSplits);
|
||||
|
|
|
@ -31,12 +31,34 @@ import org.elasticsearch.test.ESTestCase;
|
|||
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
|
||||
import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.hasToString;
|
||||
|
||||
public class ResizeRequestTests extends ESTestCase {
|
||||
|
||||
public void testCopySettingsValidation() {
|
||||
runTestCopySettingsValidation(false, r -> {
|
||||
final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, r::get);
|
||||
assertThat(e, hasToString(containsString("[copySettings] can not be explicitly set to [false]")));
|
||||
});
|
||||
|
||||
runTestCopySettingsValidation(null, r -> assertNull(r.get().getCopySettings()));
|
||||
runTestCopySettingsValidation(true, r -> assertTrue(r.get().getCopySettings()));
|
||||
}
|
||||
|
||||
private void runTestCopySettingsValidation(final Boolean copySettings, final Consumer<Supplier<ResizeRequest>> consumer) {
|
||||
consumer.accept(() -> {
|
||||
final ResizeRequest request = new ResizeRequest();
|
||||
request.setCopySettings(copySettings);
|
||||
return request;
|
||||
});
|
||||
}
|
||||
|
||||
public void testToXContent() throws IOException {
|
||||
{
|
||||
ResizeRequest request = new ResizeRequest("target", "source");
|
||||
|
|
|
@ -188,7 +188,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
|
|||
.addAlias(new Alias("alias"))
|
||||
.setSettings(Settings.builder()
|
||||
.put(indexSettings())
|
||||
.put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
|
||||
.put("index.analysis.analyzer.tv_test.tokenizer", "standard")
|
||||
.putList("index.analysis.analyzer.tv_test.filter", "lowercase")));
|
||||
for (int i = 0; i < 10; i++) {
|
||||
client().prepareIndex("test", "type1", Integer.toString(i))
|
||||
|
@ -260,7 +260,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
|
|||
.endObject().endObject();
|
||||
assertAcked(prepareCreate("test").addMapping("type1", mapping)
|
||||
.setSettings(Settings.builder()
|
||||
.put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
|
||||
.put("index.analysis.analyzer.tv_test.tokenizer", "standard")
|
||||
.putList("index.analysis.analyzer.tv_test.filter", "lowercase")));
|
||||
for (int i = 0; i < 10; i++) {
|
||||
client().prepareIndex("test", "type1", Integer.toString(i))
|
||||
|
@ -394,7 +394,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
|
|||
.addMapping("type1", mapping)
|
||||
.setSettings(Settings.builder()
|
||||
.put(indexSettings())
|
||||
.put("index.analysis.analyzer.tv_test.tokenizer", "whitespace")
|
||||
.put("index.analysis.analyzer.tv_test.tokenizer", "standard")
|
||||
.putList("index.analysis.analyzer.tv_test.filter", "lowercase")));
|
||||
|
||||
ensureGreen();
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
*/
|
||||
package org.elasticsearch.action.termvectors;
|
||||
|
||||
import org.apache.lucene.analysis.MockTokenizer;
|
||||
import org.apache.lucene.analysis.TokenFilter;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.payloads.FloatEncoder;
|
||||
|
@ -35,6 +36,7 @@ import org.apache.lucene.util.BytesRef;
|
|||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.analysis.PreConfiguredTokenizer;
|
||||
import org.elasticsearch.index.analysis.TokenFilterFactory;
|
||||
import org.elasticsearch.indices.analysis.AnalysisModule;
|
||||
import org.elasticsearch.plugins.AnalysisPlugin;
|
||||
|
@ -93,6 +95,12 @@ public class GetTermVectorsTests extends ESSingleNodeTestCase {
|
|||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
|
||||
return Collections.singletonList(PreConfiguredTokenizer.singleton("mock-whitespace",
|
||||
() -> new MockTokenizer(MockTokenizer.WHITESPACE, false), null));
|
||||
}
|
||||
|
||||
// Based on DelimitedPayloadTokenFilter:
|
||||
final class MockPayloadTokenFilter extends TokenFilter {
|
||||
private final char delimiter;
|
||||
|
@ -151,7 +159,7 @@ public class GetTermVectorsTests extends ESSingleNodeTestCase {
|
|||
.startObject("field").field("type", "text").field("term_vector", "with_positions_offsets_payloads")
|
||||
.field("analyzer", "payload_test").endObject().endObject().endObject().endObject();
|
||||
Settings setting = Settings.builder()
|
||||
.put("index.analysis.analyzer.payload_test.tokenizer", "whitespace")
|
||||
.put("index.analysis.analyzer.payload_test.tokenizer", "mock-whitespace")
|
||||
.putList("index.analysis.analyzer.payload_test.filter", "my_delimited_payload")
|
||||
.put("index.analysis.filter.my_delimited_payload.delimiter", delimiter)
|
||||
.put("index.analysis.filter.my_delimited_payload.encoding", encodingString)
|
||||
|
|
|
@ -35,10 +35,8 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke
|
|||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.hasSize;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.hamcrest.Matchers.notNullValue;
|
||||
import static org.hamcrest.Matchers.startsWith;
|
||||
|
||||
|
||||
public class AnalyzeActionIT extends ESIntegTestCase {
|
||||
public void testSimpleAnalyzerTests() throws Exception {
|
||||
assertAcked(prepareCreate("test").addAlias(new Alias("alias")));
|
||||
|
@ -333,14 +331,14 @@ public class AnalyzeActionIT extends ESIntegTestCase {
|
|||
AnalyzeResponse analyzeResponse = client().admin().indices()
|
||||
.prepareAnalyze()
|
||||
.setText("Foo buzz test")
|
||||
.setTokenizer("whitespace")
|
||||
.setTokenizer("standard")
|
||||
.addTokenFilter("lowercase")
|
||||
.addTokenFilter(stopFilterSettings)
|
||||
.setExplain(true)
|
||||
.get();
|
||||
|
||||
//tokenizer
|
||||
assertThat(analyzeResponse.detail().tokenizer().getName(), equalTo("whitespace"));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getName(), equalTo("standard"));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens().length, equalTo(3));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getTerm(), equalTo("Foo"));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getStartOffset(), equalTo(0));
|
||||
|
@ -393,41 +391,6 @@ public class AnalyzeActionIT extends ESIntegTestCase {
|
|||
assertThat(analyzeResponse.detail().tokenfilters()[1].getTokens()[0].getPositionLength(), equalTo(1));
|
||||
}
|
||||
|
||||
public void testCustomTokenizerInRequest() throws Exception {
|
||||
Map<String, Object> tokenizerSettings = new HashMap<>();
|
||||
tokenizerSettings.put("type", "nGram");
|
||||
tokenizerSettings.put("min_gram", 2);
|
||||
tokenizerSettings.put("max_gram", 2);
|
||||
|
||||
AnalyzeResponse analyzeResponse = client().admin().indices()
|
||||
.prepareAnalyze()
|
||||
.setText("good")
|
||||
.setTokenizer(tokenizerSettings)
|
||||
.setExplain(true)
|
||||
.get();
|
||||
|
||||
//tokenizer
|
||||
assertThat(analyzeResponse.detail().tokenizer().getName(), equalTo("_anonymous_tokenizer"));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens().length, equalTo(3));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getTerm(), equalTo("go"));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getStartOffset(), equalTo(0));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getEndOffset(), equalTo(2));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getPosition(), equalTo(0));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[0].getPositionLength(), equalTo(1));
|
||||
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getTerm(), equalTo("oo"));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getStartOffset(), equalTo(1));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getEndOffset(), equalTo(3));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getPosition(), equalTo(1));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[1].getPositionLength(), equalTo(1));
|
||||
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getTerm(), equalTo("od"));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getStartOffset(), equalTo(2));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getEndOffset(), equalTo(4));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getPosition(), equalTo(2));
|
||||
assertThat(analyzeResponse.detail().tokenizer().getTokens()[2].getPositionLength(), equalTo(1));
|
||||
}
|
||||
|
||||
public void testAnalyzeKeywordField() throws IOException {
|
||||
assertAcked(prepareCreate("test").addAlias(new Alias("alias")).addMapping("test", "keyword", "type=keyword"));
|
||||
ensureGreen("test");
|
||||
|
|
|
@ -254,8 +254,7 @@ public class FlushIT extends ESIntegTestCase {
|
|||
result.totalShards(), result.failed(), result.failureReason(), detail);
|
||||
}
|
||||
|
||||
@TestLogging("_root:DEBUG")
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29392")
|
||||
@TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE")
|
||||
public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception {
|
||||
internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
|
||||
final int numberOfReplicas = internalCluster().numDataNodes() - 1;
|
||||
|
@ -297,8 +296,7 @@ public class FlushIT extends ESIntegTestCase {
|
|||
assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1));
|
||||
}
|
||||
|
||||
@TestLogging("_root:DEBUG")
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29392")
|
||||
@TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE")
|
||||
public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception {
|
||||
internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
|
||||
final int numberOfReplicas = internalCluster().numDataNodes() - 1;
|
||||
|
|
|
@ -677,7 +677,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
|
|||
" \"analysis\" : {\n" +
|
||||
" \"analyzer\" : {\n" +
|
||||
" \"custom_1\" : {\n" +
|
||||
" \"tokenizer\" : \"whitespace\"\n" +
|
||||
" \"tokenizer\" : \"standard\"\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
" }\n" +
|
||||
|
|
|
@ -20,15 +20,20 @@
|
|||
package org.elasticsearch.rest.action.admin.indices;
|
||||
|
||||
import org.elasticsearch.client.node.NodeClient;
|
||||
import org.elasticsearch.common.Booleans;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.rest.RestController;
|
||||
import org.elasticsearch.rest.RestHandler;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.rest.FakeRestRequest;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Locale;
|
||||
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.hasToString;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
||||
public class RestResizeHandlerTests extends ESTestCase {
|
||||
|
@ -36,27 +41,41 @@ public class RestResizeHandlerTests extends ESTestCase {
|
|||
public void testShrinkCopySettingsDeprecated() throws IOException {
|
||||
final RestResizeHandler.RestShrinkIndexAction handler =
|
||||
new RestResizeHandler.RestShrinkIndexAction(Settings.EMPTY, mock(RestController.class));
|
||||
final String copySettings = randomFrom("true", "false");
|
||||
final FakeRestRequest request =
|
||||
new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY)
|
||||
.withParams(Collections.singletonMap("copy_settings", copySettings))
|
||||
.withPath("source/_shrink/target")
|
||||
.build();
|
||||
handler.prepareRequest(request, mock(NodeClient.class));
|
||||
assertWarnings("parameter [copy_settings] is deprecated but was [" + copySettings + "]");
|
||||
for (final String copySettings : new String[]{null, "", "true", "false"}) {
|
||||
runTestResizeCopySettingsDeprecated(handler, "shrink", copySettings);
|
||||
}
|
||||
}
|
||||
|
||||
public void testSplitCopySettingsDeprecated() throws IOException {
|
||||
final RestResizeHandler.RestSplitIndexAction handler =
|
||||
new RestResizeHandler.RestSplitIndexAction(Settings.EMPTY, mock(RestController.class));
|
||||
final String copySettings = randomFrom("true", "false");
|
||||
final FakeRestRequest request =
|
||||
for (final String copySettings : new String[]{null, "", "true", "false"}) {
|
||||
runTestResizeCopySettingsDeprecated(handler, "split", copySettings);
|
||||
}
|
||||
}
|
||||
|
||||
private void runTestResizeCopySettingsDeprecated(
|
||||
final RestResizeHandler handler, final String resizeOperation, final String copySettings) throws IOException {
|
||||
final FakeRestRequest.Builder builder =
|
||||
new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY)
|
||||
.withParams(Collections.singletonMap("copy_settings", copySettings))
|
||||
.withPath("source/_split/target")
|
||||
.build();
|
||||
.withPath(String.format(Locale.ROOT, "source/_%s/target", resizeOperation));
|
||||
if (copySettings != null) {
|
||||
builder.withParams(Collections.singletonMap("copy_settings", copySettings));
|
||||
}
|
||||
final FakeRestRequest request = builder.build();
|
||||
if ("false".equals(copySettings)) {
|
||||
final IllegalArgumentException e =
|
||||
expectThrows(IllegalArgumentException.class, () -> handler.prepareRequest(request, mock(NodeClient.class)));
|
||||
assertThat(e, hasToString(containsString("parameter [copy_settings] can not be explicitly set to [false]")));
|
||||
} else {
|
||||
handler.prepareRequest(request, mock(NodeClient.class));
|
||||
assertWarnings("parameter [copy_settings] is deprecated but was [" + copySettings + "]");
|
||||
if (copySettings == null) {
|
||||
assertWarnings(
|
||||
"resize operations without copying settings is deprecated; "
|
||||
+ "set parameter [copy_settings] to [true] for future default behavior");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -1359,7 +1359,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
|
|||
public void testPhrasePrefix() throws IOException {
|
||||
Builder builder = Settings.builder()
|
||||
.put(indexSettings())
|
||||
.put("index.analysis.analyzer.synonym.tokenizer", "whitespace")
|
||||
.put("index.analysis.analyzer.synonym.tokenizer", "standard")
|
||||
.putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase")
|
||||
.put("index.analysis.filter.synonym.type", "synonym")
|
||||
.putList("index.analysis.filter.synonym.synonyms", "quick => fast");
|
||||
|
@ -2804,7 +2804,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
|
|||
public void testSynonyms() throws IOException {
|
||||
Builder builder = Settings.builder()
|
||||
.put(indexSettings())
|
||||
.put("index.analysis.analyzer.synonym.tokenizer", "whitespace")
|
||||
.put("index.analysis.analyzer.synonym.tokenizer", "standard")
|
||||
.putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase")
|
||||
.put("index.analysis.filter.synonym.type", "synonym")
|
||||
.putList("index.analysis.filter.synonym.synonyms", "fast,quick");
|
||||
|
|
|
@ -156,7 +156,7 @@ public class QueryRescorerIT extends ESIntegTestCase {
|
|||
|
||||
public void testMoreDocs() throws Exception {
|
||||
Builder builder = Settings.builder();
|
||||
builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
|
||||
builder.put("index.analysis.analyzer.synonym.tokenizer", "standard");
|
||||
builder.putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
|
||||
builder.put("index.analysis.filter.synonym.type", "synonym");
|
||||
builder.putList("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
|
||||
|
@ -234,7 +234,7 @@ public class QueryRescorerIT extends ESIntegTestCase {
|
|||
// Tests a rescore window smaller than number of hits:
|
||||
public void testSmallRescoreWindow() throws Exception {
|
||||
Builder builder = Settings.builder();
|
||||
builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
|
||||
builder.put("index.analysis.analyzer.synonym.tokenizer", "standard");
|
||||
builder.putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
|
||||
builder.put("index.analysis.filter.synonym.type", "synonym");
|
||||
builder.putList("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
|
||||
|
@ -306,7 +306,7 @@ public class QueryRescorerIT extends ESIntegTestCase {
|
|||
// Tests a rescorer that penalizes the scores:
|
||||
public void testRescorerMadeScoresWorse() throws Exception {
|
||||
Builder builder = Settings.builder();
|
||||
builder.put("index.analysis.analyzer.synonym.tokenizer", "whitespace");
|
||||
builder.put("index.analysis.analyzer.synonym.tokenizer", "standard");
|
||||
builder.putList("index.analysis.analyzer.synonym.filter", "synonym", "lowercase");
|
||||
builder.put("index.analysis.filter.synonym.type", "synonym");
|
||||
builder.putList("index.analysis.filter.synonym.synonyms", "ave => ave, avenue", "street => str, street");
|
||||
|
|
|
@ -82,7 +82,7 @@ public class MultiMatchQueryIT extends ESIntegTestCase {
|
|||
.put("index.analysis.analyzer.perfect_match.tokenizer", "keyword")
|
||||
.put("index.analysis.analyzer.perfect_match.filter", "lowercase")
|
||||
.put("index.analysis.analyzer.category.type", "custom")
|
||||
.put("index.analysis.analyzer.category.tokenizer", "whitespace")
|
||||
.put("index.analysis.analyzer.category.tokenizer", "standard")
|
||||
.put("index.analysis.analyzer.category.filter", "lowercase")
|
||||
);
|
||||
assertAcked(builder.addMapping("test", createMapping()));
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.search.query;
|
||||
|
||||
import org.apache.lucene.util.English;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
import org.elasticsearch.action.search.SearchPhaseExecutionException;
|
||||
|
@ -30,7 +29,6 @@ import org.elasticsearch.common.Strings;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.query.BoolQueryBuilder;
|
||||
import org.elasticsearch.index.query.MatchQueryBuilder;
|
||||
import org.elasticsearch.index.query.MultiMatchQueryBuilder;
|
||||
|
@ -351,7 +349,7 @@ public class SearchQueryIT extends ESIntegTestCase {
|
|||
.put(SETTING_NUMBER_OF_SHARDS,1)
|
||||
.put("index.analysis.filter.syns.type","synonym")
|
||||
.putList("index.analysis.filter.syns.synonyms","quick,fast")
|
||||
.put("index.analysis.analyzer.syns.tokenizer","whitespace")
|
||||
.put("index.analysis.analyzer.syns.tokenizer","standard")
|
||||
.put("index.analysis.analyzer.syns.filter","syns")
|
||||
)
|
||||
.addMapping("type1", "field1", "type=text,analyzer=syns", "field2", "type=text,analyzer=syns"));
|
||||
|
@ -1764,56 +1762,6 @@ public class SearchQueryIT extends ESIntegTestCase {
|
|||
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
|
||||
}
|
||||
|
||||
// see #5120
|
||||
public void testNGramCopyField() {
|
||||
CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(Settings.builder()
|
||||
.put(indexSettings())
|
||||
.put(IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey(), 9)
|
||||
.put("index.analysis.analyzer.my_ngram_analyzer.type", "custom")
|
||||
.put("index.analysis.analyzer.my_ngram_analyzer.tokenizer", "my_ngram_tokenizer")
|
||||
.put("index.analysis.tokenizer.my_ngram_tokenizer.type", "nGram")
|
||||
.put("index.analysis.tokenizer.my_ngram_tokenizer.min_gram", "1")
|
||||
.put("index.analysis.tokenizer.my_ngram_tokenizer.max_gram", "10")
|
||||
.putList("index.analysis.tokenizer.my_ngram_tokenizer.token_chars", new String[0]));
|
||||
assertAcked(builder.addMapping("test", "origin", "type=text,copy_to=meta", "meta", "type=text,analyzer=my_ngram_analyzer"));
|
||||
// we only have ngrams as the index analyzer so searches will get standard analyzer
|
||||
|
||||
|
||||
client().prepareIndex("test", "test", "1").setSource("origin", "C.A1234.5678")
|
||||
.setRefreshPolicy(IMMEDIATE)
|
||||
.get();
|
||||
|
||||
SearchResponse searchResponse = client().prepareSearch("test")
|
||||
.setQuery(matchQuery("meta", "1234"))
|
||||
.get();
|
||||
assertHitCount(searchResponse, 1L);
|
||||
|
||||
searchResponse = client().prepareSearch("test")
|
||||
.setQuery(matchQuery("meta", "1234.56"))
|
||||
.get();
|
||||
assertHitCount(searchResponse, 1L);
|
||||
|
||||
searchResponse = client().prepareSearch("test")
|
||||
.setQuery(termQuery("meta", "A1234"))
|
||||
.get();
|
||||
assertHitCount(searchResponse, 1L);
|
||||
|
||||
searchResponse = client().prepareSearch("test")
|
||||
.setQuery(termQuery("meta", "a1234"))
|
||||
.get();
|
||||
assertHitCount(searchResponse, 0L); // it's upper case
|
||||
|
||||
searchResponse = client().prepareSearch("test")
|
||||
.setQuery(matchQuery("meta", "A1234").analyzer("my_ngram_analyzer"))
|
||||
.get(); // force ngram analyzer
|
||||
assertHitCount(searchResponse, 1L);
|
||||
|
||||
searchResponse = client().prepareSearch("test")
|
||||
.setQuery(matchQuery("meta", "a1234").analyzer("my_ngram_analyzer"))
|
||||
.get(); // this one returns a hit since it's default operator is OR
|
||||
assertHitCount(searchResponse, 1L);
|
||||
}
|
||||
|
||||
public void testMatchPhrasePrefixQuery() throws ExecutionException, InterruptedException {
|
||||
createIndex("test1");
|
||||
indexRandom(true, client().prepareIndex("test1", "type1", "1").setSource("field", "Johnnie Walker Black Label"),
|
||||
|
|
|
@@ -427,7 +427,7 @@ public class SuggestSearchIT extends ESIntegTestCase {
public void testStopwordsOnlyPhraseSuggest() throws IOException {
assertAcked(prepareCreate("test").addMapping("typ1", "body", "type=text,analyzer=stopwd").setSettings(
Settings.builder()
.put("index.analysis.analyzer.stopwd.tokenizer", "whitespace")
.put("index.analysis.analyzer.stopwd.tokenizer", "standard")
.putList("index.analysis.analyzer.stopwd.filter", "stop")
));
ensureGreen();
@@ -3094,7 +3094,6 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
assertEquals("IndexShardSnapshotFailedException[Aborted]", snapshotInfo.shardFailures().get(0).reason());
}

@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30507")
public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception {
logger.info("--> creating repository");
final Path repoPath = randomRepoPath();
@@ -22,18 +22,10 @@ package org.elasticsearch.indices.analysis;
import org.apache.lucene.analysis.util.CharFilterFactory;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.elasticsearch.Version;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.index.analysis.ClassicTokenizerFactory;
import org.elasticsearch.index.analysis.EdgeNGramTokenizerFactory;
import org.elasticsearch.index.analysis.HunspellTokenFilterFactory;
import org.elasticsearch.index.analysis.KeywordTokenizerFactory;
import org.elasticsearch.index.analysis.LetterTokenizerFactory;
import org.elasticsearch.index.analysis.LowerCaseTokenizerFactory;
import org.elasticsearch.index.analysis.MultiTermAwareComponent;
import org.elasticsearch.index.analysis.NGramTokenizerFactory;
import org.elasticsearch.index.analysis.PathHierarchyTokenizerFactory;
import org.elasticsearch.index.analysis.PatternTokenizerFactory;
import org.elasticsearch.index.analysis.PreConfiguredCharFilter;
import org.elasticsearch.index.analysis.PreConfiguredTokenFilter;
import org.elasticsearch.index.analysis.PreConfiguredTokenizer;

@@ -43,9 +35,6 @@ import org.elasticsearch.index.analysis.StandardTokenizerFactory;
import org.elasticsearch.index.analysis.StopTokenFilterFactory;
import org.elasticsearch.index.analysis.SynonymGraphTokenFilterFactory;
import org.elasticsearch.index.analysis.SynonymTokenFilterFactory;
import org.elasticsearch.index.analysis.ThaiTokenizerFactory;
import org.elasticsearch.index.analysis.UAX29URLEmailTokenizerFactory;
import org.elasticsearch.index.analysis.WhitespaceTokenizerFactory;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.test.ESTestCase;

@@ -88,20 +77,20 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase {

static final Map<String,Class<?>> KNOWN_TOKENIZERS = new MapBuilder<String,Class<?>>()
// exposed in ES
.put("classic", ClassicTokenizerFactory.class)
.put("edgengram", EdgeNGramTokenizerFactory.class)
.put("classic", MovedToAnalysisCommon.class)
.put("edgengram", MovedToAnalysisCommon.class)
.put("keyword", KeywordTokenizerFactory.class)
.put("letter", LetterTokenizerFactory.class)
.put("lowercase", LowerCaseTokenizerFactory.class)
.put("ngram", NGramTokenizerFactory.class)
.put("pathhierarchy", PathHierarchyTokenizerFactory.class)
.put("pattern", PatternTokenizerFactory.class)
.put("letter", MovedToAnalysisCommon.class)
.put("lowercase", MovedToAnalysisCommon.class)
.put("ngram", MovedToAnalysisCommon.class)
.put("pathhierarchy", MovedToAnalysisCommon.class)
.put("pattern", MovedToAnalysisCommon.class)
.put("simplepattern", MovedToAnalysisCommon.class)
.put("simplepatternsplit", MovedToAnalysisCommon.class)
.put("standard", StandardTokenizerFactory.class)
.put("thai", ThaiTokenizerFactory.class)
.put("uax29urlemail", UAX29URLEmailTokenizerFactory.class)
.put("whitespace", WhitespaceTokenizerFactory.class)
.put("thai", MovedToAnalysisCommon.class)
.put("uax29urlemail", MovedToAnalysisCommon.class)
.put("whitespace", MovedToAnalysisCommon.class)

// this one "seems to mess up offsets". probably shouldn't be a tokenizer...
.put("wikipedia", Void.class)

@@ -292,23 +281,8 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase {
Map<String, Class<?>> tokenizers = new HashMap<>();
// TODO drop this temporary shim when all the old style tokenizers have been migrated to new style
for (PreBuiltTokenizers tokenizer : PreBuiltTokenizers.values()) {
final Class<?> luceneFactoryClazz;
switch (tokenizer) {
case UAX_URL_EMAIL:
luceneFactoryClazz = org.apache.lucene.analysis.standard.UAX29URLEmailTokenizerFactory.class;
break;
case PATH_HIERARCHY:
luceneFactoryClazz = Void.class;
break;
default:
luceneFactoryClazz = null;
tokenizers.put(tokenizer.name().toLowerCase(Locale.ROOT), null);
}
tokenizers.put(tokenizer.name().toLowerCase(Locale.ROOT), luceneFactoryClazz);
}
// TODO drop aliases once they are moved to module
tokenizers.put("nGram", tokenizers.get("ngram"));
tokenizers.put("edgeNGram", tokenizers.get("edge_ngram"));
tokenizers.put("PathHierarchy", tokenizers.get("path_hierarchy"));
return tokenizers;
}
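Note: the KNOWN_TOKENIZERS entries flipped to MovedToAnalysisCommon.class appear to mark tokenizers whose factories now live in the analysis-common module rather than in server, which matches the server-side imports and the PreBuiltTokenizers shim being dropped in the hunks above.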
@@ -91,10 +91,10 @@ public class StateProcessor extends AbstractComponent {
}

void persist(String jobId, BytesReference bytes) throws IOException {
logger.trace("[{}] ES API CALL: bulk index", jobId);
BulkRequest bulkRequest = new BulkRequest();
bulkRequest.add(bytes, AnomalyDetectorsIndex.jobStateIndexName(), ElasticsearchMappings.DOC_TYPE, XContentType.JSON);
if (bulkRequest.numberOfActions() > 0) {
logger.trace("[{}] Persisting job state document", jobId);
try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), ML_ORIGIN)) {
client.bulk(bulkRequest).actionGet();
}
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.sql.plan.logical.command.sys;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexInfo;
import org.elasticsearch.xpack.sql.analysis.index.IndexResolver.IndexType;
import org.elasticsearch.xpack.sql.expression.Attribute;
import org.elasticsearch.xpack.sql.expression.regex.LikePattern;

@@ -18,6 +19,7 @@ import org.elasticsearch.xpack.sql.tree.NodeInfo;
import org.elasticsearch.xpack.sql.util.CollectionUtils;

import java.util.ArrayList;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.List;
import java.util.Objects;

@@ -93,6 +95,8 @@ public class SysTables extends Command {
enumeration[3] = type.toSql();
values.add(asList(enumeration));
}

values.sort(Comparator.comparing(l -> l.get(3).toString()));
listener.onResponse(Rows.of(output(), values));
return;
}

@@ -112,6 +116,9 @@ public class SysTables extends Command {

session.indexResolver().resolveNames(index, regex, types, ActionListener.wrap(result -> listener.onResponse(
Rows.of(output(), result.stream()
// sort by type (which might be legacy), then by name
.sorted(Comparator.<IndexInfo, String> comparing(i -> legacyName(i.type()))
.thenComparing(Comparator.comparing(i -> i.name())))
.map(t -> asList(cluster,
EMPTY,
t.name(),
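The two sorts added above give SYS TABLES a deterministic row order: by table type first, then by table name. A minimal, self-contained sketch of that comparator chain (plain String[] rows stand in for the real IndexInfo objects; the values are illustrative, not taken from the commit):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class SysTablesOrderSketch {
    public static void main(String[] args) {
        // row[0] = table name, row[1] = table type
        List<String[]> rows = new ArrayList<>();
        rows.add(new String[] {"test", "BASE TABLE"});
        rows.add(new String[] {"alias", "ALIAS"});
        // Sort by type first, then by name - the same shape as the comparator added in SysTables.
        rows.sort(Comparator.<String[], String>comparing(r -> r[1]).thenComparing(r -> r[0]));
        // Prints "alias ALIAS" before "test BASE TABLE", which is why the tests below now expect the alias row first.
        rows.forEach(r -> System.out.println(r[0] + " " + r[1]));
    }
}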
@@ -24,6 +24,7 @@ import org.elasticsearch.xpack.sql.type.DataTypes;
import org.elasticsearch.xpack.sql.type.EsField;
import org.elasticsearch.xpack.sql.type.TypesTests;

import java.util.Comparator;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;

@@ -57,30 +58,30 @@ public class SysTablesTests extends ESTestCase {

public void testSysTablesNoTypes() throws Exception {
executeCommand("SYS TABLES", r -> {
assertEquals("alias", r.column(2));
assertTrue(r.advanceRow());
assertEquals(2, r.size());
assertEquals("test", r.column(2));
assertTrue(r.advanceRow());
assertEquals("alias", r.column(2));
}, index, alias);
}

public void testSysTablesPattern() throws Exception {
executeCommand("SYS TABLES LIKE '%'", r -> {
assertEquals("alias", r.column(2));
assertTrue(r.advanceRow());
assertEquals(2, r.size());
assertEquals("test", r.column(2));
assertTrue(r.advanceRow());
assertEquals("alias", r.column(2));
}, index, alias);
}

public void testSysTablesPatternParameterized() throws Exception {
List<SqlTypedParamValue> params = asList(param("%"));
executeCommand("SYS TABLES LIKE ?", params, r -> {
assertEquals("alias", r.column(2));
assertTrue(r.advanceRow());
assertEquals(2, r.size());
assertEquals("test", r.column(2));
assertTrue(r.advanceRow());
assertEquals("alias", r.column(2));
}, index, alias);
}, alias, index);
}

public void testSysTablesOnlyAliases() throws Exception {

@@ -131,32 +132,32 @@ public class SysTablesTests extends ESTestCase {

public void testSysTablesOnlyIndicesAndAliases() throws Exception {
executeCommand("SYS TABLES LIKE 'test' TYPE 'ALIAS', 'BASE TABLE'", r -> {
assertEquals("alias", r.column(2));
assertTrue(r.advanceRow());
assertEquals(2, r.size());
assertEquals("test", r.column(2));
assertTrue(r.advanceRow());
assertEquals("alias", r.column(2));
}, index, alias);
}

public void testSysTablesOnlyIndicesAndAliasesParameterized() throws Exception {
List<SqlTypedParamValue> params = asList(param("ALIAS"), param("BASE TABLE"));
executeCommand("SYS TABLES LIKE 'test' TYPE ?, ?", params, r -> {
assertEquals("alias", r.column(2));
assertTrue(r.advanceRow());
assertEquals(2, r.size());
assertEquals("test", r.column(2));
assertTrue(r.advanceRow());
assertEquals("alias", r.column(2));
}, index, alias);
}

public void testSysTablesOnlyIndicesLegacyAndAliasesParameterized() throws Exception {
List<SqlTypedParamValue> params = asList(param("ALIAS"), param("TABLE"));
executeCommand("SYS TABLES LIKE 'test' TYPE ?, ?", params, r -> {
assertEquals("alias", r.column(2));
assertEquals("ALIAS", r.column(3));
assertTrue(r.advanceRow());
assertEquals(2, r.size());
assertEquals("test", r.column(2));
assertEquals("TABLE", r.column(3));
assertTrue(r.advanceRow());
assertEquals("alias", r.column(2));
assertEquals("ALIAS", r.column(3));
}, index, alias);
}

@@ -188,7 +189,7 @@ public class SysTablesTests extends ESTestCase {
executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> {
assertEquals(2, r.size());

Iterator<IndexType> it = IndexType.VALID.iterator();
Iterator<IndexType> it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator();

for (int t = 0; t < r.size(); t++) {
assertEquals(it.next().toSql(), r.column(3));

@@ -209,7 +210,7 @@ public class SysTablesTests extends ESTestCase {
executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> {
assertEquals(2, r.size());

Iterator<IndexType> it = IndexType.VALID.iterator();
Iterator<IndexType> it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator();

for (int t = 0; t < r.size(); t++) {
assertEquals(it.next().toSql(), r.column(3));
@@ -141,7 +141,7 @@ subprojects {
configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
dependsOn copyTestNodeKeystore
if (version.before('6.3.0')) {
plugin xpackProject('plugin').path
mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}"
}
bwcVersion = version
numBwcNodes = 2

@@ -82,7 +82,7 @@ for (Version version : bwcVersions.wireCompatible) {

configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
if (version.before('6.3.0')) {
plugin xpackProject('plugin').path
mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}"
}
bwcVersion = version
numBwcNodes = 2

@@ -123,7 +123,7 @@ subprojects {
configure(extensions.findByName("${baseName}#oldClusterTestCluster")) {
dependsOn copyTestNodeKeystore
if (version.before('6.3.0')) {
plugin xpackProject('plugin').path
mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}"
}
String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users'
setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser'
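In the three build.gradle hunks above, old clusters before 6.3.0 no longer install x-pack from the local xpackProject('plugin') build; the new mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${version}" line appears to point the test cluster at the already-released x-pack artifact, resolved by its Maven coordinates instead.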
@@ -8,6 +8,7 @@ package org.elasticsearch.xpack.qa.sql.multinode;
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.Strings;

@@ -53,7 +54,7 @@ public class RestSqlMultinodeIT extends ESRestTestCase {
String firstHostName = null;

String match = firstHost.getHostName() + ":" + firstHost.getPort();
Map<String, Object> nodesInfo = responseToMap(client().performRequest("GET", "/_nodes"));
Map<String, Object> nodesInfo = responseToMap(client().performRequest(new Request("GET", "/_nodes")));
@SuppressWarnings("unchecked")
Map<String, Object> nodes = (Map<String, Object>) nodesInfo.get("nodes");
for (Map.Entry<String, Object> node : nodes.entrySet()) {

@@ -74,7 +75,9 @@ public class RestSqlMultinodeIT extends ESRestTestCase {
}
index.endObject();
index.endObject();
client().performRequest("PUT", "/test", emptyMap(), new StringEntity(Strings.toString(index), ContentType.APPLICATION_JSON));
Request request = new Request("PUT", "/test");
request.setJsonEntity(Strings.toString(index));
client().performRequest(request);
int documents = between(10, 100);
createTestData(documents);

@@ -84,6 +87,9 @@ public class RestSqlMultinodeIT extends ESRestTestCase {
}

private void createTestData(int documents) throws UnsupportedCharsetException, IOException {
Request request = new Request("PUT", "/test/test/_bulk");
request.addParameter("refresh", "true");

StringBuilder bulk = new StringBuilder();
for (int i = 0; i < documents; i++) {
int a = 3 * i;

@@ -92,8 +98,9 @@ public class RestSqlMultinodeIT extends ESRestTestCase {
bulk.append("{\"index\":{\"_id\":\"" + i + "\"}\n");
bulk.append("{\"a\": " + a + ", \"b\": " + b + ", \"c\": " + c + "}\n");
}
client().performRequest("PUT", "/test/test/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
request.setJsonEntity(bulk.toString());

client().performRequest(request);
}

private Map<String, Object> responseToMap(Response response) throws IOException {

@@ -108,14 +115,12 @@ public class RestSqlMultinodeIT extends ESRestTestCase {
expected.put("columns", singletonList(columnInfo(mode, "COUNT(1)", "long", JDBCType.BIGINT, 20)));
expected.put("rows", singletonList(singletonList(count)));

Map<String, String> params = new TreeMap<>();
params.put("format", "json"); // JSON is easier to parse then a table
if (Strings.hasText(mode)) {
params.put("mode", mode); // JDBC or PLAIN mode
Request request = new Request("POST", "/_xpack/sql");
if (false == mode.isEmpty()) {
request.addParameter("mode", mode);
}

Map<String, Object> actual = responseToMap(client.performRequest("POST", "/_xpack/sql", params,
new StringEntity("{\"query\": \"SELECT COUNT(*) FROM test\"}", ContentType.APPLICATION_JSON)));
request.setJsonEntity("{\"query\": \"SELECT COUNT(*) FROM test\"}");
Map<String, Object> actual = responseToMap(client.performRequest(request));

if (false == expected.equals(actual)) {
NotEqualMessageBuilder message = new NotEqualMessageBuilder();
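The changes in this file, and in the test classes that follow, all apply the same mechanical migration: the multi-argument client().performRequest(method, endpoint, params, entity, headers) calls are replaced with the Request object from org.elasticsearch.client. A minimal sketch of the new shape, assuming a RestClient named client (the endpoint and body here are illustrative, not taken from the commit):

import java.io.IOException;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RequestStyleSketch {
    // Index one document using the Request-object style now used throughout these tests.
    static String putDoc(RestClient client, String json) throws IOException {
        Request request = new Request("PUT", "/test/doc/1"); // HTTP method + endpoint
        request.addParameter("refresh", "true");             // query parameter (previously a Map argument)
        request.setJsonEntity(json);                          // body; also sets the application/json content type
        Response response = client.performRequest(request);  // single-argument overload replaces the deprecated ones
        return EntityUtils.toString(response.getEntity());
    }
}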
@@ -10,6 +10,7 @@ import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.Nullable;

@@ -176,14 +177,15 @@ public class RestSqlSecurityIT extends SqlSecurityTestCase {
}

private static Map<String, Object> runSql(@Nullable String asUser, String mode, HttpEntity entity) throws IOException {
Map<String, String> params = new TreeMap<>();
params.put("format", "json"); // JSON is easier to parse then a table
if (Strings.hasText(mode)) {
params.put("mode", mode); // JDBC or PLAIN mode
Request request = new Request("POST", "/_xpack/sql");
if (false == mode.isEmpty()) {
request.addParameter("mode", mode);
}
Header[] headers = asUser == null ? new Header[0] : new Header[] {new BasicHeader("es-security-runas-user", asUser)};
Response response = client().performRequest("POST", "/_xpack/sql", params, entity, headers);
return toMap(response);
if (asUser != null) {
request.setHeaders(new BasicHeader("es-security-runas-user", asUser));
}
request.setEntity(entity);
return toMap(client().performRequest(request));
}

private static void assertResponse(Map<String, Object> expected, Map<String, Object> actual) {
@@ -11,6 +11,7 @@ import org.apache.lucene.util.SuppressForbidden;
import org.elasticsearch.SpecialPermission;
import org.elasticsearch.action.admin.indices.get.GetIndexAction;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;

@@ -41,7 +42,6 @@ import java.util.TreeMap;
import java.util.function.Function;
import java.util.regex.Pattern;

import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.empty;

@@ -135,6 +135,9 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase {
* write the test data once. */
return;
}
Request request = new Request("PUT", "/_bulk");
request.addParameter("refresh", "true");

StringBuilder bulk = new StringBuilder();
bulk.append("{\"index\":{\"_index\": \"test\", \"_type\": \"doc\", \"_id\":\"1\"}\n");
bulk.append("{\"a\": 1, \"b\": 2, \"c\": 3}\n");

@@ -142,8 +145,8 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase {
bulk.append("{\"a\": 4, \"b\": 5, \"c\": 6}\n");
bulk.append("{\"index\":{\"_index\": \"bort\", \"_type\": \"doc\", \"_id\":\"1\"}\n");
bulk.append("{\"a\": \"test\"}\n");
client().performRequest("PUT", "/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
request.setJsonEntity(bulk.toString());
client().performRequest(request);
oneTimeSetup = true;
}

@@ -173,7 +176,7 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase {
@AfterClass
public static void wipeIndicesAfterTests() throws IOException {
try {
adminClient().performRequest("DELETE", "*");
adminClient().performRequest(new Request("DELETE", "*"));
} catch (ResponseException e) {
// 404 here just means we had no indexes
if (e.getResponse().getStatusLine().getStatusCode() != 404) {

@@ -472,13 +475,15 @@ public abstract class SqlSecurityTestCase extends ESRestTestCase {
}

protected static void createUser(String name, String role) throws IOException {
XContentBuilder user = JsonXContent.contentBuilder().prettyPrint().startObject(); {
Request request = new Request("PUT", "/_xpack/security/user/" + name);
XContentBuilder user = JsonXContent.contentBuilder().prettyPrint();
user.startObject(); {
user.field("password", "testpass");
user.field("roles", role);
}
user.endObject();
client().performRequest("PUT", "/_xpack/security/user/" + name, emptyMap(),
new StringEntity(Strings.toString(user), ContentType.APPLICATION_JSON));
request.setJsonEntity(Strings.toString(user));
client().performRequest(request);
}

protected AuditLogAsserter createAuditLogAsserter() {
@@ -5,9 +5,9 @@
*/
package org.elasticsearch.xpack.qa.sql.cli;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Request;
import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -19,7 +19,6 @@ import org.junit.Before;

import java.io.IOException;

import static java.util.Collections.singletonMap;
import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts;

public abstract class CliIntegrationTestCase extends ESRestTestCase {

@@ -60,11 +59,13 @@ public abstract class CliIntegrationTestCase extends ESRestTestCase {
}

protected void index(String index, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
Request request = new Request("PUT", "/" + index + "/doc/1");
request.addParameter("refresh", "true");
XContentBuilder builder = JsonXContent.contentBuilder().startObject();
body.accept(builder);
builder.endObject();
HttpEntity doc = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/" + index + "/doc/1", singletonMap("refresh", "true"), doc);
request.setJsonEntity(Strings.toString(builder));
client().performRequest(request);
}

public String command(String command) throws IOException {
@@ -8,8 +8,7 @@ package org.elasticsearch.xpack.qa.sql.cli;
import java.io.IOException;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;

import static java.util.Collections.emptyMap;
import org.elasticsearch.client.Request;

import static org.hamcrest.Matchers.startsWith;

@@ -41,7 +40,9 @@ public abstract class ErrorsTestCase extends CliIntegrationTestCase implements o
@Override
public void testSelectFromIndexWithoutTypes() throws Exception {
// Create an index without any types
client().performRequest("PUT", "/test", emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON));
Request request = new Request("PUT", "/test");
request.setJsonEntity("{}");
client().performRequest(request);

assertFoundOneProblem(command("SELECT * FROM test"));
assertEquals("line 1:15: [test] doesn't have any types so it is incompatible with sql" + END, readLine());
@@ -7,10 +7,10 @@ package org.elasticsearch.xpack.qa.sql.cli;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Request;

import java.io.IOException;

import static java.util.Collections.singletonMap;
import static org.hamcrest.Matchers.containsString;

/**

@@ -18,13 +18,16 @@ import static org.hamcrest.Matchers.containsString;
*/
public abstract class FetchSizeTestCase extends CliIntegrationTestCase {
public void testSelect() throws IOException {
Request request = new Request("PUT", "/test/doc/_bulk");
request.addParameter("refresh", "true");
StringBuilder bulk = new StringBuilder();
for (int i = 0; i < 20; i++) {
bulk.append("{\"index\":{}}\n");
bulk.append("{\"test_field\":" + i + "}\n");
}
client().performRequest("PUT", "/test/doc/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
request.setJsonEntity(bulk.toString());
client().performRequest(request);

assertEquals("[?1l>[?1000l[?2004lfetch size set to [90m4[0m", command("fetch size = 4"));
assertEquals("[?1l>[?1000l[?2004lfetch separator set to \"[90m -- fetch sep -- [0m\"",
command("fetch separator = \" -- fetch sep -- \""));
@@ -8,6 +8,7 @@ package org.elasticsearch.xpack.qa.sql.jdbc;
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.Strings;

@@ -55,6 +56,7 @@ public class DataLoader {
.endObject();
}
protected static void loadDatasetIntoEs(RestClient client, String index) throws Exception {
Request request = new Request("PUT", "/" + index);
XContentBuilder createIndex = JsonXContent.contentBuilder().startObject();
createIndex.startObject("settings");
{

@@ -91,10 +93,8 @@ public class DataLoader {
createIndex.endObject();
}
createIndex.endObject().endObject();

client.performRequest("PUT", "/" + index, emptyMap(), new StringEntity(Strings.toString(createIndex),
ContentType.APPLICATION_JSON));

request.setJsonEntity(Strings.toString(createIndex));
client.performRequest(request);

Map<String, String> deps = new LinkedHashMap<>();
csvToLines("departments", (titles, fields) -> deps.put(fields.get(0), fields.get(1)));

@@ -119,6 +119,8 @@ public class DataLoader {
list.add(dep);
});

request = new Request("POST", "/" + index + "/emp/_bulk");
request.addParameter("refresh", "true");
StringBuilder bulk = new StringBuilder();
csvToLines("employees", (titles, fields) -> {
bulk.append("{\"index\":{}}\n");

@@ -149,14 +151,13 @@ public class DataLoader {

bulk.append("}\n");
});

client.performRequest("POST", "/" + index + "/emp/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
request.setJsonEntity(bulk.toString());
client.performRequest(request);
}

protected static void makeAlias(RestClient client, String aliasName, String... indices) throws Exception {
for (String index : indices) {
client.performRequest("POST", "/" + index + "/_alias/" + aliasName);
client.performRequest(new Request("POST", "/" + index + "/_alias/" + aliasName));
}
}
@@ -9,8 +9,7 @@ import java.sql.Connection;
import java.sql.SQLException;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;

import static java.util.Collections.emptyMap;
import org.elasticsearch.client.Request;

import static org.hamcrest.Matchers.startsWith;

@@ -37,7 +36,9 @@ public class ErrorsTestCase extends JdbcIntegrationTestCase implements org.elast
@Override
public void testSelectFromIndexWithoutTypes() throws Exception {
// Create an index without any types
client().performRequest("PUT", "/test", emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON));
Request request = new Request("PUT", "/test");
request.setJsonEntity("{}");
client().performRequest(request);

try (Connection c = esJdbc()) {
SQLException e = expectThrows(SQLException.class, () -> c.prepareStatement("SELECT * FROM test").executeQuery());
@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.qa.sql.jdbc;

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Request;
import org.junit.Before;

import java.io.IOException;

@@ -15,7 +16,6 @@ import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

import static java.util.Collections.singletonMap;
import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearchContexts;

/**

@@ -25,13 +25,15 @@ import static org.elasticsearch.xpack.qa.sql.rest.RestSqlTestCase.assertNoSearch
public class FetchSizeTestCase extends JdbcIntegrationTestCase {
@Before
public void createTestIndex() throws IOException {
Request request = new Request("PUT", "/test/doc/_bulk");
request.addParameter("refresh", "true");
StringBuilder bulk = new StringBuilder();
for (int i = 0; i < 20; i++) {
bulk.append("{\"index\":{}}\n");
bulk.append("{\"test_field\":" + i + "}\n");
}
client().performRequest("PUT", "/test/doc/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
request.setJsonEntity(bulk.toString());
client().performRequest(request);
}

/**
@@ -9,6 +9,7 @@ import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.common.CheckedConsumer;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -85,16 +86,18 @@ public abstract class JdbcIntegrationTestCase extends ESRestTestCase {
}

public static void index(String index, CheckedConsumer<XContentBuilder, IOException> body) throws IOException {
Request request = new Request("PUT", "/" + index + "/doc/1");
request.addParameter("refresh", "true");
XContentBuilder builder = JsonXContent.contentBuilder().startObject();
body.accept(builder);
builder.endObject();
HttpEntity doc = new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON);
client().performRequest("PUT", "/" + index + "/doc/1", singletonMap("refresh", "true"), doc);
request.setJsonEntity(Strings.toString(builder));
client().performRequest(request);
}

protected String clusterName() {
try {
String response = EntityUtils.toString(client().performRequest("GET", "/").getEntity());
String response = EntityUtils.toString(client().performRequest(new Request("GET", "/")).getEntity());
return XContentHelper.convertToMap(JsonXContent.jsonXContent, response, false).get("cluster_name").toString();
} catch (IOException e) {
throw new RuntimeException(e);
@@ -6,6 +6,7 @@
package org.elasticsearch.xpack.qa.sql.jdbc;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;

@@ -49,7 +50,7 @@ public abstract class SpecBaseIntegrationTestCase extends JdbcIntegrationTestCas

@Before
public void setupTestDataIfNeeded() throws Exception {
if (client().performRequest("HEAD", "/test_emp").getStatusLine().getStatusCode() == 404) {
if (client().performRequest(new Request("HEAD", "/test_emp")).getStatusLine().getStatusCode() == 404) {
DataLoader.loadDatasetIntoEs(client());
}
}

@@ -62,7 +63,7 @@ public abstract class SpecBaseIntegrationTestCase extends JdbcIntegrationTestCas
@AfterClass
public static void wipeTestData() throws IOException {
try {
adminClient().performRequest("DELETE", "/*");
adminClient().performRequest(new Request("DELETE", "/*"));
} catch (ResponseException e) {
// 404 here just means we had no indexes
if (e.getResponse().getStatusLine().getStatusCode() != 404) {
@@ -12,6 +12,7 @@ import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.common.CheckedSupplier;

@@ -74,16 +75,19 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
}

public void testNextPage() throws IOException {
Request request = new Request("POST", "/test/test/_bulk");
request.addParameter("refresh", "true");
String mode = randomMode();
StringBuilder bulk = new StringBuilder();
for (int i = 0; i < 20; i++) {
bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n");
bulk.append("{\"text\":\"text" + i + "\", \"number\":" + i + "}\n");
}
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
request.setJsonEntity(bulk.toString());
client().performRequest(request);

String request = "{\"query\":\""
String sqlRequest =
"{\"query\":\""
+ " SELECT text, number, SQRT(number) AS s, SCORE()"
+ " FROM test"
+ " ORDER BY number, SCORE()\", "

@@ -94,7 +98,7 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
for (int i = 0; i < 20; i += 2) {
Map<String, Object> response;
if (i == 0) {
response = runSql(mode, new StringEntity(request, ContentType.APPLICATION_JSON));
response = runSql(mode, new StringEntity(sqlRequest, ContentType.APPLICATION_JSON));
} else {
response = runSql(mode, new StringEntity("{\"cursor\":\"" + cursor + "\"}",
ContentType.APPLICATION_JSON));

@@ -138,12 +142,14 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
}

public void testScoreWithFieldNamedScore() throws IOException {
Request request = new Request("POST", "/test/test/_bulk");
request.addParameter("refresh", "true");
String mode = randomMode();
StringBuilder bulk = new StringBuilder();
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
bulk.append("{\"name\":\"test\", \"score\":10}\n");
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
request.setJsonEntity(bulk.toString());
client().performRequest(request);

Map<String, Object> expected = new HashMap<>();
expected.put("columns", Arrays.asList(

@@ -209,7 +215,9 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
@Override
public void testSelectFromIndexWithoutTypes() throws Exception {
// Create an index without any types
client().performRequest("PUT", "/test", emptyMap(), new StringEntity("{}", ContentType.APPLICATION_JSON));
Request request = new Request("PUT", "/test");
request.setJsonEntity("{}");
client().performRequest(request);
String mode = randomFrom("jdbc", "plain");
expectBadRequest(() -> runSql(mode, "SELECT * FROM test"),
containsString("1:15: [test] doesn't have any types so it is incompatible with sql"));

@@ -229,24 +237,9 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
containsString("1:8: Unknown function [missing]"));
}

private void index(String... docs) throws IOException {
StringBuilder bulk = new StringBuilder();
for (String doc : docs) {
bulk.append("{\"index\":{}\n");
bulk.append(doc + "\n");
}
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
}

@Override
public void testSelectProjectScoreInAggContext() throws Exception {
StringBuilder bulk = new StringBuilder();
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
bulk.append("{\"foo\":1}\n");
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));

index("{\"foo\":1}");
expectBadRequest(() -> runSql(randomMode(),
" SELECT foo, SCORE(), COUNT(*)"
+ " FROM test"
@@ -256,12 +249,7 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe

@Override
public void testSelectOrderByScoreInAggContext() throws Exception {
StringBuilder bulk = new StringBuilder();
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
bulk.append("{\"foo\":1}\n");
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));

index("{\"foo\":1}");
expectBadRequest(() -> runSql(randomMode(),
" SELECT foo, COUNT(*)"
+ " FROM test"

@@ -272,36 +260,21 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe

@Override
public void testSelectGroupByScore() throws Exception {
StringBuilder bulk = new StringBuilder();
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
bulk.append("{\"foo\":1}\n");
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));

index("{\"foo\":1}");
expectBadRequest(() -> runSql(randomMode(), "SELECT COUNT(*) FROM test GROUP BY SCORE()"),
containsString("Cannot use [SCORE()] for grouping"));
}

@Override
public void testSelectScoreSubField() throws Exception {
StringBuilder bulk = new StringBuilder();
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
bulk.append("{\"foo\":1}\n");
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));

index("{\"foo\":1}");
expectBadRequest(() -> runSql(randomMode(), "SELECT SCORE().bar FROM test"),
containsString("line 1:15: extraneous input '.' expecting {<EOF>, ','"));
}

@Override
public void testSelectScoreInScalar() throws Exception {
StringBuilder bulk = new StringBuilder();
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
bulk.append("{\"foo\":1}\n");
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));

index("{\"foo\":1}");
expectBadRequest(() -> runSql(randomMode(), "SELECT SIN(SCORE()) FROM test"),
containsString("line 1:12: [SCORE()] cannot be an argument to a function"));
}

@@ -340,37 +313,32 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
}

private Map<String, Object> runSql(String mode, HttpEntity sql, String suffix) throws IOException {
Map<String, String> params = new TreeMap<>();
params.put("error_trace", "true"); // Helps with debugging in case something crazy happens on the server.
params.put("pretty", "true"); // Improves error reporting readability
Request request = new Request("POST", "/_xpack/sql" + suffix);
request.addParameter("error_trace", "true"); // Helps with debugging in case something crazy happens on the server.
request.addParameter("pretty", "true"); // Improves error reporting readability
if (randomBoolean()) {
// We default to JSON but we force it randomly for extra coverage
params.put("format", "json");
request.addParameter("format", "json");
}
if (Strings.hasText(mode)) {
params.put("mode", mode); // JDBC or PLAIN mode
if (false == mode.isEmpty()) {
request.addParameter("mode", mode); // JDBC or PLAIN mode
}
Header[] headers = randomFrom(
request.setHeaders(randomFrom(
new Header[] {},
new Header[] {new BasicHeader("Accept", "*/*")},
new Header[] {new BasicHeader("Accpet", "application/json")});
Response response = client().performRequest("POST", "/_xpack/sql" + suffix, params, sql);
new Header[] {new BasicHeader("Accpet", "application/json")}));
request.setEntity(sql);
Response response = client().performRequest(request);
try (InputStream content = response.getEntity().getContent()) {
return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
}
}

public void testBasicTranslateQuery() throws IOException {
StringBuilder bulk = new StringBuilder();
bulk.append("{\"index\":{\"_id\":\"1\"}}\n");
bulk.append("{\"test\":\"test\"}\n");
bulk.append("{\"index\":{\"_id\":\"2\"}}\n");
bulk.append("{\"test\":\"test\"}\n");
client().performRequest("POST", "/test_translate/test/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
index("{\"test\":\"test\"}", "{\"test\":\"test\"}");

Map<String, Object> response = runSql(randomMode(), "SELECT * FROM test_translate", "/translate/");
assertEquals(response.get("size"), 1000);
Map<String, Object> response = runSql(randomMode(), "SELECT * FROM test", "/translate/");
assertEquals(1000, response.get("size"));
@SuppressWarnings("unchecked")
Map<String, Object> source = (Map<String, Object>) response.get("_source");
assertNotNull(source);
@@ -459,13 +427,12 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
}

public void testNextPageText() throws IOException {
StringBuilder bulk = new StringBuilder();
for (int i = 0; i < 20; i++) {
bulk.append("{\"index\":{\"_id\":\"" + i + "\"}}\n");
bulk.append("{\"text\":\"text" + i + "\", \"number\":" + i + "}\n");
int size = 20;
String[] docs = new String[size];
for (int i = 0; i < size; i++) {
docs[i] = "{\"text\":\"text" + i + "\", \"number\":" + i + "}\n";
}
client().performRequest("POST", "/test/test/_bulk", singletonMap("refresh", "true"),
new StringEntity(bulk.toString(), ContentType.APPLICATION_JSON));
index(docs);

String request = "{\"query\":\"SELECT text, number, number + 5 AS sum FROM test ORDER BY number\", \"fetch_size\":2}";

@@ -563,23 +530,33 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
return runSqlAsText("", new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON), accept);
}

/**
* Run SQL as text using the {@code Accept} header to specify the format
* rather than the {@code format} parameter.
*/
private Tuple<String, String> runSqlAsText(String suffix, HttpEntity entity, String accept) throws IOException {
Response response = client().performRequest("POST", "/_xpack/sql" + suffix, singletonMap("error_trace", "true"),
entity, new BasicHeader("Accept", accept));
Request request = new Request("POST", "/_xpack/sql" + suffix);
request.addParameter("error_trace", "true");
request.setEntity(entity);
request.setHeaders(new BasicHeader("Accept", accept));
Response response = client().performRequest(request);
return new Tuple<>(
Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)),
response.getHeader("Cursor")
);
}

/**
* Run SQL as text using the {@code format} parameter to specify the format
* rather than an {@code Accept} header.
*/
private Tuple<String, String> runSqlAsTextFormat(String sql, String format) throws IOException {
StringEntity entity = new StringEntity("{\"query\":\"" + sql + "\"}", ContentType.APPLICATION_JSON);
Request request = new Request("POST", "/_xpack/sql");
request.addParameter("error_trace", "true");
request.addParameter("format", format);
request.setJsonEntity("{\"query\":\"" + sql + "\"}");

Map<String, String> params = new HashMap<>();
params.put("error_trace", "true");
params.put("format", format);

Response response = client().performRequest("POST", "/_xpack/sql", params, entity);
Response response = client().performRequest(request);
return new Tuple<>(
Streams.copyToString(new InputStreamReader(response.getEntity().getContent(), StandardCharsets.UTF_8)),
response.getHeader("Cursor")
@@ -595,23 +572,14 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
}

public static int getNumberOfSearchContexts(String index) throws IOException {
Response response = client().performRequest("GET", "/_stats/search");
Map<String, Object> stats;
try (InputStream content = response.getEntity().getContent()) {
stats = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
}
return getOpenContexts(stats, index);
return getOpenContexts(searchStats(), index);
}

public static void assertNoSearchContexts() throws IOException {
Response response = client().performRequest("GET", "/_stats/search");
Map<String, Object> stats;
try (InputStream content = response.getEntity().getContent()) {
stats = XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
}
Map<String, Object> stats = searchStats();
@SuppressWarnings("unchecked")
Map<String, Object> indexStats = (Map<String, Object>) stats.get("indices");
for (String index : indexStats.keySet()) {
Map<String, Object> indicesStats = (Map<String, Object>) stats.get("indices");
for (String index : indicesStats.keySet()) {
if (index.startsWith(".") == false) { // We are not interested in internal indices
assertEquals(index + " should have no search contexts", 0, getOpenContexts(stats, index));
}

@@ -619,12 +587,34 @@ public abstract class RestSqlTestCase extends ESRestTestCase implements ErrorsTe
}

@SuppressWarnings("unchecked")
public static int getOpenContexts(Map<String, Object> indexStats, String index) {
return (int) ((Map<String, Object>) ((Map<String, Object>) ((Map<String, Object>) ((Map<String, Object>)
indexStats.get("indices")).get(index)).get("total")).get("search")).get("open_contexts");
private static int getOpenContexts(Map<String, Object> stats, String index) {
stats = (Map<String, Object>) stats.get("indices");
stats = (Map<String, Object>) stats.get(index);
stats = (Map<String, Object>) stats.get("total");
stats = (Map<String, Object>) stats.get("search");
return (Integer) stats.get("open_contexts");
}

private static Map<String, Object> searchStats() throws IOException {
Response response = client().performRequest(new Request("GET", "/_stats/search"));
try (InputStream content = response.getEntity().getContent()) {
return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false);
}
}

public static String randomMode() {
return randomFrom("", "jdbc", "plain");
}

private void index(String... docs) throws IOException {
Request request = new Request("POST", "/test/test/_bulk");
request.addParameter("refresh", "true");
StringBuilder bulk = new StringBuilder();
for (String doc : docs) {
bulk.append("{\"index\":{}\n");
bulk.append(doc + "\n");
}
request.setJsonEntity(bulk.toString());
client().performRequest(request);
}
}