Merge remote-tracking branch 'es/master' into feature/ingest
commit aaacf096d2
@@ -78,7 +78,7 @@ class RandomizedTestingTask extends DefaultTask {
     @Input
     String argLine = null

-    Map<String, String> systemProperties = new HashMap<>()
+    Map<String, Object> systemProperties = new HashMap<>()
     PatternFilterable patternSet = new PatternSet()

     RandomizedTestingTask() {
@@ -100,7 +100,7 @@ class RandomizedTestingTask extends DefaultTask {
         jvmArgs.add(argument)
     }

-    void systemProperty(String property, String value) {
+    void systemProperty(String property, Object value) {
         systemProperties.put(property, value)
     }

@@ -245,8 +245,8 @@ class RandomizedTestingTask extends DefaultTask {
                 exclude(name: excludePattern)
             }
         }
-        for (Map.Entry<String, String> prop : systemProperties) {
-            sysproperty key: prop.getKey(), value: prop.getValue()
+        for (Map.Entry<String, Object> prop : systemProperties) {
+            sysproperty key: prop.getKey(), value: prop.getValue().toString()
         }
         makeListeners()
     }
@@ -61,11 +61,14 @@ public class ForbiddenPatternsTask extends DefaultTask {
         // add mandatory rules
         patterns.put('nocommit', /nocommit/)
         patterns.put('tab', /\t/)
+
+        inputs.property("excludes", filesFilter.excludes)
+        inputs.property("rules", patterns)
     }

     /** Adds a file glob pattern to be excluded */
     public void exclude(String... excludes) {
-        this.filesFilter.exclude(excludes)
+        filesFilter.exclude(excludes)
     }

     /** Adds a pattern to forbid. T */
@@ -33,10 +33,10 @@ class ClusterConfiguration {
     int numNodes = 1

     @Input
-    int baseHttpPort = 9400
+    int httpPort = 0

     @Input
-    int baseTransportPort = 9500
+    int transportPort = 0

     @Input
     boolean daemonize = true
@@ -55,7 +55,7 @@ class ClusterConfiguration {
     @Input
     Closure waitCondition = { NodeInfo node, AntBuilder ant ->
         File tmpFile = new File(node.cwd, 'wait.success')
-        ant.get(src: "http://localhost:${node.httpPort()}",
+        ant.get(src: "http://${node.httpUri()}",
                 dest: tmpFile.toString(),
                 ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
                 retries: 10)
@@ -38,8 +38,10 @@ class ClusterFormationTasks {

    /**
     * Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
+     *
+     * Returns an object that will resolve at execution time of the given task to a uri for the cluster.
     */
-    static void setup(Project project, Task task, ClusterConfiguration config) {
+    static Object setup(Project project, Task task, ClusterConfiguration config) {
        if (task.getEnabled() == false) {
            // no need to add cluster formation tasks if the task won't run!
            return
@@ -55,6 +57,9 @@ class ClusterFormationTasks {

        Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
        task.dependsOn(wait)
+
+        // delay the resolution of the uri by wrapping in a closure, so it is not used until read for tests
+        return "${-> nodes[0].transportUri()}"
    }

    /** Adds a dependency on the given distribution */
@@ -200,17 +205,24 @@ class ClusterFormationTasks {
    /** Adds a task to write elasticsearch.yml for the given node configuration */
    static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node) {
        Map esConfig = [
-            'cluster.name' : node.clusterName,
-            'http.port' : node.httpPort(),
-            'transport.tcp.port' : node.transportPort(),
-            'pidfile' : node.pidFile,
-            'discovery.zen.ping.unicast.hosts': (0..<node.config.numNodes).collect{"127.0.0.1:${node.config.baseTransportPort + it}"}.join(','),
-            'path.repo' : "${node.homeDir}/repo",
-            'path.shared_data' : "${node.homeDir}/../",
-            // Define a node attribute so we can test that it exists
-            'node.testattr' : 'test',
-            'repositories.url.allowed_urls' : 'http://snapshot.test*'
+            'cluster.name' : node.clusterName,
+            'pidfile' : node.pidFile,
+            'path.repo' : "${node.homeDir}/repo",
+            'path.shared_data' : "${node.homeDir}/../",
+            // Define a node attribute so we can test that it exists
+            'node.testattr' : 'test',
+            'repositories.url.allowed_urls': 'http://snapshot.test*'
        ]
+        if (node.config.numNodes == 1) {
+            esConfig['http.port'] = node.config.httpPort
+            esConfig['transport.tcp.port'] = node.config.transportPort
+        } else {
+            // TODO: fix multi node so it doesn't use hardcoded prots
+            esConfig['http.port'] = 9400 + node.nodeNum
+            esConfig['transport.tcp.port'] = 9500 + node.nodeNum
+            esConfig['discovery.zen.ping.unicast.hosts'] = (0..<node.config.numNodes).collect{"localhost:${9500 + it}"}.join(',')
+
+        }
        esConfig.putAll(node.config.settings)

        Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
@@ -400,7 +412,12 @@ class ClusterFormationTasks {
                resourceexists {
                    file(file: node.pidFile.toString())
                }
-                socket(server: '127.0.0.1', port: node.httpPort())
+                resourceexists {
+                    file(file: node.httpPortsFile.toString())
+                }
+                resourceexists {
+                    file(file: node.transportPortsFile.toString())
+                }
            }
        }
    }
@@ -444,6 +461,8 @@ class ClusterFormationTasks {
        logger.error("|-----------------------------------------")
        logger.error("| failure marker exists: ${node.failedMarker.exists()}")
        logger.error("| pid file exists: ${node.pidFile.exists()}")
+        logger.error("| http ports file exists: ${node.httpPortsFile.exists()}")
+        logger.error("| transport ports file exists: ${node.transportPortsFile.exists()}")
        // the waitfor failed, so dump any output we got (if info logging this goes directly to stdout)
        logger.error("|\n| [ant output]")
        node.buffer.toString('UTF-8').eachLine { line -> logger.error("| ${line}") }
@@ -43,6 +43,12 @@ class NodeInfo {
    /** the pid file the node will use */
    File pidFile

+    /** a file written by elasticsearch containing the ports of each bound address for http */
+    File httpPortsFile
+
+    /** a file written by elasticsearch containing the ports of each bound address for transport */
+    File transportPortsFile
+
    /** elasticsearch home dir */
    File homeDir

@@ -92,6 +98,10 @@ class NodeInfo {
        homeDir = homeDir(baseDir, config.distribution)
        confDir = confDir(baseDir, config.distribution)
        configFile = new File(confDir, 'elasticsearch.yml')
+        // even for rpm/deb, the logs are under home because we dont start with real services
+        File logsDir = new File(homeDir, 'logs')
+        httpPortsFile = new File(logsDir, 'http.ports')
+        transportPortsFile = new File(logsDir, 'transport.ports')
        cwd = new File(baseDir, "cwd")
        failedMarker = new File(cwd, 'run.failed')
        startLog = new File(cwd, 'run.log')
@@ -119,6 +129,7 @@ class NodeInfo {
            'JAVA_HOME' : project.javaHome,
            'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
        ]
+        args.add("-Des.tests.portsfile=true")
        args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
        for (Map.Entry<String, String> property : System.properties.entrySet()) {
            if (property.getKey().startsWith('es.')) {
@@ -159,14 +170,14 @@ class NodeInfo {
        wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
    }

-    /** Returns the http port for this node */
-    int httpPort() {
-        return config.baseHttpPort + nodeNum
+    /** Returns an address and port suitable for a uri to connect to this node over http */
+    String httpUri() {
+        return httpPortsFile.readLines("UTF-8").get(0)
    }

-    /** Returns the transport port for this node */
-    int transportPort() {
-        return config.baseTransportPort + nodeNum
+    /** Returns an address and port suitable for a uri to connect to this node over transport protocol */
+    String transportUri() {
+        return transportPortsFile.readLines("UTF-8").get(0)
    }

    /** Returns the directory elasticsearch home is contained in for the given distribution */
@@ -57,12 +57,12 @@ public class RestIntegTestTask extends RandomizedTestingTask {
        RestSpecHack.configureDependencies(project)
        project.afterEvaluate {
            dependsOn(RestSpecHack.configureTask(project, includePackaged))
-            systemProperty('tests.cluster', "localhost:${clusterConfig.baseTransportPort}")
        }
        // this must run after all projects have been configured, so we know any project
        // references can be accessed as a fully configured
        project.gradle.projectsEvaluated {
-            ClusterFormationTasks.setup(project, this, clusterConfig)
+            Object clusterUri = ClusterFormationTasks.setup(project, this, clusterConfig)
+            systemProperty('tests.cluster', clusterUri)
        }
    }

@@ -8,7 +8,7 @@ import org.gradle.util.ConfigureUtil

public class RunTask extends DefaultTask {

-    ClusterConfiguration clusterConfig = new ClusterConfiguration(baseHttpPort: 9200, baseTransportPort: 9300, daemonize: false)
+    ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false)

    public RunTask() {
        description = "Runs elasticsearch with '${project.path}'"
@@ -123,3 +123,6 @@ java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a r
 java.util.Collections#EMPTY_LIST
 java.util.Collections#EMPTY_MAP
 java.util.Collections#EMPTY_SET
+
+java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness
+java.util.Random#<init>() @ Use org.elasticsearch.common.random.Randomness#create for reproducible sources of randomness
@@ -54,7 +54,6 @@ public class MapperQueryParser extends QueryParser {
    static {
        Map<String, FieldQueryExtension> fieldQueryExtensions = new HashMap<>();
        fieldQueryExtensions.put(ExistsFieldQueryExtension.NAME, new ExistsFieldQueryExtension());
-        fieldQueryExtensions.put(MissingFieldQueryExtension.NAME, new MissingFieldQueryExtension());
        FIELD_QUERY_EXTENSIONS = unmodifiableMap(fieldQueryExtensions);
    }

@@ -1,42 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.lucene.queryparser.classic;
-
-import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.Query;
-import org.elasticsearch.index.query.MissingQueryBuilder;
-import org.elasticsearch.index.query.QueryShardContext;
-
-/**
- *
- */
-public class MissingFieldQueryExtension implements FieldQueryExtension {
-
-    public static final String NAME = "_missing_";
-
-    @Override
-    public Query query(QueryShardContext context, String queryText) {
-        Query query = MissingQueryBuilder.newFilter(context, queryText, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE, MissingQueryBuilder.DEFAULT_NULL_VALUE);
-        if (query != null) {
-            return new ConstantScoreQuery(query);
-        }
-        return null;
-    }
-}
@@ -51,12 +51,12 @@ public class BulkItemResponse implements Streamable, StatusToXContent {
        builder.startObject(opType);
        if (failure == null) {
            response.toXContent(builder, params);
-            builder.field(Fields.STATUS, response.status());
+            builder.field(Fields.STATUS, response.status().getStatus());
        } else {
            builder.field(Fields._INDEX, failure.getIndex());
            builder.field(Fields._TYPE, failure.getType());
            builder.field(Fields._ID, failure.getId());
-            builder.field(Fields.STATUS, failure.getStatus());
+            builder.field(Fields.STATUS, failure.getStatus().getStatus());
            builder.startObject(Fields.ERROR);
            ElasticsearchException.toXContent(builder, params, failure.getCause());
            builder.endObject();
@@ -164,10 +164,6 @@ public class ClusterModule extends AbstractModule {
        registerClusterDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME);
        registerClusterDynamicSetting(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, Validator.TIME);
        registerClusterDynamicSetting(MetaData.SETTING_READ_ONLY, Validator.EMPTY);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, Validator.POSITIVE_BYTES_SIZE);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS, Validator.INTEGER);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE, Validator.BYTES_SIZE);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_COMPRESS, Validator.EMPTY);
        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, Validator.POSITIVE_INTEGER);
        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, Validator.POSITIVE_INTEGER);
        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
@@ -746,8 +746,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
    /** All known byte-sized cluster settings. */
    public static final Set<String> CLUSTER_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet(
        IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC,
-        RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE,
-        RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE,
        RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC));

@@ -27,18 +27,11 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.index.shard.ShardId;

-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import java.util.*;
 import java.util.function.Predicate;

 /**
@@ -671,7 +664,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
    }

    public void shuffle() {
-        Collections.shuffle(unassigned);
+        Randomness.shuffle(unassigned);
    }

    /**
@@ -39,15 +39,12 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
 import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;

-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Objects;
-import java.util.Set;
+import java.util.*;
 import java.util.function.Function;
 import java.util.stream.Collectors;

@@ -0,0 +1,120 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common;
+
+import org.elasticsearch.common.settings.Settings;
+
+import java.lang.reflect.Method;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * Provides factory methods for producing reproducible sources of
+ * randomness. Reproducible sources of randomness contribute to
+ * reproducible tests. When running the Elasticsearch test suite, the
+ * test runner will establish a global random seed accessible via the
+ * system property "tests.seed". By seeding a random number generator
+ * with this global seed, we ensure that instances of Random produced
+ * with this class produce reproducible sources of randomness under
+ * when running under the Elasticsearch test suite. Alternatively,
+ * a reproducible source of randomness can be produced by providing a
+ * setting a reproducible seed. When running the Elasticsearch server
+ * process, non-reproducible sources of randomness are provided (unless
+ * a setting is provided for a module that exposes a seed setting (e.g.,
+ * DiscoveryService#SETTING_DISCOVERY_SEED)).
+ */
+public final class Randomness {
+    private static final Method currentMethod;
+    private static final Method getRandomMethod;
+
+    static {
+        Method maybeCurrentMethod;
+        Method maybeGetRandomMethod;
+        try {
+            Class<?> clazz = Class.forName("com.carrotsearch.randomizedtesting.RandomizedContext");
+            maybeCurrentMethod = clazz.getMethod("current");
+            maybeGetRandomMethod = clazz.getMethod("getRandom");
+        } catch (Throwable t) {
+            maybeCurrentMethod = null;
+            maybeGetRandomMethod = null;
+        }
+        currentMethod = maybeCurrentMethod;
+        getRandomMethod = maybeGetRandomMethod;
+    }
+
+    private Randomness() {}
+
+    /**
+     * Provides a reproducible source of randomness seeded by a long
+     * seed in the settings with the key setting.
+     *
+     * @param settings the settings containing the seed
+     * @param setting the key to access the seed
+     * @return a reproducible source of randomness
+     */
+    public static Random get(Settings settings, String setting) {
+        Long maybeSeed = settings.getAsLong(setting, null);
+        if (maybeSeed != null) {
+            return new Random(maybeSeed);
+        } else {
+            return get();
+        }
+    }
+
+    /**
+     * Provides a source of randomness that is reproducible when
+     * running under the Elasticsearch test suite, and otherwise
+     * produces a non-reproducible source of randomness. Reproducible
+     * sources of randomness are created when the system property
+     * "tests.seed" is set and the security policy allows reading this
+     * system property. Otherwise, non-reproducible sources of
+     * randomness are created.
+     *
+     * @return a source of randomness
+     * @throws IllegalStateException if running tests but was not able
+     *                               to acquire an instance of Random from
+     *                               RandomizedContext or tests are
+     *                               running but tests.seed is not set
+     */
+    public static Random get() {
+        if (currentMethod != null && getRandomMethod != null) {
+            try {
+                Object randomizedContext = currentMethod.invoke(null);
+                return (Random) getRandomMethod.invoke(randomizedContext);
+            } catch (ReflectiveOperationException e) {
+                // unexpected, bail
+                throw new IllegalStateException("running tests but failed to invoke RandomizedContext#getRandom", e);
+            }
+        } else {
+            return getWithoutSeed();
+        }
+    }
+
+    private static Random getWithoutSeed() {
+        assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random";
+        return ThreadLocalRandom.current();
+    }
+
+    public static void shuffle(List<?> list) {
+        Collections.shuffle(list, get());
+    }
+}
@@ -20,6 +20,8 @@
 package org.elasticsearch.common.io;

 import java.nio.charset.StandardCharsets;
+
+import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.common.util.Callback;

 import java.io.BufferedReader;
@@ -68,6 +70,7 @@ public abstract class Streams {
    public static long copy(InputStream in, OutputStream out, byte[] buffer) throws IOException {
        Objects.requireNonNull(in, "No InputStream specified");
        Objects.requireNonNull(out, "No OutputStream specified");
+        boolean success = false;
        try {
            long byteCount = 0;
            int bytesRead;
@@ -76,17 +79,13 @@ public abstract class Streams {
                byteCount += bytesRead;
            }
            out.flush();
+            success = true;
            return byteCount;
        } finally {
-            try {
-                in.close();
-            } catch (IOException ex) {
-                // do nothing
-            }
-            try {
-                out.close();
-            } catch (IOException ex) {
-                // do nothing
+            if (success) {
+                IOUtils.close(in, out);
+            } else {
+                IOUtils.closeWhileHandlingException(in, out);
            }
        }
    }
@@ -130,6 +129,7 @@ public abstract class Streams {
    public static int copy(Reader in, Writer out) throws IOException {
        Objects.requireNonNull(in, "No Reader specified");
        Objects.requireNonNull(out, "No Writer specified");
+        boolean success = false;
        try {
            int byteCount = 0;
            char[] buffer = new char[BUFFER_SIZE];
@@ -139,17 +139,13 @@ public abstract class Streams {
                byteCount += bytesRead;
            }
            out.flush();
+            success = true;
            return byteCount;
        } finally {
-            try {
-                in.close();
-            } catch (IOException ex) {
-                // do nothing
-            }
-            try {
-                out.close();
-            } catch (IOException ex) {
-                // do nothing
+            if (success) {
+                IOUtils.close(in, out);
+            } else {
+                IOUtils.closeWhileHandlingException(in, out);
            }
        }
    }
@@ -23,6 +23,7 @@ import org.elasticsearch.ElasticsearchTimeoutException;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;
@@ -41,7 +42,6 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
    public static final String SETTING_INITIAL_STATE_TIMEOUT = "discovery.initial_state_timeout";
    public static final String SETTING_DISCOVERY_SEED = "discovery.id.seed";

-
    private static class InitialStateListener implements InitialStateDiscoveryListener {

        private final CountDownLatch latch = new CountDownLatch(1);
@@ -132,10 +132,7 @@ public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryServic
    }

    public static String generateNodeId(Settings settings) {
-        String seed = settings.get(DiscoveryService.SETTING_DISCOVERY_SEED);
-        if (seed != null) {
-            return Strings.randomBase64UUID(new Random(Long.parseLong(seed)));
-        }
-        return Strings.randomBase64UUID();
+        Random random = Randomness.get(settings, DiscoveryService.SETTING_DISCOVERY_SEED);
+        return Strings.randomBase64UUID(random);
    }
}
@@ -193,7 +193,8 @@ public class GeoShapeFieldMapper extends FieldMapper {
            } else if (Names.COERCE.equals(fieldName)) {
                builder.coerce(nodeBooleanValue(fieldNode));
                iterator.remove();
-            } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)) {
+            } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)
+                    && builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) {
                builder.fieldType().setPointsOnly(XContentMapValues.nodeBooleanValue(fieldNode));
                iterator.remove();
            }
@@ -284,6 +285,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
            termStrategy = new TermQueryPrefixTreeStrategy(prefixTree, names().indexName());
            termStrategy.setDistErrPct(distanceErrorPct());
            defaultStrategy = resolveStrategy(strategyName);
+            defaultStrategy.setPointsOnly(pointsOnly);
        }

        @Override
@@ -347,6 +349,9 @@ public class GeoShapeFieldMapper extends FieldMapper {
        public void setStrategyName(String strategyName) {
            checkIfFrozen();
            this.strategyName = strategyName;
+            if (this.strategyName.equals(SpatialStrategy.TERM)) {
+                this.pointsOnly = true;
+            }
        }

        public boolean pointsOnly() {
@@ -406,7 +411,6 @@ public class GeoShapeFieldMapper extends FieldMapper {

        public PrefixTreeStrategy resolveStrategy(String strategyName) {
            if (SpatialStrategy.RECURSIVE.getStrategyName().equals(strategyName)) {
-                recursiveStrategy.setPointsOnly(pointsOnly());
                return recursiveStrategy;
            }
            if (SpatialStrategy.TERM.getStrategyName().equals(strategyName)) {
@@ -446,7 +450,7 @@ public class GeoShapeFieldMapper extends FieldMapper {
            }
            shape = shapeBuilder.build();
        }
-        if (fieldType().defaultStrategy() instanceof RecursivePrefixTreeStrategy && fieldType().pointsOnly() && !(shape instanceof Point)) {
+        if (fieldType().pointsOnly() && !(shape instanceof Point)) {
            throw new MapperParsingException("[{" + fieldType().names().fullName() + "}] is configured for points only but a " +
                ((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + " was found");
        }
@@ -29,13 +29,10 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.compress.Compressor;
-import org.elasticsearch.common.compress.CompressorFactory;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
@@ -48,17 +45,13 @@ import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;

-import java.io.BufferedInputStream;
 import java.io.IOException;
-import java.io.InputStream;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Objects;

 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
-import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue;

 /**
  *
@@ -72,8 +65,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {
    public static class Defaults {
        public static final String NAME = SourceFieldMapper.NAME;
        public static final boolean ENABLED = true;
-        public static final long COMPRESS_THRESHOLD = -1;
-        public static final String FORMAT = null; // default format is to use the one provided

        public static final MappedFieldType FIELD_TYPE = new SourceFieldType();

@@ -93,12 +84,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {

        private boolean enabled = Defaults.ENABLED;

-        private long compressThreshold = Defaults.COMPRESS_THRESHOLD;
-
-        private Boolean compress = null;
-
-        private String format = Defaults.FORMAT;
-
        private String[] includes = null;
        private String[] excludes = null;

@@ -111,21 +96,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {
            return this;
        }

-        public Builder compress(boolean compress) {
-            this.compress = compress;
-            return this;
-        }
-
-        public Builder compressThreshold(long compressThreshold) {
-            this.compressThreshold = compressThreshold;
-            return this;
-        }
-
-        public Builder format(String format) {
-            this.format = format;
-            return this;
-        }
-
        public Builder includes(String[] includes) {
            this.includes = includes;
            return this;
@@ -138,7 +108,7 @@ public class SourceFieldMapper extends MetadataFieldMapper {

        @Override
        public SourceFieldMapper build(BuilderContext context) {
-            return new SourceFieldMapper(enabled, format, compress, compressThreshold, includes, excludes, context.indexSettings());
+            return new SourceFieldMapper(enabled, includes, excludes, context.indexSettings());
        }
    }

@@ -154,24 +124,8 @@ public class SourceFieldMapper extends MetadataFieldMapper {
                if (fieldName.equals("enabled")) {
                    builder.enabled(nodeBooleanValue(fieldNode));
                    iterator.remove();
-                } else if (fieldName.equals("compress") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
-                    if (fieldNode != null) {
-                        builder.compress(nodeBooleanValue(fieldNode));
-                    }
-                    iterator.remove();
-                } else if (fieldName.equals("compress_threshold") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
-                    if (fieldNode != null) {
-                        if (fieldNode instanceof Number) {
-                            builder.compressThreshold(((Number) fieldNode).longValue());
-                            builder.compress(true);
-                        } else {
-                            builder.compressThreshold(ByteSizeValue.parseBytesSizeValue(fieldNode.toString(), "compress_threshold").bytes());
-                            builder.compress(true);
-                        }
-                    }
-                    iterator.remove();
-                } else if ("format".equals(fieldName)) {
-                    builder.format(nodeStringValue(fieldNode, null));
+                } else if ("format".equals(fieldName) && parserContext.indexVersionCreated().before(Version.V_3_0_0)) {
+                    // ignore on old indices, reject on and after 3.0
                    iterator.remove();
                } else if (fieldName.equals("includes")) {
                    List<Object> values = (List<Object>) fieldNode;
@@ -242,30 +196,18 @@ public class SourceFieldMapper extends MetadataFieldMapper {
    /** indicates whether the source will always exist and be complete, for use by features like the update API */
    private final boolean complete;

-    private Boolean compress;
-    private long compressThreshold;
-
    private final String[] includes;
    private final String[] excludes;

-    private String format;
-
-    private XContentType formatContentType;
-
    private SourceFieldMapper(Settings indexSettings) {
-        this(Defaults.ENABLED, Defaults.FORMAT, null, -1, null, null, indexSettings);
+        this(Defaults.ENABLED, null, null, indexSettings);
    }

-    private SourceFieldMapper(boolean enabled, String format, Boolean compress, long compressThreshold,
-                              String[] includes, String[] excludes, Settings indexSettings) {
+    private SourceFieldMapper(boolean enabled, String[] includes, String[] excludes, Settings indexSettings) {
        super(NAME, Defaults.FIELD_TYPE.clone(), Defaults.FIELD_TYPE, indexSettings); // Only stored.
        this.enabled = enabled;
-        this.compress = compress;
-        this.compressThreshold = compressThreshold;
        this.includes = includes;
        this.excludes = excludes;
-        this.format = format;
-        this.formatContentType = format == null ? null : XContentType.fromRestContentType(format);
        this.complete = enabled && includes == null && excludes == null;
    }

@@ -321,71 +263,11 @@ public class SourceFieldMapper extends MetadataFieldMapper {
            Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(source, true);
            Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes);
            BytesStreamOutput bStream = new BytesStreamOutput();
-            StreamOutput streamOutput = bStream;
-            if (compress != null && compress && (compressThreshold == -1 || source.length() > compressThreshold)) {
-                streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
-            }
-            XContentType contentType = formatContentType;
-            if (contentType == null) {
-                contentType = mapTuple.v1();
-            }
-            XContentBuilder builder = XContentFactory.contentBuilder(contentType, streamOutput).map(filteredSource);
+            XContentType contentType = mapTuple.v1();
+            XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource);
            builder.close();

            source = bStream.bytes();
-        } else if (compress != null && compress && !CompressorFactory.isCompressed(source)) {
-            if (compressThreshold == -1 || source.length() > compressThreshold) {
-                BytesStreamOutput bStream = new BytesStreamOutput();
-                XContentType contentType = XContentFactory.xContentType(source);
-                if (formatContentType != null && formatContentType != contentType) {
-                    XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, CompressorFactory.defaultCompressor().streamOutput(bStream));
-                    builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source));
-                    builder.close();
-                } else {
-                    StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
-                    source.writeTo(streamOutput);
-                    streamOutput.close();
-                }
-                source = bStream.bytes();
-                // update the data in the context, so it can be compressed and stored compressed outside...
-                context.source(source);
-            }
-        } else if (formatContentType != null) {
-            // see if we need to convert the content type
-            Compressor compressor = CompressorFactory.compressor(source);
-            if (compressor != null) {
-                InputStream compressedStreamInput = compressor.streamInput(source.streamInput());
-                if (compressedStreamInput.markSupported() == false) {
-                    compressedStreamInput = new BufferedInputStream(compressedStreamInput);
-                }
-                XContentType contentType = XContentFactory.xContentType(compressedStreamInput);
-                if (contentType != formatContentType) {
-                    // we need to reread and store back, compressed....
-                    BytesStreamOutput bStream = new BytesStreamOutput();
-                    StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream);
-                    XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, streamOutput);
-                    builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(compressedStreamInput));
-                    builder.close();
-                    source = bStream.bytes();
-                    // update the data in the context, so we store it in the translog in this format
-                    context.source(source);
-                } else {
-                    compressedStreamInput.close();
-                }
-            } else {
-                XContentType contentType = XContentFactory.xContentType(source);
-                if (contentType != formatContentType) {
-                    // we need to reread and store back
-                    // we need to reread and store back, compressed....
-                    BytesStreamOutput bStream = new BytesStreamOutput();
-                    XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, bStream);
-                    builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source));
-                    builder.close();
-                    source = bStream.bytes();
-                    // update the data in the context, so we store it in the translog in this format
-                    context.source(source);
-                }
-            }
        }
        if (!source.hasArray()) {
            source = source.toBytesArray();
@@ -403,26 +285,13 @@ public class SourceFieldMapper extends MetadataFieldMapper {
        boolean includeDefaults = params.paramAsBoolean("include_defaults", false);

        // all are defaults, no need to write it at all
-        if (!includeDefaults && enabled == Defaults.ENABLED && compress == null && compressThreshold == -1 && includes == null && excludes == null) {
+        if (!includeDefaults && enabled == Defaults.ENABLED && includes == null && excludes == null) {
            return builder;
        }
        builder.startObject(contentType());
        if (includeDefaults || enabled != Defaults.ENABLED) {
            builder.field("enabled", enabled);
        }
-        if (includeDefaults || !Objects.equals(format, Defaults.FORMAT)) {
-            builder.field("format", format);
-        }
-        if (compress != null) {
-            builder.field("compress", compress);
-        } else if (includeDefaults) {
-            builder.field("compress", false);
-        }
-        if (compressThreshold != -1) {
-            builder.field("compress_threshold", new ByteSizeValue(compressThreshold).toString());
-        } else if (includeDefaults) {
-            builder.field("compress_threshold", -1);
-        }

        if (includes != null) {
            builder.field("includes", includes);
@@ -453,13 +322,6 @@ public class SourceFieldMapper extends MetadataFieldMapper {
            if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) {
                mergeResult.addConflict("Cannot update excludes setting for [_source]");
            }
-        } else {
-            if (sourceMergeWith.compress != null) {
-                this.compress = sourceMergeWith.compress;
-            }
-            if (sourceMergeWith.compressThreshold != -1) {
-                this.compressThreshold = sourceMergeWith.compressThreshold;
-            }
        }
    }
}
@@ -1,234 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.query;
-
-import org.apache.lucene.search.BooleanClause;
-import org.apache.lucene.search.BooleanQuery;
-import org.apache.lucene.search.ConstantScoreQuery;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermRangeQuery;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.lucene.search.Queries;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.index.mapper.MappedFieldType;
-import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper;
-import org.elasticsearch.index.mapper.object.ObjectMapper;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Objects;
-
-/**
- * Constructs a filter that have only null values or no value in the original field.
- */
-public class MissingQueryBuilder extends AbstractQueryBuilder<MissingQueryBuilder> {
-
-    public static final String NAME = "missing";
-
-    public static final boolean DEFAULT_NULL_VALUE = false;
-
-    public static final boolean DEFAULT_EXISTENCE_VALUE = true;
-
-    private final String fieldPattern;
-
-    private final boolean nullValue;
-
-    private final boolean existence;
-
-    static final MissingQueryBuilder PROTOTYPE = new MissingQueryBuilder("field", DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE);
-
-    /**
-     * Constructs a filter that returns documents with only null values or no value in the original field.
-     * @param fieldPattern the field to query
-     * @param nullValue should the missing filter automatically include fields with null value configured in the
-     * mappings. Defaults to <tt>false</tt>.
-     * @param existence should the missing filter include documents where the field doesn't exist in the docs.
-     * Defaults to <tt>true</tt>.
-     * @throws IllegalArgumentException when both <tt>existence</tt> and <tt>nullValue</tt> are set to false
-     */
-    public MissingQueryBuilder(String fieldPattern, boolean nullValue, boolean existence) {
-        if (Strings.isEmpty(fieldPattern)) {
-            throw new IllegalArgumentException("missing query must be provided with a [field]");
-        }
-        if (nullValue == false && existence == false) {
-            throw new IllegalArgumentException("missing query must have either 'existence', or 'null_value', or both set to true");
-        }
-        this.fieldPattern = fieldPattern;
-        this.nullValue = nullValue;
-        this.existence = existence;
-    }
-
-    public MissingQueryBuilder(String fieldPattern) {
-        this(fieldPattern, DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE);
-    }
-
-    public String fieldPattern() {
-        return this.fieldPattern;
-    }
-
-    /**
-     * Returns true if the missing filter will include documents where the field contains a null value, otherwise
-     * these documents will not be included.
-     */
-    public boolean nullValue() {
-        return this.nullValue;
-    }
-
-    /**
-     * Returns true if the missing filter will include documents where the field has no values, otherwise
-     * these documents will not be included.
-     */
-    public boolean existence() {
-        return this.existence;
-    }
-
-    @Override
-    protected void doXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject(NAME);
-        builder.field(MissingQueryParser.FIELD_FIELD.getPreferredName(), fieldPattern);
-        builder.field(MissingQueryParser.NULL_VALUE_FIELD.getPreferredName(), nullValue);
-        builder.field(MissingQueryParser.EXISTENCE_FIELD.getPreferredName(), existence);
-        printBoostAndQueryName(builder);
-        builder.endObject();
-    }
-
-    @Override
-    public String getWriteableName() {
-        return NAME;
-    }
-
-    @Override
-    protected Query doToQuery(QueryShardContext context) throws IOException {
-        return newFilter(context, fieldPattern, existence, nullValue);
-    }
-
-    public static Query newFilter(QueryShardContext context, String fieldPattern, boolean existence, boolean nullValue) {
-        if (!existence && !nullValue) {
-            throw new QueryShardException(context, "missing must have either existence, or null_value, or both set to true");
-        }
-
-        final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType = (FieldNamesFieldMapper.FieldNamesFieldType) context.getMapperService().fullName(FieldNamesFieldMapper.NAME);
-        if (fieldNamesFieldType == null) {
-            // can only happen when no types exist, so no docs exist either
-            return Queries.newMatchNoDocsQuery();
-        }
-
-        ObjectMapper objectMapper = context.getObjectMapper(fieldPattern);
-        if (objectMapper != null) {
-            // automatic make the object mapper pattern
-            fieldPattern = fieldPattern + ".*";
-        }
-
-        Collection<String> fields = context.simpleMatchToIndexNames(fieldPattern);
-        if (fields.isEmpty()) {
-            if (existence) {
-                // if we ask for existence of fields, and we found none, then we should match on all
-                return Queries.newMatchAllQuery();
-            }
-            return null;
-        }
-
-        Query existenceFilter = null;
-        Query nullFilter = null;
-
-        if (existence) {
-            BooleanQuery.Builder boolFilter = new BooleanQuery.Builder();
-            for (String field : fields) {
-                MappedFieldType fieldType = context.fieldMapper(field);
-                Query filter = null;
-                if (fieldNamesFieldType.isEnabled()) {
-                    final String f;
-                    if (fieldType != null) {
-                        f = fieldType.names().indexName();
-                    } else {
-                        f = field;
-                    }
-                    filter = fieldNamesFieldType.termQuery(f, context);
-                }
-                // if _field_names are not indexed, we need to go the slow way
-                if (filter == null && fieldType != null) {
-                    filter = fieldType.rangeQuery(null, null, true, true);
-                }
-                if (filter == null) {
-                    filter = new TermRangeQuery(field, null, null, true, true);
-                }
-                boolFilter.add(filter, BooleanClause.Occur.SHOULD);
-            }
-
-            existenceFilter = boolFilter.build();
-            existenceFilter = Queries.not(existenceFilter);;
-        }
-
-        if (nullValue) {
-            for (String field : fields) {
-                MappedFieldType fieldType = context.fieldMapper(field);
-                if (fieldType != null) {
-                    nullFilter = fieldType.nullValueQuery();
-                }
-            }
-        }
-
-        Query filter;
-        if (nullFilter != null) {
-            if (existenceFilter != null) {
-                filter = new BooleanQuery.Builder()
-                        .add(existenceFilter, BooleanClause.Occur.SHOULD)
-                        .add(nullFilter, BooleanClause.Occur.SHOULD)
-                        .build();
-            } else {
-                filter = nullFilter;
-            }
-        } else {
-            filter = existenceFilter;
-        }
-
-        if (filter == null) {
-            return null;
-        }
-
-        return new ConstantScoreQuery(filter);
-    }
-
-    @Override
-    protected MissingQueryBuilder doReadFrom(StreamInput in) throws IOException {
-        return new MissingQueryBuilder(in.readString(), in.readBoolean(), in.readBoolean());
-    }
-
-    @Override
-    protected void doWriteTo(StreamOutput out) throws IOException {
-        out.writeString(fieldPattern);
-        out.writeBoolean(nullValue);
-        out.writeBoolean(existence);
-    }
-
-    @Override
-    protected int doHashCode() {
-        return Objects.hash(fieldPattern, nullValue, existence);
-    }
-
-    @Override
-    protected boolean doEquals(MissingQueryBuilder other) {
-        return Objects.equals(fieldPattern, other.fieldPattern) &&
-                Objects.equals(nullValue, other.nullValue) &&
-                Objects.equals(existence, other.existence);
-    }
-}
@@ -1,88 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.query;
-
-import org.elasticsearch.common.ParseField;
-import org.elasticsearch.common.ParsingException;
-import org.elasticsearch.common.xcontent.XContentParser;
-
-import java.io.IOException;
-
-/**
- * Parser for missing query
- */
-public class MissingQueryParser implements QueryParser<MissingQueryBuilder> {
-
-    public static final ParseField FIELD_FIELD = new ParseField("field");
-    public static final ParseField NULL_VALUE_FIELD = new ParseField("null_value");
-    public static final ParseField EXISTENCE_FIELD = new ParseField("existence");
-
-    @Override
-    public String[] names() {
-        return new String[]{MissingQueryBuilder.NAME};
-    }
-
-    @Override
-    public MissingQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException {
-        XContentParser parser = parseContext.parser();
-
-        String fieldPattern = null;
-        String queryName = null;
-        float boost = AbstractQueryBuilder.DEFAULT_BOOST;
-        boolean nullValue = MissingQueryBuilder.DEFAULT_NULL_VALUE;
-        boolean existence = MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE;
-
-        XContentParser.Token token;
-        String currentFieldName = null;
-        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                currentFieldName = parser.currentName();
-            } else if (token.isValue()) {
-                if (parseContext.parseFieldMatcher().match(currentFieldName, FIELD_FIELD)) {
-                    fieldPattern = parser.text();
-                } else if (parseContext.parseFieldMatcher().match(currentFieldName, NULL_VALUE_FIELD)) {
-                    nullValue = parser.booleanValue();
-                } else if (parseContext.parseFieldMatcher().match(currentFieldName, EXISTENCE_FIELD)) {
-                    existence = parser.booleanValue();
-                } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) {
-                    queryName = parser.text();
-                } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
-                    boost = parser.floatValue();
-                } else {
-                    throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]");
-                }
-            } else {
-                throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]");
-            }
-        }
-
-        if (fieldPattern == null) {
-            throw new ParsingException(parser.getTokenLocation(), "missing must be provided with a [field]");
-        }
-        return new MissingQueryBuilder(fieldPattern, nullValue, existence)
-                .boost(boost)
-                .queryName(queryName);
-    }
-
-    @Override
-    public MissingQueryBuilder getBuilderPrototype() {
-        return MissingQueryBuilder.PROTOTYPE;
-    }
-}
@@ -810,27 +810,6 @@ public abstract class QueryBuilders {
        return new ExistsQueryBuilder(name);
    }

-    /**
-     * A filter to filter only documents where a field does not exists in them.
-     * @param name the field to query
-     */
-    public static MissingQueryBuilder missingQuery(String name) {
-        return missingQuery(name, MissingQueryBuilder.DEFAULT_NULL_VALUE, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE);
-    }
-
-    /**
-     * A filter to filter only documents where a field does not exists in them.
-     * @param name the field to query
-     * @param nullValue should the missing filter automatically include fields with null value configured in the
-     * mappings. Defaults to <tt>false</tt>.
-     * @param existence should the missing filter include documents where the field doesn't exist in the docs.
-     * Defaults to <tt>true</tt>.
-     * @throws IllegalArgumentException when both <tt>existence</tt> and <tt>nullValue</tt> are set to false
-     */
-    public static MissingQueryBuilder missingQuery(String name, boolean nullValue, boolean existence) {
-        return new MissingQueryBuilder(name, nullValue, existence);
-    }
-
    private QueryBuilders() {

    }
@@ -120,7 +120,6 @@ public class IndicesModule extends AbstractModule {
        registerQueryParser(GeohashCellQuery.Parser.class);
        registerQueryParser(GeoPolygonQueryParser.class);
        registerQueryParser(ExistsQueryParser.class);
-        registerQueryParser(MissingQueryParser.class);
        registerQueryParser(MatchNoneQueryParser.class);

        if (ShapesAvailability.JTS_AVAILABLE) {
@@ -40,10 +40,6 @@ import java.util.concurrent.TimeUnit;
 */
public class RecoverySettings extends AbstractComponent implements Closeable {

    public static final String INDICES_RECOVERY_FILE_CHUNK_SIZE = "indices.recovery.file_chunk_size";
    public static final String INDICES_RECOVERY_TRANSLOG_OPS = "indices.recovery.translog_ops";
    public static final String INDICES_RECOVERY_TRANSLOG_SIZE = "indices.recovery.translog_size";
    public static final String INDICES_RECOVERY_COMPRESS = "indices.recovery.compress";
    public static final String INDICES_RECOVERY_CONCURRENT_STREAMS = "indices.recovery.concurrent_streams";
    public static final String INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS = "indices.recovery.concurrent_small_file_streams";
    public static final String INDICES_RECOVERY_MAX_BYTES_PER_SEC = "indices.recovery.max_bytes_per_sec";

@@ -75,11 +71,7 @@ public class RecoverySettings extends AbstractComponent implements Closeable {

    public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb", "SMALL_FILE_CUTOFF_BYTES").bytes();

    private volatile ByteSizeValue fileChunkSize;

    private volatile boolean compress;
    private volatile int translogOps;
    private volatile ByteSizeValue translogSize;
    public static final ByteSizeValue DEFAULT_CHUNK_SIZE = new ByteSizeValue(512, ByteSizeUnit.KB);

    private volatile int concurrentStreams;
    private volatile int concurrentSmallFileStreams;

@@ -94,16 +86,12 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
    private volatile TimeValue internalActionTimeout;
    private volatile TimeValue internalActionLongTimeout;

    private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE;

    @Inject
    public RecoverySettings(Settings settings, NodeSettingsService nodeSettingsService) {
        super(settings);

        this.fileChunkSize = settings.getAsBytesSize(INDICES_RECOVERY_FILE_CHUNK_SIZE, new ByteSizeValue(512, ByteSizeUnit.KB));
        this.translogOps = settings.getAsInt(INDICES_RECOVERY_TRANSLOG_OPS, 1000);
        this.translogSize = settings.getAsBytesSize(INDICES_RECOVERY_TRANSLOG_SIZE, new ByteSizeValue(512, ByteSizeUnit.KB));
        this.compress = settings.getAsBoolean(INDICES_RECOVERY_COMPRESS, true);

        this.retryDelayStateSync = settings.getAsTime(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(500));
        // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes)
        // and we want to give the master time to remove a faulty node

@@ -132,8 +120,8 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
            rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac());
        }

        logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}], file_chunk_size [{}], translog_size [{}], translog_ops [{}], and compress [{}]",
                maxBytesPerSec, concurrentStreams, fileChunkSize, translogSize, translogOps, compress);
        logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}]",
                maxBytesPerSec, concurrentStreams);

        nodeSettingsService.addListener(new ApplySettings());
    }

@@ -144,26 +132,6 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
        ThreadPool.terminate(concurrentSmallFileStreamPool, 1, TimeUnit.SECONDS);
    }

    public ByteSizeValue fileChunkSize() {
        return fileChunkSize;
    }

    public boolean compress() {
        return compress;
    }

    public int translogOps() {
        return translogOps;
    }

    public ByteSizeValue translogSize() {
        return translogSize;
    }

    public int concurrentStreams() {
        return concurrentStreams;
    }

    public ThreadPoolExecutor concurrentStreamPool() {
        return concurrentStreamPool;
    }

@@ -196,6 +164,15 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
        return internalActionLongTimeout;
    }

    public ByteSizeValue getChunkSize() { return chunkSize; }

    void setChunkSize(ByteSizeValue chunkSize) { // only settable for tests
        if (chunkSize.bytesAsInt() <= 0) {
            throw new IllegalArgumentException("chunkSize must be > 0");
        }
        this.chunkSize = chunkSize;
    }


    class ApplySettings implements NodeSettingsService.Listener {
        @Override

@@ -213,30 +190,6 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
                }
            }

            ByteSizeValue fileChunkSize = settings.getAsBytesSize(INDICES_RECOVERY_FILE_CHUNK_SIZE, RecoverySettings.this.fileChunkSize);
            if (!fileChunkSize.equals(RecoverySettings.this.fileChunkSize)) {
                logger.info("updating [indices.recovery.file_chunk_size] from [{}] to [{}]", RecoverySettings.this.fileChunkSize, fileChunkSize);
                RecoverySettings.this.fileChunkSize = fileChunkSize;
            }

            int translogOps = settings.getAsInt(INDICES_RECOVERY_TRANSLOG_OPS, RecoverySettings.this.translogOps);
            if (translogOps != RecoverySettings.this.translogOps) {
                logger.info("updating [indices.recovery.translog_ops] from [{}] to [{}]", RecoverySettings.this.translogOps, translogOps);
                RecoverySettings.this.translogOps = translogOps;
            }

            ByteSizeValue translogSize = settings.getAsBytesSize(INDICES_RECOVERY_TRANSLOG_SIZE, RecoverySettings.this.translogSize);
            if (!translogSize.equals(RecoverySettings.this.translogSize)) {
                logger.info("updating [indices.recovery.translog_size] from [{}] to [{}]", RecoverySettings.this.translogSize, translogSize);
                RecoverySettings.this.translogSize = translogSize;
            }

            boolean compress = settings.getAsBoolean(INDICES_RECOVERY_COMPRESS, RecoverySettings.this.compress);
            if (compress != RecoverySettings.this.compress) {
                logger.info("updating [indices.recovery.compress] from [{}] to [{}]", RecoverySettings.this.compress, compress);
                RecoverySettings.this.compress = compress;
            }

            int concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, RecoverySettings.this.concurrentStreams);
            if (concurrentStreams != RecoverySettings.this.concurrentStreams) {
                logger.info("updating [indices.recovery.concurrent_streams] from [{}] to [{}]", RecoverySettings.this.concurrentStreams, concurrentStreams);

@@ -36,7 +36,9 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.CancellableThreads.Interruptable;
import org.elasticsearch.index.engine.RecoveryEngineException;

@@ -49,6 +51,7 @@ import org.elasticsearch.transport.RemoteTransportException;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;

import java.io.BufferedOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;

@@ -57,6 +60,7 @@ import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;
import java.util.stream.StreamSupport;

@@ -77,9 +81,9 @@ public class RecoverySourceHandler {
    private final StartRecoveryRequest request;
    private final RecoverySettings recoverySettings;
    private final TransportService transportService;
    private final int chunkSizeInBytes;

    protected final RecoveryResponse response;
    private final TransportRequestOptions requestOptions;

    private final CancellableThreads cancellableThreads = new CancellableThreads() {
        @Override

@@ -106,14 +110,8 @@ public class RecoverySourceHandler {
        this.transportService = transportService;
        this.indexName = this.request.shardId().index().name();
        this.shardId = this.request.shardId().id();

        this.chunkSizeInBytes = recoverySettings.getChunkSize().bytesAsInt();
        this.response = new RecoveryResponse();
        this.requestOptions = TransportRequestOptions.builder()
                .withCompress(recoverySettings.compress())
                .withType(TransportRequestOptions.Type.RECOVERY)
                .withTimeout(recoverySettings.internalActionTimeout())
                .build();

    }

    /**

@@ -218,7 +216,7 @@ public class RecoverySourceHandler {
                totalSize += md.length();
            }
            List<StoreFileMetaData> phase1Files = new ArrayList<>(diff.different.size() + diff.missing.size());
            phase1Files.addAll(diff.different);
            phase1Files.addAll(diff.different);
            phase1Files.addAll(diff.missing);
            for (StoreFileMetaData md : phase1Files) {
                if (request.metadataSnapshot().asMap().containsKey(md.name())) {

@@ -249,7 +247,7 @@ public class RecoverySourceHandler {
            });
            // How many bytes we've copied since we last called RateLimiter.pause
            final AtomicLong bytesSinceLastPause = new AtomicLong();
            final Function<StoreFileMetaData, OutputStream> outputStreamFactories = (md) -> new RecoveryOutputStream(md, bytesSinceLastPause, translogView);
            final Function<StoreFileMetaData, OutputStream> outputStreamFactories = (md) -> new BufferedOutputStream(new RecoveryOutputStream(md, bytesSinceLastPause, translogView), chunkSizeInBytes);
            sendFiles(store, phase1Files.toArray(new StoreFileMetaData[phase1Files.size()]), outputStreamFactories);
            cancellableThreads.execute(() -> {
                // Send the CLEAN_FILES request, which takes all of the files that

@@ -432,7 +430,7 @@ public class RecoverySourceHandler {
        }

        final TransportRequestOptions recoveryOptions = TransportRequestOptions.builder()
                .withCompress(recoverySettings.compress())
                .withCompress(true)
                .withType(TransportRequestOptions.Type.RECOVERY)
                .withTimeout(recoverySettings.internalActionLongTimeout())
                .build();

@@ -451,9 +449,9 @@ public class RecoverySourceHandler {
                size += operation.estimateSize();
                totalOperations++;

                // Check if this request is past the size or bytes threshold, and
                // Check if this request is past the bytes threshold, and
                // if so, send it off
                if (ops >= recoverySettings.translogOps() || size >= recoverySettings.translogSize().bytes()) {
                if (size >= chunkSizeInBytes) {

                    // don't throttle translog, since we lock for phase3 indexing,
                    // so we need to move it as fast as possible. Note, since we

@@ -537,7 +535,7 @@ public class RecoverySourceHandler {

        @Override
        public final void write(int b) throws IOException {
            write(new byte[]{(byte) b}, 0, 1);
            throw new UnsupportedOperationException("we can't send single bytes over the wire");
        }

        @Override

@@ -548,6 +546,11 @@ public class RecoverySourceHandler {
        }

        private void sendNextChunk(long position, BytesArray content, boolean lastChunk) throws IOException {
            final TransportRequestOptions chunkSendOptions = TransportRequestOptions.builder()
                    .withCompress(false) // lucene files are already compressed, so compressing this again won't really help much and we save the cpu for other things
                    .withType(TransportRequestOptions.Type.RECOVERY)
                    .withTimeout(recoverySettings.internalActionTimeout())
                    .build();
            cancellableThreads.execute(() -> {
                // Pause using the rate limiter, if desired, to throttle the recovery
                final long throttleTimeInNanos;

@@ -577,7 +580,7 @@ public class RecoverySourceHandler {
         * see how many translog ops we accumulate while copying files across the network. A future optimization
         * would be to restart the file copy again (new deltas) if too many translog ops are piling up.
         */
                        throttleTimeInNanos), requestOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
                        throttleTimeInNanos), chunkSendOptions, EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
            });
            if (shard.state() == IndexShardState.CLOSED) { // check if the shard got closed on us
                throw new IndexShardClosedException(request.shardId());

@@ -670,9 +673,10 @@ public class RecoverySourceHandler {
            pool = recoverySettings.concurrentSmallFileStreamPool();
        }
        Future<Void> future = pool.submit(() -> {
            try (final OutputStream outputStream = outputStreamFactory.apply(md);
                 final IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE)) {
                Streams.copy(new InputStreamIndexInput(indexInput, md.length()), outputStream);
            try (final IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE)) {
                // it's fine that we only have the indexInput in the try/with block. The copy method handles
                // exceptions during close correctly and doesn't hide the original exception.
                Streams.copy(new InputStreamIndexInput(indexInput, md.length()), outputStreamFactory.apply(md));
            }
            return null;
        });

@@ -33,6 +33,7 @@ import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.common.StopWatch;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Injector;

@@ -42,11 +43,14 @@ import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoveryService;

@@ -59,6 +63,7 @@ import org.elasticsearch.gateway.GatewayModule;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerModule;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.analysis.AnalysisModule;

@@ -97,7 +102,16 @@ import org.elasticsearch.tribe.TribeService;
import org.elasticsearch.watcher.ResourceWatcherModule;
import org.elasticsearch.watcher.ResourceWatcherService;

import java.io.BufferedWriter;
import java.io.IOException;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.charset.Charset;
import java.nio.file.CopyOption;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;

@@ -274,6 +288,15 @@ public class Node implements Releasable {
        injector.getInstance(ResourceWatcherService.class).start();
        injector.getInstance(TribeService.class).start();

        if (System.getProperty("es.tests.portsfile", "false").equals("true")) {
            if (settings.getAsBoolean("http.enabled", true)) {
                HttpServerTransport http = injector.getInstance(HttpServerTransport.class);
                writePortsFile("http", http.boundAddress());
            }
            TransportService transport = injector.getInstance(TransportService.class);
            writePortsFile("transport", transport.boundAddress());
        }

        logger.info("started");

        return this;

@@ -425,4 +448,27 @@ public class Node implements Releasable {
    public Injector injector() {
        return this.injector;
    }

    /** Writes a file to the logs dir containing the ports for the given transport type */
    private void writePortsFile(String type, BoundTransportAddress boundAddress) {
        Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp");
        try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) {
            for (TransportAddress address : boundAddress.boundAddresses()) {
                InetAddress inetAddress = InetAddress.getByName(address.getAddress());
                if (inetAddress instanceof Inet6Address && inetAddress.isLinkLocalAddress()) {
                    // no link local, just causes problems
                    continue;
                }
                writer.write(NetworkAddress.formatAddress(new InetSocketAddress(inetAddress, address.getPort())) + "\n");
            }
        } catch (IOException e) {
            throw new RuntimeException("Failed to write ports file", e);
        }
        Path portsFile = environment.logsFile().resolve(type + ".ports");
        try {
            Files.move(tmpPortsFile, portsFile, StandardCopyOption.ATOMIC_MOVE);
        } catch (IOException e) {
            throw new RuntimeException("Failed to rename ports file", e);
        }
    }
}

@@ -22,6 +22,7 @@ package org.elasticsearch.plugins;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.*;
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.collect.Tuple;

@@ -66,7 +67,7 @@ public class PluginManager {
            "plugin",
            "plugin.bat",
            "service.bat"));


    static final Set<String> MODULES = unmodifiableSet(newHashSet(
            "lang-expression",
            "lang-groovy"));

@@ -124,7 +125,7 @@ public class PluginManager {
            checkForForbiddenName(pluginHandle.name);
        } else {
            // if we have no name but a url, use a temporary name that will be overwritten later
            pluginHandle = new PluginHandle("temp_name" + new Random().nextInt(), null, null);
            pluginHandle = new PluginHandle("temp_name" + Randomness.get().nextInt(), null, null);
        }

        Path pluginFile = download(pluginHandle, terminal);

@@ -224,7 +225,7 @@ public class PluginManager {
        PluginInfo info = PluginInfo.readFromProperties(root);
        terminal.println(VERBOSE, "%s", info);

        // don't let luser install plugin as a module...
        // don't let luser install plugin as a module...
        // they might be unavoidably in maven central and are packaged up the same way)
        if (MODULES.contains(info.getName())) {
            throw new IOException("plugin '" + info.getName() + "' cannot be installed like this, it is a system module");

@@ -87,6 +87,7 @@ import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory;
import org.jboss.netty.util.HashedWheelTimer;

import java.io.IOException;
import java.net.BindException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.SocketAddress;

@@ -763,6 +764,11 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
            // close the channel as a safe measure, which will cause a node to be disconnected if relevant
            ctx.getChannel().close();
            disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
        } else if (e.getCause() instanceof BindException) {
            logger.trace("bind exception caught on transport layer [{}]", e.getCause(), ctx.getChannel());
            // close the channel as a safe measure, which will cause a node to be disconnected if relevant
            ctx.getChannel().close();
            disconnectFromNodeChannel(ctx.getChannel(), e.getCause());
        } else if (e.getCause() instanceof CancelledKeyException) {
            logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel());
            // close the channel as a safe measure, which will cause a node to be disconnected if relevant

@@ -23,19 +23,8 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FieldType;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.search.similarities.DefaultSimilarity;
import org.apache.lucene.search.similarities.Similarity;

@@ -44,11 +33,7 @@ import org.apache.lucene.util.TestUtil;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.*;

import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;

@@ -26,20 +26,12 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.transport.NodeDisconnectedException;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.*;

import static org.hamcrest.Matchers.equalTo;


@@ -119,7 +111,7 @@ public class IndicesShardStoreResponseTests extends ESTestCase {
        orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, new IOException("corrupted")));

        List<IndicesShardStoresResponse.StoreStatus> storeStatuses = new ArrayList<>(orderedStoreStatuses);
        Collections.shuffle(storeStatuses);
        Collections.shuffle(storeStatuses, random());
        CollectionUtil.timSort(storeStatuses);
        assertThat(storeStatuses, equalTo(orderedStoreStatuses));
    }

@@ -865,7 +865,7 @@ public class IndexAliasesIT extends ESIntegTestCase {
        assertAcked(prepareCreate("test")
                .addMapping("type", "field", "type=string")
                .addAlias(new Alias("alias1"))
                .addAlias(new Alias("alias2").filter(QueryBuilders.missingQuery("field")))
                .addAlias(new Alias("alias2").filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("field"))))
                .addAlias(new Alias("alias3").indexRouting("index").searchRouting("search")));

        checkAliases();

@@ -71,7 +71,6 @@ import java.util.concurrent.ExecutionException;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.existsQuery;
import static org.elasticsearch.index.query.QueryBuilders.missingQuery;
import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;

@@ -440,25 +439,9 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase {
            countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("obj1")).get();
            assertHitCount(countResponse, 2l);

            countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("field1")).get();
            assertHitCount(countResponse, 2l);

            countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("field1")).get();
            assertHitCount(countResponse, 2l);

            countResponse = client().prepareSearch().setSize(0).setQuery(constantScoreQuery(missingQuery("field1"))).get();
            assertHitCount(countResponse, 2l);

            countResponse = client().prepareSearch().setSize(0).setQuery(queryStringQuery("_missing_:field1")).get();
            assertHitCount(countResponse, 2l);

            // wildcard check
            countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("x*")).get();
            assertHitCount(countResponse, 2l);

            // object check
            countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("obj1")).get();
            assertHitCount(countResponse, 2l);
            if (!backwardsCluster().upgradeOneNode()) {
                break;
            }

@@ -32,8 +32,6 @@ import org.elasticsearch.action.admin.indices.upgrade.UpgradeIT;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;

@@ -62,23 +60,12 @@ import org.junit.Before;

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.DirectoryStream;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.*;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;

// needs at least 2 nodes since it bumps replicas to 1

@@ -271,7 +258,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
    public void testOldIndexes() throws Exception {
        setupCluster();

        Collections.shuffle(indexes, getRandom());
        Collections.shuffle(indexes, random());
        for (String index : indexes) {
            long startTime = System.currentTimeMillis();
            logger.info("--> Testing old index " + index);

@@ -341,13 +328,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
        searchRsp = searchReq.get();
        ElasticsearchAssertions.assertNoFailures(searchRsp);
        assertEquals(numDocs, searchRsp.getHits().getTotalHits());

        logger.info("--> testing missing filter");
        // the field for the missing filter here needs to be different than the exists filter above, to avoid being found in the cache
        searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.missingQuery("long_sort"));
        searchRsp = searchReq.get();
        ElasticsearchAssertions.assertNoFailures(searchRsp);
        assertEquals(0, searchRsp.getHits().getTotalHits());
    }

    void assertBasicAggregationWorks(String indexName) {

@@ -29,11 +29,7 @@ import org.junit.BeforeClass;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.*;

import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;

@@ -246,7 +242,7 @@ public class DiscoveryNodeFiltersTests extends ESTestCase {
    private Settings shuffleSettings(Settings source) {
        Settings.Builder settings = Settings.settingsBuilder();
        List<String> keys = new ArrayList<>(source.getAsMap().keySet());
        Collections.shuffle(keys, getRandom());
        Collections.shuffle(keys, random());
        for (String o : keys) {
            settings.put(o, source.getAsMap().get(o));
        }

@@ -170,7 +170,7 @@ public class DelayedAllocationIT extends ESIntegTestCase {
    private String findNodeWithShard() {
        ClusterState state = client().admin().cluster().prepareState().get().getState();
        List<ShardRouting> startedShards = state.routingTable().shardsWithState(ShardRoutingState.STARTED);
        Collections.shuffle(startedShards, getRandom());
        Collections.shuffle(startedShards, random());
        return state.nodes().get(startedShards.get(0).currentNodeId()).getName();
    }
}

@@ -20,7 +20,6 @@
package org.elasticsearch.cluster.routing.allocation;

import com.carrotsearch.hppc.cursors.ObjectCursor;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -388,7 +387,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
        logger.info("Removing [{}] nodes", numNodes);
        DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
        ArrayList<DiscoveryNode> discoveryNodes = CollectionUtils.iterableAsArrayList(clusterState.nodes());
        Collections.shuffle(discoveryNodes, getRandom());
        Collections.shuffle(discoveryNodes, random());
        for (DiscoveryNode node : discoveryNodes) {
            nodes.remove(node.id());
            numNodes--;

@@ -39,15 +39,10 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.test.VersionUtils.randomVersion;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.*;

/**
 *

@@ -199,7 +194,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
        DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
        int numNodes = between(1, 20);
        if (nodes.size() > numNodes) {
            Collections.shuffle(nodes, getRandom());
            Collections.shuffle(nodes, random());
            nodes = nodes.subList(0, numNodes);
        } else {
            for (int j = nodes.size(); j < numNodes; j++) {

@@ -17,20 +17,14 @@
 * under the License.
 */
package org.elasticsearch.common.lucene;

import org.apache.lucene.analysis.MockAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.NoDeletionPolicy;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.*;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.TermQuery;

@@ -41,11 +35,7 @@ import org.apache.lucene.util.Version;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;


@@ -24,13 +24,7 @@ import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.*;
import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;

@@ -42,14 +36,7 @@ import org.elasticsearch.test.ESTestCase;
import org.junit.After;
import org.junit.Before;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.*;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;

@@ -201,7 +188,7 @@ public class FreqTermsEnumTests extends ESTestCase {
        for (int i = 0; i < cycles; i++) {
            List<String> terms = new ArrayList<>(Arrays.asList(this.terms));

            Collections.shuffle(terms, getRandom());
            Collections.shuffle(terms, random());
            for (String term : terms) {
                if (!termsEnum.seekExact(new BytesRef(term))) {
                    assertThat("term : " + term, reference.get(term).docFreq, is(0));

@@ -25,14 +25,7 @@ import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.Counter;
import org.elasticsearch.test.ESTestCase;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.*;

import static org.elasticsearch.common.util.CollectionUtils.eagerPartition;
import static org.hamcrest.Matchers.equalTo;

@@ -80,7 +73,7 @@ public class CollectionUtilsTests extends ESTestCase {
            array.append(new BytesRef(s));
        }
        if (randomBoolean()) {
            Collections.shuffle(tmpList, getRandom());
            Collections.shuffle(tmpList, random());
            for (BytesRef ref : tmpList) {
                array.append(ref);
            }

@@ -111,7 +104,7 @@ public class CollectionUtilsTests extends ESTestCase {
            array.append(new BytesRef(s));
        }
        if (randomBoolean()) {
            Collections.shuffle(values, getRandom());
            Collections.shuffle(values, random());
        }
        int[] indices = new int[array.size()];
        for (int i = 0; i < indices.length; i++) {

@@ -27,13 +27,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.PriorityBlockingQueue;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;

import static org.hamcrest.Matchers.equalTo;

@@ -46,7 +40,7 @@ public class PrioritizedExecutorsTests extends ESTestCase {
    public void testPriorityQueue() throws Exception {
        PriorityBlockingQueue<Priority> queue = new PriorityBlockingQueue<>();
        List<Priority> priorities = Arrays.asList(Priority.values());
        Collections.shuffle(priorities);
        Collections.shuffle(priorities, random());

        for (Priority priority : priorities) {
            queue.add(priority);

@@ -473,7 +473,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
            docsPerIndexer = 1 + randomInt(5);
            logger.info("indexing " + docsPerIndexer + " docs per indexer during partition");
            countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size()));
            Collections.shuffle(semaphores);
            Collections.shuffle(semaphores, random());
            for (Semaphore semaphore : semaphores) {
                assertThat(semaphore.availablePermits(), equalTo(0));
                semaphore.release(docsPerIndexer);

@@ -683,7 +683,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
        ensureGreen("test");

        nodes = new ArrayList<>(nodes);
        Collections.shuffle(nodes, getRandom());
        Collections.shuffle(nodes, random());
        String isolatedNode = nodes.get(0);
        String notIsolatedNode = nodes.get(1);


@@ -1038,7 +1038,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
                new NetworkDisconnectPartition(getRandom()),
                new SlowClusterStateProcessing(getRandom())
        );
        Collections.shuffle(list);
        Collections.shuffle(list, random());
        setDisruptionScheme(list.get(0));
        return list.get(0);
    }

@@ -26,11 +26,7 @@ import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.test.ESTestCase;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.*;

public class ElectMasterServiceTests extends ESTestCase {


@@ -53,7 +49,7 @@ public class ElectMasterServiceTests extends ESTestCase {
            nodes.add(node);
        }

        Collections.shuffle(nodes, getRandom());
        Collections.shuffle(nodes, random());
        return nodes;
    }


@@ -244,7 +244,7 @@ public class NodeJoinControllerTests extends ESTestCase {

        // add

        Collections.shuffle(nodesToJoin);
        Collections.shuffle(nodesToJoin, random());
        logger.debug("--> joining [{}] unique master nodes. Total of [{}] join requests", initialJoins, nodesToJoin.size());
        for (DiscoveryNode node : nodesToJoin) {
            pendingJoins.add(joinNodeAsync(node));

@@ -269,7 +269,7 @@ public class NodeJoinControllerTests extends ESTestCase {
            }
        }

        Collections.shuffle(nodesToJoin);
        Collections.shuffle(nodesToJoin, random());
        logger.debug("--> joining [{}] nodes, with repetition a total of [{}]", finalJoins, nodesToJoin.size());
        for (DiscoveryNode node : nodesToJoin) {
            pendingJoins.add(joinNodeAsync(node));

@@ -316,7 +316,7 @@ public class NodeJoinControllerTests extends ESTestCase {
                nodesToJoin.add(node);
            }
        }
        Collections.shuffle(nodesToJoin);
        Collections.shuffle(nodesToJoin, random());
        logger.debug("--> joining [{}] nodes, with repetition a total of [{}]", initialJoins, nodesToJoin.size());
        for (DiscoveryNode node : nodesToJoin) {
            pendingJoins.add(joinNodeAsync(node));

@@ -62,7 +62,7 @@ public class ZenPingTests extends ESTestCase {
        }

        // shuffle
        Collections.shuffle(pings);
        Collections.shuffle(pings, random());

        ZenPing.PingCollection collection = new ZenPing.PingCollection();
        collection.addPings(pings.toArray(new ZenPing.PingResponse[pings.size()]));

@@ -21,17 +21,14 @@ package org.elasticsearch.discovery.zen.publish;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.*;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;

@@ -47,35 +44,20 @@ import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BytesTransportRequest;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportConnectionListener;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportResponseOptions;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.*;
import org.elasticsearch.transport.local.LocalTransport;
import org.junit.After;
import org.junit.Before;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.emptyIterable;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.*;

@TestLogging("discovery.zen.publish:TRACE")
public class PublishClusterStateActionTests extends ESTestCase {

@@ -675,7 +657,7 @@ public class PublishClusterStateActionTests extends ESTestCase {

        logger.info("--> committing states");

        Collections.shuffle(states, random());
        Randomness.shuffle(states);
        for (ClusterState state : states) {
            node.action.handleCommitRequest(new PublishClusterStateAction.CommitClusterStateRequest(state.stateUUID()), channel);
            assertThat(channel.response.get(), equalTo((TransportResponse) TransportResponse.Empty.INSTANCE));

@@ -19,12 +19,7 @@
package org.elasticsearch.gateway;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.ChecksumIndexInput;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.MockDirectoryWrapper;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.store.*;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;

@@ -33,11 +28,7 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;

@@ -50,20 +41,10 @@ import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.*;
import java.util.stream.StreamSupport;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.startsWith;
import static org.hamcrest.Matchers.*;

@LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to work with ExtrasFS
public class MetaDataStateFormatTests extends ESTestCase {

@@ -349,7 +330,7 @@ public class MetaDataStateFormatTests extends ESTestCase {

        }
        List<Path> dirList = Arrays.asList(dirs);
        Collections.shuffle(dirList, getRandom());
        Collections.shuffle(dirList, random());
        MetaData loadedMetaData = format.loadLatestState(logger, dirList.toArray(new Path[0]));
        MetaData latestMetaData = meta.get(numStates-1);
        assertThat(loadedMetaData.clusterUUID(), not(equalTo("_na_")));

@@ -49,21 +49,12 @@ import org.elasticsearch.test.ESSingleNodeTestCase;
import org.hamcrest.Matchers;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.*;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath;
import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasItem;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.*;

public class SimpleAllMapperTests extends ESSingleNodeTestCase {


@@ -251,7 +242,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
        if (randomBoolean()) {
            booleanOptionList.add(new Tuple<>("store_term_vector_payloads", tv_payloads = randomBoolean()));
        }
        Collections.shuffle(booleanOptionList, getRandom());
        Collections.shuffle(booleanOptionList, random());
        for (Tuple<String, Boolean> option : booleanOptionList) {
            mappingBuilder.field(option.v1(), option.v2().booleanValue());
        }

@@ -19,11 +19,7 @@

package org.elasticsearch.index.mapper.core;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.MockTokenizer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.*;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentMapperParser;

@@ -85,7 +81,7 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase {
        t2.setPositionIncrement(2); // Count funny tokens with more than one increment
        int finalTokenIncrement = 4; // Count the final token increment on the rare token streams that have them
        Token[] tokens = new Token[] {t1, t2, t3};
        Collections.shuffle(Arrays.asList(tokens), getRandom());
        Collections.shuffle(Arrays.asList(tokens), random());
        final TokenStream tokenStream = new CannedTokenStream(finalTokenIncrement, 0, tokens);
        // TODO: we have no CannedAnalyzer?
        Analyzer analyzer = new Analyzer() {

@@ -479,7 +479,7 @@ public class MultiFieldTests extends ESSingleNodeTestCase {
                .startObject("my_field").field("type", "string").startObject("fields").startObject(MY_MULTI_FIELD)
                .field("type", "string").startObject("fielddata");
        String[] keys = possibleSettings.keySet().toArray(new String[]{});
        Collections.shuffle(Arrays.asList(keys));
        Collections.shuffle(Arrays.asList(keys), random());
        for(int i = randomIntBetween(0, possibleSettings.size()-1); i >= 0; --i)
            builder.field(keys[i], possibleSettings.get(keys[i]));
        builder.endObject().endObject().endObject().endObject().endObject().endObject().endObject();

@@ -1,97 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper.source;

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.test.ESSingleNodeTestCase;

import static org.hamcrest.Matchers.equalTo;

/**
 *
 */
public class CompressSourceMappingTests extends ESSingleNodeTestCase {
    Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();

    public void testCompressDisabled() throws Exception {
        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
                .startObject("_source").field("compress", false).endObject()
                .endObject().endObject().string();

        DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);

        ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject()
                .field("field1", "value1")
                .field("field2", "value2")
                .endObject().bytes());
        BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
        assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false));
    }

    public void testCompressEnabled() throws Exception {
        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
                .startObject("_source").field("compress", true).endObject()
                .endObject().endObject().string();

        DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);

        ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject()
                .field("field1", "value1")
                .field("field2", "value2")
                .endObject().bytes());

        BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
        assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true));
    }

    public void testCompressThreshold() throws Exception {
        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
                .startObject("_source").field("compress_threshold", "200b").endObject()
                .endObject().endObject().string();

        DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping);

        ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject()
                .field("field1", "value1")
                .endObject().bytes());

        BytesRef bytes = doc.rootDoc().getBinaryValue("_source");
        assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false));

        doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject()
                .field("field1", "value1")
                .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
                .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
                .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
                .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz")
                .endObject().bytes());

        bytes = doc.rootDoc().getBinaryValue("_source");
        assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true));
    }
}

@@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.mapper.*;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.VersionUtils;

import java.io.IOException;
import java.util.ArrayList;

@@ -63,51 +64,16 @@ public class DefaultSourceMappingTests extends ESSingleNodeTestCase {
        assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.SMILE));
    }

    public void testJsonFormat() throws Exception {
    public void testFormatBackCompat() throws Exception {
        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
                .startObject("_source").field("format", "json").endObject()
                .endObject().endObject().string();
        Settings settings = Settings.builder()
                .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_2_0))
                .build();

        DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
        DocumentMapper documentMapper = parser.parse(mapping);
        ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject()
                .field("field", "value")
                .endObject().bytes());

        assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));

        documentMapper = parser.parse(mapping);
        doc = documentMapper.parse("test", "type", "1", XContentFactory.smileBuilder().startObject()
                .field("field", "value")
                .endObject().bytes());

        assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON));
    }

    public void testJsonFormatCompressedBackcompat() throws Exception {
        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
                .startObject("_source").field("format", "json").field("compress", true).endObject()
                .endObject().endObject().string();

        Settings backcompatSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
        DocumentMapperParser parser = createIndex("test", backcompatSettings).mapperService().documentMapperParser();
        DocumentMapper documentMapper = parser.parse(mapping);
        ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject()
                .field("field", "value")
                .endObject().bytes());

        assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true));
        byte[] uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes();
        assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON));

        documentMapper = parser.parse(mapping);
        doc = documentMapper.parse("test", "type", "1", XContentFactory.smileBuilder().startObject()
                .field("field", "value")
                .endObject().bytes());

        assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true));
        uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes();
        assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON));
        DocumentMapperParser parser = createIndex("test", settings).mapperService().documentMapperParser();
        parser.parse(mapping); // no exception
    }

    public void testIncludes() throws Exception {

@@ -1,107 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.query;

import org.apache.lucene.search.Query;

import java.io.IOException;

import static org.hamcrest.Matchers.containsString;

public class MissingQueryBuilderTests extends AbstractQueryTestCase<MissingQueryBuilder> {

    @Override
    protected MissingQueryBuilder doCreateTestQueryBuilder() {
        String fieldName = randomBoolean() ? randomFrom(MAPPED_FIELD_NAMES) : randomAsciiOfLengthBetween(1, 10);
        Boolean existence = randomBoolean();
        Boolean nullValue = randomBoolean();
        if (existence == false && nullValue == false) {
            if (randomBoolean()) {
                existence = true;
            } else {
                nullValue = true;
            }
        }
        return new MissingQueryBuilder(fieldName, nullValue, existence);
    }

    @Override
    protected void doAssertLuceneQuery(MissingQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {
        // too many mapping dependent cases to test, we don't want to end up
        // duplicating the toQuery method
    }

    public void testIllegalArguments() {
        try {
            if (randomBoolean()) {
                new MissingQueryBuilder("", true, true);
            } else {
                new MissingQueryBuilder(null, true, true);
            }
            fail("must not be null or empty");
        } catch (IllegalArgumentException e) {
            // expected
        }

        try {
            new MissingQueryBuilder("fieldname", false, false);
            fail("existence and nullValue cannot both be false");
        } catch (IllegalArgumentException e) {
            // expected
        }

        try {
            new MissingQueryBuilder("fieldname", MissingQueryBuilder.DEFAULT_NULL_VALUE, false);
            fail("existence and nullValue cannot both be false");
        } catch (IllegalArgumentException e) {
            // expected
        }
    }

    public void testBothNullValueAndExistenceFalse() throws IOException {
        QueryShardContext context = createShardContext();
        context.setAllowUnmappedFields(true);
        try {
            MissingQueryBuilder.newFilter(context, "field", false, false);
            fail("Expected QueryShardException");
        } catch (QueryShardException e) {
            assertThat(e.getMessage(), containsString("missing must have either existence, or null_value"));
        }
    }

    public void testFromJson() throws IOException {
        String json =
                "{\n" +
                "  \"missing\" : {\n" +
                "    \"field\" : \"user\",\n" +
                "    \"null_value\" : false,\n" +
                "    \"existence\" : true,\n" +
                "    \"boost\" : 1.0\n" +
                "  }\n" +
                "}";

        MissingQueryBuilder parsed = (MissingQueryBuilder) parseQuery(json);
        checkGeneratedJson(json, parsed);

        assertEquals(json, false, parsed.nullValue());
        assertEquals(json, true, parsed.existence());
        assertEquals(json, "user", parsed.fieldPattern());
    }
}

@@ -58,7 +58,6 @@ import static org.elasticsearch.index.query.QueryBuilders.idsQuery;
import static org.elasticsearch.index.query.QueryBuilders.indicesQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.missingQuery;
import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisQuery;
import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery;
import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;
@@ -240,10 +239,6 @@ public class QueryDSLDocumentationTests extends ESTestCase {
        matchQuery("name", "kimchy elasticsearch");
    }

    public void testMissing() {
        missingQuery("user", true, true);
    }

    public void testMLT() {
        String[] fields = {"name.first", "name.last"};
        String[] texts = {"text like this one"};

@@ -23,7 +23,9 @@ import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermRangeQuery;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.lucene.BytesRefs;
import org.hamcrest.core.IsEqual;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;

@@ -353,4 +355,42 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase<RangeQueryBuil
        assertEquals(json, "2015-01-01 00:00:00", parsed.from());
        assertEquals(json, "now", parsed.to());
    }

    public void testNamedQueryParsing() throws IOException {
        String json =
                "{\n" +
                "  \"range\" : {\n" +
                "    \"timestamp\" : {\n" +
                "      \"from\" : \"2015-01-01 00:00:00\",\n" +
                "      \"to\" : \"now\",\n" +
                "      \"boost\" : 1.0,\n" +
                "      \"_name\" : \"my_range\"\n" +
                "    }\n" +
                "  }\n" +
                "}";
        assertNotNull(parseQuery(json));

        json =
                "{\n" +
                "  \"range\" : {\n" +
                "    \"timestamp\" : {\n" +
                "      \"from\" : \"2015-01-01 00:00:00\",\n" +
                "      \"to\" : \"now\",\n" +
                "      \"boost\" : 1.0\n" +
                "    },\n" +
                "    \"_name\" : \"my_range\"\n" +
                "  }\n" +
                "}";

        // non strict parsing should accept "_name" on top level
        assertNotNull(parseQuery(json, ParseFieldMatcher.EMPTY));

        // with strict parsing, ParseField will throw exception
        try {
            parseQuery(json, ParseFieldMatcher.STRICT);
            fail("Strict parsing should trigger exception for '_name' on top level");
        } catch (IllegalArgumentException e) {
            assertThat(e.getMessage(), equalTo("Deprecated field [_name] used, replaced by [query name is not supported in short version of range query]"));
        }
    }
}

@@ -19,26 +19,21 @@
package org.elasticsearch.index.store;

import com.carrotsearch.randomizedtesting.generators.RandomPicks;

import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.IndexFileNames;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.common.Nullable;
@@ -48,7 +43,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.shard.*;
import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
@@ -75,14 +69,7 @@ import java.nio.charset.StandardCharsets;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
@@ -91,16 +78,8 @@ import java.util.concurrent.atomic.AtomicBoolean;

import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
import static org.hamcrest.Matchers.*;

@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE)
public class CorruptedFileIT extends ESIntegTestCase {
@@ -320,7 +299,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
        }

        assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
        Collections.shuffle(dataNodeStats, getRandom());
        Collections.shuffle(dataNodeStats, random());
        NodeStats primariesNode = dataNodeStats.get(0);
        NodeStats unluckyNode = dataNodeStats.get(1);
        assertAcked(prepareCreate("test").setSettings(Settings.builder()
@@ -380,7 +359,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
        }

        assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
        Collections.shuffle(dataNodeStats, getRandom());
        Collections.shuffle(dataNodeStats, random());
        NodeStats primariesNode = dataNodeStats.get(0);
        NodeStats unluckyNode = dataNodeStats.get(1);

@@ -138,21 +138,25 @@ public class IndexRecoveryIT extends ESIntegTestCase {
    }

    private void slowDownRecovery(ByteSizeValue shardSize) {
        long chunkSize = shardSize.bytes() / 10;
        long chunkSize = Math.max(1, shardSize.bytes() / 10);
        for(RecoverySettings settings : internalCluster().getInstances(RecoverySettings.class)) {
            setChunkSize(settings, new ByteSizeValue(chunkSize, ByteSizeUnit.BYTES));
        }
        assertTrue(client().admin().cluster().prepareUpdateSettings()
                .setTransientSettings(Settings.builder()
                        // one chunk per sec..
                        .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, chunkSize, ByteSizeUnit.BYTES)
                        .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, chunkSize, ByteSizeUnit.BYTES)
                )
                .get().isAcknowledged());
    }

    private void restoreRecoverySpeed() {
        for(RecoverySettings settings : internalCluster().getInstances(RecoverySettings.class)) {
            setChunkSize(settings, RecoverySettings.DEFAULT_CHUNK_SIZE);
        }
        assertTrue(client().admin().cluster().prepareUpdateSettings()
                .setTransientSettings(Settings.builder()
                        .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, "20mb")
                        .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, "512kb")
                )
                .get().isAcknowledged());
    }

@@ -631,4 +635,8 @@
            transport.sendRequest(node, requestId, action, request, options);
        }
    }

    public static void setChunkSize(RecoverySettings recoverySettings, ByteSizeValue chunksSize) {
        recoverySettings.setChunkSize(chunksSize);
    }
}

@@ -26,13 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.recovery.RecoveryState.File;
import org.elasticsearch.indices.recovery.RecoveryState.Index;
import org.elasticsearch.indices.recovery.RecoveryState.Stage;
import org.elasticsearch.indices.recovery.RecoveryState.Timer;
import org.elasticsearch.indices.recovery.RecoveryState.Translog;
import org.elasticsearch.indices.recovery.RecoveryState.Type;
import org.elasticsearch.indices.recovery.RecoveryState.VerifyIndex;
import org.elasticsearch.indices.recovery.RecoveryState.*;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
@@ -43,14 +37,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;

import static org.elasticsearch.test.VersionUtils.randomVersion;
import static org.hamcrest.Matchers.arrayContainingInAnyOrder;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.*;

public class RecoveryStateTests extends ESTestCase {
    abstract class Streamer<T extends Streamable> extends Thread {
@@ -201,7 +188,7 @@ public class RecoveryStateTests extends ESTestCase {
            }
        }

        Collections.shuffle(Arrays.asList(files));
        Collections.shuffle(Arrays.asList(files), random());
        final RecoveryState.Index index = new RecoveryState.Index();

        if (randomBoolean()) {

@@ -560,7 +560,7 @@
                .setOrder(0)
                .addAlias(new Alias("alias1"))
                .addAlias(new Alias("{index}-alias"))
                .addAlias(new Alias("alias3").filter(QueryBuilders.missingQuery("test")))
                .addAlias(new Alias("alias3").filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("test"))))
                .addAlias(new Alias("alias4")).get();

        client().admin().indices().preparePutTemplate("template2")

@@ -32,24 +32,6 @@ public class RecoverySettingsTests extends ESSingleNodeTestCase {
    }

    public void testAllSettingsAreDynamicallyUpdatable() {
        innerTestSettings(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, randomIntBetween(1, 200), ByteSizeUnit.BYTES, new Validator() {
            @Override
            public void validate(RecoverySettings recoverySettings, int expectedValue) {
                assertEquals(expectedValue, recoverySettings.fileChunkSize().bytesAsInt());
            }
        });
        innerTestSettings(RecoverySettings.INDICES_RECOVERY_TRANSLOG_OPS, randomIntBetween(1, 200), new Validator() {
            @Override
            public void validate(RecoverySettings recoverySettings, int expectedValue) {
                assertEquals(expectedValue, recoverySettings.translogOps());
            }
        });
        innerTestSettings(RecoverySettings.INDICES_RECOVERY_TRANSLOG_SIZE, randomIntBetween(1, 200), ByteSizeUnit.BYTES, new Validator() {
            @Override
            public void validate(RecoverySettings recoverySettings, int expectedValue) {
                assertEquals(expectedValue, recoverySettings.translogSize().bytesAsInt());
            }
        });
        innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, randomIntBetween(1, 200), new Validator() {
            @Override
            public void validate(RecoverySettings recoverySettings, int expectedValue) {
@@ -98,13 +80,6 @@ public class RecoverySettingsTests extends ESSingleNodeTestCase {
                assertEquals(expectedValue, recoverySettings.internalActionLongTimeout().millis());
            }
        });

        innerTestSettings(RecoverySettings.INDICES_RECOVERY_COMPRESS, false, new Validator() {
            @Override
            public void validate(RecoverySettings recoverySettings, boolean expectedValue) {
                assertEquals(expectedValue, recoverySettings.compress());
            }
        });
    }

    private static class Validator {

@@ -29,8 +29,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.indices.recovery.IndexRecoveryIT;
import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.recovery.RecoveryTarget;
@@ -58,13 +58,6 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@ESIntegTestCase.ClusterScope(numDataNodes = 2, numClientNodes = 0, scope = ESIntegTestCase.Scope.TEST)
@SuppressCodecs("*") // test relies on exact file extensions
public class TruncatedRecoveryIT extends ESIntegTestCase {
    @Override
    protected Settings nodeSettings(int nodeOrdinal) {
        Settings.Builder builder = Settings.builder()
                .put(super.nodeSettings(nodeOrdinal))
                .put(RecoverySettings.INDICES_RECOVERY_FILE_CHUNK_SIZE, new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES));
        return builder.build();
    }

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
@@ -78,6 +71,10 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
     * Later we allow full recovery to ensure we can still recover and don't run into corruptions.
     */
    public void testCancelRecoveryAndResume() throws Exception {
        for(RecoverySettings settings : internalCluster().getInstances(RecoverySettings.class)) {
            IndexRecoveryIT.setChunkSize(settings, new ByteSizeValue(randomIntBetween(50, 300), ByteSizeUnit.BYTES));
        }

        NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
        List<NodeStats> dataNodeStats = new ArrayList<>();
        for (NodeStats stat : nodeStats.getNodes()) {
@@ -86,7 +83,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase {
            }
        }
        assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
        Collections.shuffle(dataNodeStats, getRandom());
        Collections.shuffle(dataNodeStats, random());
        // we use 2 nodes a lucky and unlucky one
        // the lucky one holds the primary
        // the unlucky one gets the replica and the truncated leftovers

@@ -58,7 +58,6 @@ import static org.elasticsearch.index.query.QueryBuilders.fuzzyQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchPhrasePrefixQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.missingQuery;
import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery;
import static org.elasticsearch.index.query.QueryBuilders.prefixQuery;
import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
@@ -67,6 +66,7 @@ import static org.elasticsearch.index.query.QueryBuilders.regexpQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.index.query.QueryBuilders.typeQuery;
import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery;
import static org.elasticsearch.index.query.QueryBuilders.existsQuery;
import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight;
import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -2471,7 +2471,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {

        logger.info("--> highlighting and searching on field1");
        SearchSourceBuilder source = searchSource().query(boolQuery()
                .should(constantScoreQuery(QueryBuilders.missingQuery("field1")))
                .should(boolQuery().mustNot(QueryBuilders.existsQuery("field1")))
                .should(matchQuery("field1", "test"))
                .should(constantScoreQuery(queryStringQuery("field1:photo*"))))
                .highlighter(highlight().field("field1"));
@@ -2501,7 +2501,9 @@ public class HighlighterSearchIT extends ESIntegTestCase {
        refresh();

        logger.info("--> highlighting and searching on field1");
        SearchSourceBuilder source = searchSource().query(boolQuery().must(queryStringQuery("field1:photo*")).filter(missingQuery("field_null")))
        SearchSourceBuilder source = searchSource().query(boolQuery()
                .must(queryStringQuery("field1:photo*"))
                .mustNot(existsQuery("field_null")))
                .highlighter(highlight().field("field1"));
        SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get();
        assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The <em>photography</em> word will get highlighted"));

@@ -43,7 +43,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;

public class ExistsMissingIT extends ESIntegTestCase {
public class ExistsIT extends ESIntegTestCase {

    // TODO: move this to a unit test somewhere...
    public void testEmptyIndex() throws Exception {
@@ -51,11 +51,11 @@ public class ExistsMissingIT extends ESIntegTestCase {
        ensureYellow("test");
        SearchResponse resp = client().prepareSearch("test").setQuery(QueryBuilders.existsQuery("foo")).execute().actionGet();
        assertSearchResponse(resp);
        resp = client().prepareSearch("test").setQuery(QueryBuilders.missingQuery("foo")).execute().actionGet();
        resp = client().prepareSearch("test").setQuery(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("foo"))).execute().actionGet();
        assertSearchResponse(resp);
    }

    public void testExistsMissing() throws Exception {
    public void testExists() throws Exception {
        XContentBuilder mapping = XContentBuilder.builder(JsonXContent.jsonXContent)
                .startObject()
                    .startObject("type")
@@ -145,62 +145,6 @@ public class ExistsMissingIT extends ESIntegTestCase {
            }
            throw e;
        }

        // missing
        resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery(fieldName)).execute().actionGet();
        assertSearchResponse(resp);
        assertEquals(String.format(Locale.ROOT, "missing(%s, %d) mapping: %s response: %s", fieldName, count, mapping.string(), resp), numDocs - count, resp.getHits().totalHits());
        }
    }

    public void testNullValueUnset() throws Exception {
        assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", "f", "type=string,index=not_analyzed"));
        indexRandom(true,
                client().prepareIndex("idx", "type", "1").setSource("f", "foo"),
                client().prepareIndex("idx", "type", "2").setSource("f", null),
                client().prepareIndex("idx", "type", "3").setSource("g", "bar"),
                client().prepareIndex("idx", "type", "4").setSource("f", "bar"));

        SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, true)).get();
        assertSearchHits(resp, "2", "3");

        resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, true)).get();
        assertSearchHits(resp, "2", "3");

        resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, false)).get();
        assertSearchHits(resp);

        try {
            client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, false)).get();
            fail("both existence and null_value can't be false");
        } catch (IllegalArgumentException e) {
            // expected
        }
    }

    public void testNullValueSet() throws Exception {
        assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", "f", "type=string,index=not_analyzed,null_value=bar"));
        indexRandom(true,
                client().prepareIndex("idx", "type", "1").setSource("f", "foo"),
                client().prepareIndex("idx", "type", "2").setSource("f", null),
                client().prepareIndex("idx", "type", "3").setSource("g", "bar"),
                client().prepareIndex("idx", "type", "4").setSource("f", "bar"));

        SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, true)).get();
        assertSearchHits(resp, "2", "3", "4");

        resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, true)).get();
        assertSearchHits(resp, "3");

        resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, false)).get();
        assertSearchHits(resp, "2", "4");

        try {
            client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, false)).get();
            fail("both existence and null_value can't be false");
        } catch (IllegalArgumentException e) {
            // expected
        }
    }

}

@@ -70,7 +70,6 @@ import static org.elasticsearch.index.query.QueryBuilders.idsQuery;
import static org.elasticsearch.index.query.QueryBuilders.indicesQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
import static org.elasticsearch.index.query.QueryBuilders.missingQuery;
import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery;
import static org.elasticsearch.index.query.QueryBuilders.prefixQuery;
import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
@@ -805,32 +804,6 @@ public class SearchQueryIT extends ESIntegTestCase {
        searchResponse = client().prepareSearch().setQuery(existsQuery("obj1")).get();
        assertHitCount(searchResponse, 2l);
        assertSearchHits(searchResponse, "1", "2");

        searchResponse = client().prepareSearch().setQuery(missingQuery("field1")).get();
        assertHitCount(searchResponse, 2l);
        assertSearchHits(searchResponse, "3", "4");

        searchResponse = client().prepareSearch().setQuery(missingQuery("field1")).get();
        assertHitCount(searchResponse, 2l);
        assertSearchHits(searchResponse, "3", "4");

        searchResponse = client().prepareSearch().setQuery(constantScoreQuery(missingQuery("field1"))).get();
        assertHitCount(searchResponse, 2l);
        assertSearchHits(searchResponse, "3", "4");

        searchResponse = client().prepareSearch().setQuery(queryStringQuery("_missing_:field1")).get();
        assertHitCount(searchResponse, 2l);
        assertSearchHits(searchResponse, "3", "4");

        // wildcard check
        searchResponse = client().prepareSearch().setQuery(missingQuery("x*")).get();
        assertHitCount(searchResponse, 2l);
        assertSearchHits(searchResponse, "3", "4");

        // object check
        searchResponse = client().prepareSearch().setQuery(missingQuery("obj1")).get();
        assertHitCount(searchResponse, 2l);
        assertSearchHits(searchResponse, "3", "4");
    }

    public void testPassQueryOrFilterAsJSONString() throws Exception {

@@ -942,13 +942,13 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase {

    }
    public void assertSuggestions(String suggestion, String... suggestions) {
        String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10);
        String suggestionName = RandomStrings.randomAsciiOfLength(random(), 10);
        CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestion).size(10);
        assertSuggestions(suggestionName, suggestionBuilder, suggestions);
    }

    public void assertSuggestionsNotInOrder(String suggestString, String... suggestions) {
        String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10);
        String suggestionName = RandomStrings.randomAsciiOfLength(random(), 10);
        SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion(
                SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestString).size(10)
        ).execute().actionGet();

@@ -1,14 +0,0 @@
[[java-query-dsl-missing-query]]
==== Missing Query

See {ref}/query-dsl-missing-query.html[Missing Query]

[source,java]
--------------------------------------------------
QueryBuilder qb = missingQuery("user",   <1>
                               true,     <2>
                               true);    <3>
--------------------------------------------------
<1> field
<2> find missing field with an explicit `null` value
<3> find missing field that doesn't exist

@@ -30,11 +30,6 @@ The queries in this group are:

Find documents where the field specified contains any non-null value.

<<java-query-dsl-missing-query,`missing` query>>::

Find documents where the field specified is missing or contains only
`null` values.

<<java-query-dsl-prefix-query,`prefix` query>>::

Find documents where the field specified contains terms which begin with
@@ -75,8 +70,6 @@ include::range-query.asciidoc[]

include::exists-query.asciidoc[]

include::missing-query.asciidoc[]

include::prefix-query.asciidoc[]

include::wildcard-query.asciidoc[]
@@ -88,6 +81,3 @@ include::fuzzy-query.asciidoc[]
include::type-query.asciidoc[]

include::ids-query.asciidoc[]

@@ -3,9 +3,8 @@

The `_field_names` field indexes the names of every field in a document that
contains any value other than `null`. This field is used by the
<<query-dsl-exists-query,`exists`>> and <<query-dsl-missing-query,`missing`>>
queries to find documents that either have or don't have any non-+null+ value
for a particular field.
<<query-dsl-exists-query,`exists`>> query to find documents that
either have or don't have any non-+null+ value for a particular field.

The value of the `_field_names` field is accessible in queries, aggregations, and
scripts:
@@ -49,7 +48,6 @@ GET my_index/_search
--------------------------
// AUTOSENSE

<1> Querying on the `_field_names` field (also see the <<query-dsl-exists-query,`exists`>> and <<query-dsl-missing-query,`missing`>> queries)
<1> Querying on the `_field_names` field (also see the <<query-dsl-exists-query,`exists`>> query)
<2> Aggregating on the `_field_names` field
<3> Accessing the `_field_names` field in scripts (inline scripts must be <<enable-dynamic-scripting,enabled>> for this example to work)

@@ -53,7 +53,3 @@ IMPORTANT: The `null_value` needs to be the same datatype as the field. For
instance, a `long` field cannot have a string `null_value`. String fields
which are `analyzed` will also pass the `null_value` through the configured
analyzer.

Also see the <<query-dsl-missing-query,`missing` query>> for its `null_value` support.

@@ -17,13 +17,13 @@ The geo_shape mapping maps geo_json geometry objects to the geo_shape
type. To enable it, users must explicitly map fields to the geo_shape
type.

[cols="<,<",options="header",]
[cols="<,<,<",options="header",]
|=======================================================================
|Option |Description
|Option |Description| Default

|`tree` |Name of the PrefixTree implementation to be used: `geohash` for
GeohashPrefixTree and `quadtree` for QuadPrefixTree. Defaults to
`geohash`.
GeohashPrefixTree and `quadtree` for QuadPrefixTree.
| `geohash`

|`precision` |This parameter may be used instead of `tree_levels` to set
an appropriate value for the `tree_levels` parameter. The value
@@ -31,7 +31,8 @@ specifies the desired precision and Elasticsearch will calculate the
best tree_levels value to honor this precision. The value should be a
number followed by an optional distance unit. Valid distance units
include: `in`, `inch`, `yd`, `yard`, `mi`, `miles`, `km`, `kilometers`,
`m`,`meters` (default), `cm`,`centimeters`, `mm`, `millimeters`.
`m`,`meters`, `cm`,`centimeters`, `mm`, `millimeters`.
| `meters`

|`tree_levels` |Maximum number of layers to be used by the PrefixTree.
This can be used to control the precision of shape representations and
@@ -41,27 +42,40 @@ certain level of understanding of the underlying implementation, users
may use the `precision` parameter instead. However, Elasticsearch only
uses the tree_levels parameter internally and this is what is returned
via the mapping API even if you use the precision parameter.
| `50m`

|`strategy` |The strategy parameter defines the approach for how to
represent shapes at indexing and search time. It also influences the
capabilities available so it is recommended to let Elasticsearch set
this parameter automatically. There are two strategies available:
`recursive` and `term`. Term strategy supports point types only (the
`points_only` parameter will be automatically set to true) while
Recursive strategy supports all shape types. (IMPORTANT: see
<<prefix-trees, Prefix trees>> for more detailed information)
| `recursive`

|`distance_error_pct` |Used as a hint to the PrefixTree about how
precise it should be. Defaults to 0.025 (2.5%) with 0.5 as the maximum
supported value. PERFORMANCE NOTE: This value will be default to 0 if a `precision` or
supported value. PERFORMANCE NOTE: This value will default to 0 if a `precision` or
`tree_level` definition is explicitly defined. This guarantees spatial precision
at the level defined in the mapping. This can lead to significant memory usage
for high resolution shapes with low error (e.g., large shapes at 1m with < 0.001 error).
To improve indexing performance (at the cost of query accuracy) explicitly define
`tree_level` or `precision` along with a reasonable `distance_error_pct`, noting
that large shapes will have greater false positives.
| `0.025`

|`orientation` |Optionally define how to interpret vertex order for
polygons / multipolygons. This parameter defines one of two coordinate
system rules (Right-hand or Left-hand) each of which can be specified in three
different ways. 1. Right-hand rule (default): `right`, `ccw`, `counterclockwise`,
different ways. 1. Right-hand rule: `right`, `ccw`, `counterclockwise`,
2. Left-hand rule: `left`, `cw`, `clockwise`. The default orientation
(`counterclockwise`) complies with the OGC standard which defines
outer ring vertices in counterclockwise order with inner ring(s) vertices (holes)
in clockwise order. Setting this parameter in the geo_shape mapping explicitly
sets vertex order for the coordinate list of a geo_shape field but can be
overridden in each individual GeoJSON document.
| `ccw`

|`points_only` |Setting this option to `true` (defaults to `false`) configures
the `geo_shape` field type for point shapes only (NOTE: Multi-Points are not
@@ -70,18 +84,21 @@ yet supported). This optimizes index and search performance for the `geohash` an
queries can not be executed on `geo_point` field types. This option bridges the gap
by improving point performance on a `geo_shape` field so that `geo_shape` queries are
optimal on a point only field.
| `false`


|=======================================================================

[[prefix-trees]]
[float]
==== Prefix trees

To efficiently represent shapes in the index, Shapes are converted into
a series of hashes representing grid squares using implementations of a
PrefixTree. The tree notion comes from the fact that the PrefixTree uses
multiple grid layers, each with an increasing level of precision to
represent the Earth.
a series of hashes representing grid squares (commonly referred to as "rasters")
using implementations of a PrefixTree. The tree notion comes from the fact that
the PrefixTree uses multiple grid layers, each with an increasing level of
precision to represent the Earth. This can be thought of as increasing the level
of detail of a map or image at higher zoom levels.

Multiple PrefixTree implementations are provided:

@@ -100,6 +117,29 @@ longitude the resulting hash is a bit set. A tree level in a quad tree
represents 2 bits in this bit set, one for each coordinate. The maximum
amount of levels for the quad trees in Elasticsearch is 50.
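
As a quick illustration, a minimal `geo_shape` mapping sketch that opts into a
`quadtree` based PrefixTree with a desired precision (the field name `location`
is only an example, not part of the original page):

[source,js]
--------------------------------------------------
{
    "properties": {
        "location": {
            "type": "geo_shape",
            "tree": "quadtree",
            "precision": "50m"
        }
    }
}
--------------------------------------------------

Elasticsearch translates the `precision` into a concrete `tree_levels` value
for the chosen tree, as described in the parameter table above.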

[[spatial-strategy]]
[float]
===== Spatial strategies
The PrefixTree implementations rely on a SpatialStrategy for decomposing
the provided Shape(s) into approximated grid squares. Each strategy answers
the following:

* What type of Shapes can be indexed?
* What types of Query Operations and Shapes can be used?
* Does it support more than one Shape per field?

The following Strategy implementations (with corresponding capabilities)
are provided:

[cols="<,<,<,<",options="header",]
|=======================================================================
|Strategy |Supported Shapes |Supported Queries |Multiple Shapes

|`recursive` |<<input-structure, All>> |`INTERSECTS`, `DISJOINT`, `WITHIN`, `CONTAINS` |Yes
|`term` |<<point, Points>> |`INTERSECTS` |Yes

|=======================================================================

[float]
===== Accuracy

@@ -149,6 +189,7 @@ between index size and a reasonable level of precision of 50m at the
equator. This allows for indexing tens of millions of shapes without
overly bloating the resulting index too much relative to the input size.

[[input-structure]]
[float]
==== Input Structure

@@ -189,6 +230,7 @@ differs from many Geospatial APIs (e.g., Google Maps) that generally
use the colloquial latitude, longitude (Y, X).
=============================================

[[point]]
[float]
===== http://geojson.org/geojson-spec.html#id2[Point]

@@ -213,6 +213,13 @@ float by default instead of a double. The reasoning is that floats should be
more than enough for most cases but would decrease storage requirements
significantly.

==== `_source`'s `format` option

The `_source` mapping does not support the `format` option anymore. This option
will still be accepted for indices created before the upgrade to 3.0 for backward
compatibility, but it will have no effect. Indices created on or after 3.0 will
reject this option.
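
For instance, a mapping sketch along these lines (the type name `my_type` is
illustrative only) would now be rejected on a newly created index:

[source,js]
--------------------------------------------------
{
    "mappings": {
        "my_type": {
            "_source": {
                "format": "json"
            }
        }
    }
}
--------------------------------------------------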

[[breaking_30_plugins]]
=== Plugin changes

@@ -427,9 +434,9 @@ Use the `field(String, float)` method instead.

==== MissingQueryBuilder

The two individual setters for existence() and nullValue() were removed in favour of
optional constructor settings in order to better capture and validate their interdependent
settings at construction time.
The MissingQueryBuilder which was deprecated in 2.2.0 is removed. As a replacement use ExistsQueryBuilder
inside a mustNot() clause. So instead of using `new MissingQueryBuilder(name)` now use
`new BoolQueryBuilder().mustNot(new ExistsQueryBuilder(name))`.
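
In Java client code the migration boils down to the following minimal sketch
(the field name `user` is only an example):

[source,java]
--------------------------------------------------
// before, removed in 3.0:
// QueryBuilder query = new MissingQueryBuilder("user");

// after:
QueryBuilder query = new BoolQueryBuilder().mustNot(new ExistsQueryBuilder("user"));
--------------------------------------------------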

==== NotQueryBuilder

@@ -507,3 +514,4 @@ from `OsStats.Cpu#getPercent`.
=== Fields option
Only stored fields are retrievable with this option.
The fields option won't be able to load non stored fields from _source anymore.
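
For instance (the field name `user` is illustrative), a request such as the
sketch below now returns `user` in the hits only if `user` is mapped as a
stored field:

[source,js]
--------------------------------------------------
GET /_search
{
    "fields": [ "user" ],
    "query": { "match_all": {} }
}
--------------------------------------------------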

@@ -38,7 +38,6 @@ These documents would *not* match the above query:
<3> The `user` field is missing completely.

[float]
===== `null_value` mapping
==== `null_value` mapping

If the field mapping includes the <<null-value,`null_value`>> setting
then explicit `null` values are replaced with the specified `null_value`. For
@@ -70,3 +70,21 @@ no values in the `user` field and thus would not match the `exists` filter:
{ "foo": "bar" }
--------------------------------------------------

==== `missing` query

The `missing` query has been removed because it can advantageously be replaced by an `exists` query inside a `must_not`
clause, as follows:

[source,js]
--------------------------------------------------
"bool": {
    "must_not": {
        "exists": {
            "field": "user"
        }
    }
}
--------------------------------------------------

This query returns documents that have no value in the `user` field.

@@ -104,7 +104,10 @@ shape:

==== Spatial Relations

The Query supports the following spatial relations:
The <<spatial-strategy, geo_shape strategy>> mapping parameter determines
which spatial relation operators may be used at search time.

The following is a complete list of spatial relation operators available:

* `INTERSECTS` - (default) Return all documents whose `geo_shape` field
intersects the query geometry.

@@ -1,132 +0,0 @@
[[query-dsl-missing-query]]
=== Missing Query

Returns documents that have only `null` values or no value in the original field:

[source,js]
--------------------------------------------------
{
    "constant_score" : {
        "filter" : {
            "missing" : { "field" : "user" }
        }
    }
}
--------------------------------------------------

For instance, the following docs would match the above filter:

[source,js]
--------------------------------------------------
{ "user": null }
{ "user": [] } <1>
{ "user": [null] } <2>
{ "foo": "bar" } <3>
--------------------------------------------------
<1> This field has no values.
<2> This field has no non-`null` values.
<3> The `user` field is missing completely.

These documents would *not* match the above filter:

[source,js]
--------------------------------------------------
{ "user": "jane" }
{ "user": "" } <1>
{ "user": "-" } <2>
{ "user": ["jane"] }
{ "user": ["jane", null ] } <3>
--------------------------------------------------
<1> An empty string is a non-`null` value.
<2> Even though the `standard` analyzer would emit zero tokens, the original field is non-`null`.
<3> This field has one non-`null` value.

[float]
==== `null_value` mapping

If the field mapping includes a <<null-value,`null_value`>> then explicit `null` values
are replaced with the specified `null_value`. For instance, if the `user` field were mapped
as follows:

[source,js]
--------------------------------------------------
"user": {
    "type": "string",
    "null_value": "_null_"
}
--------------------------------------------------

then explicit `null` values would be indexed as the string `_null_`, and the
following docs would *not* match the `missing` filter:

[source,js]
--------------------------------------------------
{ "user": null }
{ "user": [null] }
--------------------------------------------------

However, these docs--without explicit `null` values--would still have
no values in the `user` field and thus would match the `missing` filter:

[source,js]
--------------------------------------------------
{ "user": [] }
{ "foo": "bar" }
--------------------------------------------------

[float]
===== `existence` and `null_value` parameters

When the field being queried has a `null_value` mapping, then the behaviour of
the `missing` filter can be altered with the `existence` and `null_value`
parameters:

[source,js]
--------------------------------------------------
{
    "constant_score" : {
        "filter" : {
            "missing" : {
                "field" : "user",
                "existence" : true,
                "null_value" : false
            }
        }
    }
}
--------------------------------------------------


`existence`::
+
--
When the `existence` parameter is set to `true` (the default), the missing
filter will include documents where the field has *no* values, ie:

[source,js]
--------------------------------------------------
{ "user": [] }
{ "foo": "bar" }
--------------------------------------------------

When set to `false`, these documents will not be included.
--

`null_value`::
+
--
When the `null_value` parameter is set to `true`, the missing
filter will include documents where the field contains a `null` value, ie:

[source,js]
--------------------------------------------------
{ "user": null }
{ "user": [null] }
{ "user": ["jane",null] } <1>
--------------------------------------------------
<1> Matches because the field contains a `null` value, even though it also contains a non-`null` value.

When set to `false` (the default), these documents will not be included.
--

NOTE: Either `existence` or `null_value` or both must be set to `true`.

@@ -30,11 +30,6 @@ The queries in this group are:

Find documents where the field specified contains any non-null value.

<<query-dsl-missing-query,`missing` query>>::

Find documents where the field specified is missing or contains only
`null` values.

<<query-dsl-prefix-query,`prefix` query>>::

Find documents where the field specified contains terms which begin with
@@ -75,8 +70,6 @@ include::range-query.asciidoc[]

include::exists-query.asciidoc[]

include::missing-query.asciidoc[]

include::prefix-query.asciidoc[]

include::wildcard-query.asciidoc[]
@@ -88,6 +81,3 @@ include::fuzzy-query.asciidoc[]
include::type-query.asciidoc[]

include::ids-query.asciidoc[]

@@ -96,14 +96,6 @@ The `exists` filter has been replaced by the <<query-dsl-exists-query>>. It beh
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-missing-filter"]
=== Missing Filter

The `missing` filter has been replaced by the <<query-dsl-missing-query>>. It behaves
as a query in ``query context'' and as a filter in ``filter context'' (see
<<query-dsl>>).

[role="exclude",id="query-dsl-geo-bounding-box-filter"]
=== Geo Bounding Box Filter

@@ -451,4 +443,3 @@ The `not` query has been replaced by using a `mustNot` clause in a Boolean query
=== Nested type

The docs for the `nested` field datatype have moved to <<nested>>.

@@ -92,7 +92,7 @@ PUT music/song/1?refresh=true
The following parameters are supported:

`input`::
    The input to store, this can be a an array of strings or just
    The input to store, this can be an array of strings or just
    a string. This field is mandatory.

`weight`::

@ -48,58 +48,24 @@ import org.elasticsearch.script.groovy.GroovyPlugin;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.GeoDistanceSortBuilder;
import org.elasticsearch.search.sort.ScriptSortBuilder;
import org.elasticsearch.search.sort.SortBuilders;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.sort.*;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.hamcrest.Matchers;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.*;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutionException;

import static org.apache.lucene.util.GeoUtils.TOLERANCE;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
import static org.elasticsearch.index.query.QueryBuilders.*;
import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction;
import static org.elasticsearch.search.sort.SortBuilders.fieldSort;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSortValues;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId;
import static org.hamcrest.Matchers.closeTo;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.lessThan;
import static org.hamcrest.Matchers.lessThanOrEqualTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

import static org.apache.lucene.util.GeoUtils.TOLERANCE;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
import static org.hamcrest.Matchers.*;

/**
 *

@ -503,7 +469,7 @@ public class SimpleSortTests extends ESIntegTestCase {
    }

    public void testSimpleSorts() throws Exception {
        Random random = getRandom();
        Random random = random();
        assertAcked(prepareCreate("test")
                .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties")
                        .startObject("str_value").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject()

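Both `getRandom()` and `random()` hand back the suite's seeded randomness; a standalone sketch of the underlying idea, with `tests.seed` as an assumed property name:

    import java.util.Random;

    class SeededRandomSketch {
        public static void main(String[] args) {
            // All draws come from one seed; rerunning with the same
            // -Dtests.seed value replays the exact sequence.
            long seed = Long.getLong("tests.seed", System.nanoTime());
            Random random = new Random(seed);
            System.out.println("seed=" + seed + " first=" + random.nextInt(100));
        }
    }
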
@ -32,6 +32,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cloud.aws.network.Ec2NameResolver;
import org.elasticsearch.cloud.aws.node.Ec2CustomNodeAttributes;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.network.NetworkService;

@ -119,7 +120,7 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent<AwsEc2Service>
        }

        // Increase the number of retries in case of 5xx API responses
        final Random rand = new Random();
        final Random rand = Randomness.get();
        RetryPolicy retryPolicy = new RetryPolicy(
                RetryPolicy.RetryCondition.NO_RETRY_CONDITION,
                new RetryPolicy.BackoffStrategy() {

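That `Random` feeds the custom backoff strategy; a standalone sketch of jittered exponential backoff, with the base delay and cap as illustrative assumptions rather than the SDK's values:

    import java.util.Random;

    class BackoffSketch {
        // Exponential backoff with full jitter: the window doubles per retry
        // (capped), and a random point inside it spreads concurrent clients out.
        static long delayBeforeNextRetryMillis(Random rand, int retriesAttempted) {
            long windowMillis = 500L << Math.min(retriesAttempted, 6);
            return (long) (rand.nextDouble() * windowMillis);
        }
    }
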
@ -24,6 +24,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.test.ESIntegTestCase;

@ -57,8 +58,20 @@ public class TribeUnitTests extends ESTestCase {
            .put("node.mode", NODE_MODE)
            .put("path.home", createTempDir()).build();

        tribe1 = new TribeClientNode(Settings.builder().put(baseSettings).put("cluster.name", "tribe1").put("name", "tribe1_node").build()).start();
        tribe2 = new TribeClientNode(Settings.builder().put(baseSettings).put("cluster.name", "tribe2").put("name", "tribe2_node").build()).start();
        tribe1 = new TribeClientNode(
            Settings.builder()
                .put(baseSettings)
                .put("cluster.name", "tribe1")
                .put("name", "tribe1_node")
                .put(DiscoveryService.SETTING_DISCOVERY_SEED, random().nextLong())
                .build()).start();
        tribe2 = new TribeClientNode(
            Settings.builder()
                .put(baseSettings)
                .put("cluster.name", "tribe2")
                .put("name", "tribe2_node")
                .put(DiscoveryService.SETTING_DISCOVERY_SEED, random().nextLong())
                .build()).start();
    }

    @AfterClass

@ -73,6 +86,8 @@ public class TribeUnitTests extends ESTestCase {
        System.setProperty("es.cluster.name", "tribe_node_cluster");
        System.setProperty("es.tribe.t1.cluster.name", "tribe1");
        System.setProperty("es.tribe.t2.cluster.name", "tribe2");
        System.setProperty("es.tribe.t1.discovery.id.seed", Long.toString(random().nextLong()));
        System.setProperty("es.tribe.t2.discovery.id.seed", Long.toString(random().nextLong()));

        try {
            assertTribeNodeSuccesfullyCreated(Settings.EMPTY);

@ -80,6 +95,8 @@ public class TribeUnitTests extends ESTestCase {
            System.clearProperty("es.cluster.name");
            System.clearProperty("es.tribe.t1.cluster.name");
            System.clearProperty("es.tribe.t2.cluster.name");
            System.clearProperty("es.tribe.t1.discovery.id.seed");
            System.clearProperty("es.tribe.t2.discovery.id.seed");
        }
    }

@ -1,3 +1,5 @@
cluster.name: tribe_node_cluster
tribe.t1.cluster.name: tribe1
tribe.t2.cluster.name: tribe2
tribe.t1.discovery.id.seed: 1
tribe.t2.discovery.id.seed: 2

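The fixed `discovery.id.seed` values make each tribe node's generated id stable across runs; a standalone sketch of the idea, with the id format assumed rather than taken from Elasticsearch:

    import java.util.Base64;
    import java.util.Random;

    class NodeIdSketch {
        // A fixed seed yields the same id on every run, which is what the
        // per-tribe seeds above pin down.
        static String nodeId(long seed) {
            byte[] bytes = new byte[16];
            new Random(seed).nextBytes(bytes);
            return Base64.getUrlEncoder().withoutPadding().encodeToString(bytes);
        }
    }
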
@ -20,6 +20,7 @@
package org.elasticsearch.smoketest;

import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.SuppressForbidden;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClient;

@ -34,7 +35,10 @@ import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.UnknownHostException;
import java.nio.file.Path;
import java.util.Locale;

@ -103,20 +107,14 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase {
        return client;
    }

    private static Client startClient() throws UnknownHostException {
    private static Client startClient() throws IOException {
        String[] stringAddresses = clusterAddresses.split(",");
        TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length];
        int i = 0;
        for (String stringAddress : stringAddresses) {
            String[] split = stringAddress.split(":");
            if (split.length < 2) {
                throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid");
            }
            try {
                transportAddresses[i++] = new InetSocketTransportAddress(InetAddress.getByName(split[0]), Integer.valueOf(split[1]));
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]");
            }
            URL url = new URL("http://" + stringAddress);
            InetAddress inetAddress = InetAddress.getByName(url.getHost());
            transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(inetAddress, url.getPort()));
        }
        return startClient(createTempDir(), transportAddresses);
    }

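One motivation for parsing through `java.net.URL` instead of a bare `split(":")`, shown in a standalone sketch with an assumed address: IPv6 literals contain colons themselves.

    import java.net.URL;

    class ParseAddressSketch {
        public static void main(String[] args) throws Exception {
            // A bare split(":") would shatter an IPv6 literal; the URL parser
            // separates host and port correctly.
            URL url = new URL("http://" + "[::1]:9300");
            System.out.println(url.getHost() + " port " + url.getPort()); // [::1] port 9300
        }
    }
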
@ -125,7 +123,7 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase {
        if (client == null) {
            try {
                client = startClient();
            } catch (UnknownHostException e) {
            } catch (IOException e) {
                logger.error("can not start the client", e);
            }
            assertThat(client, notNullValue());

@ -36,6 +36,7 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;

@ -111,7 +112,7 @@ public abstract class ESAllocationTestCase extends ESTestCase {
        for (AllocationDecider d : list) {
            assertThat(defaultAllocationDeciders.contains(d.getClass()), is(true));
        }
        Collections.shuffle(list, random);
        Randomness.shuffle(list);
        return new AllocationDeciders(settings, list.toArray(new AllocationDecider[0]));

    }

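A hypothetical sketch of what a helper like `Randomness.shuffle` provides (names and seed plumbing assumed, not the actual `org.elasticsearch.common.Randomness`):

    import java.util.Collections;
    import java.util.List;
    import java.util.Random;

    final class TestRandomnessSketch {
        // One seeded source per thread, so every shuffle is replayable from
        // the test seed instead of depending on whichever Random is in scope.
        private static final ThreadLocal<Random> SOURCE =
                ThreadLocal.withInitial(() -> new Random(Long.getLong("tests.seed", 42L)));

        static <T> void shuffle(List<T> list) {
            Collections.shuffle(list, SOURCE.get());
        }
    }
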
@ -131,16 +131,6 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase {
        return file;
    }

    @Override
    protected Settings.Builder setRandomIndexSettings(Random random, Settings.Builder builder) {
        if (globalCompatibilityVersion().before(Version.V_1_3_2)) {
            // if we test against nodes before 1.3.2 we disable all the compression due to a known bug
            // see #7210
            builder.put(RecoverySettings.INDICES_RECOVERY_COMPRESS, false);
        }
        return builder;
    }

    /**
     * Retruns the tests compatibility version.
     */

@ -250,13 +240,6 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase {
        Settings.Builder builder = Settings.builder().put(requiredSettings());
        builder.put(TransportModule.TRANSPORT_TYPE_KEY, "netty"); // run same transport / disco as external
        builder.put("node.mode", "network");

        if (compatibilityVersion().before(Version.V_1_3_2)) {
            // if we test against nodes before 1.3.2 we disable all the compression due to a known bug
            // see #7210
            builder.put(Transport.TransportSettings.TRANSPORT_TCP_COMPRESS, false)
                    .put(RecoverySettings.INDICES_RECOVERY_COMPRESS, false);
        }
        return builder.build();
    }

@ -28,6 +28,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.http.impl.client.HttpClients;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.SuppressForbidden;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;

@ -136,6 +137,8 @@ import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.UnknownHostException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;

@ -533,7 +536,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
    }

    private TestCluster buildWithPrivateContext(final Scope scope, final long seed) throws Exception {
        return RandomizedContext.current().runWithPrivateRandomness(new Randomness(seed), new Callable<TestCluster>() {
        return RandomizedContext.current().runWithPrivateRandomness(new com.carrotsearch.randomizedtesting.Randomness(seed), new Callable<TestCluster>() {
            @Override
            public TestCluster call() throws Exception {
                return buildTestCluster(scope, seed);

@ -1388,7 +1391,7 @@
            }
        }
        final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
        Collections.shuffle(builders, random);
        Collections.shuffle(builders, random());
        final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Throwable>> errors = new CopyOnWriteArrayList<>();
        List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>();
        // If you are indexing just a few documents then frequently do it one at a time. If many then frequently in bulk.

@ -1727,20 +1730,14 @@
        return Settings.EMPTY;
    }

    private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws UnknownHostException {
    private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws IOException {
        String[] stringAddresses = clusterAddresses.split(",");
        TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length];
        int i = 0;
        for (String stringAddress : stringAddresses) {
            String[] split = stringAddress.split(":");
            if (split.length < 2) {
                throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid");
            }
            try {
                transportAddresses[i++] = new InetSocketTransportAddress(InetAddress.getByName(split[0]), Integer.valueOf(split[1]));
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]");
            }
            URL url = new URL("http://" + stringAddress);
            InetAddress inetAddress = InetAddress.getByName(url.getHost());
            transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(inetAddress, url.getPort()));
        }
        return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses);
    }

@ -566,7 +566,7 @@ public abstract class ESTestCase extends LuceneTestCase {
            throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a list of " + values.length + " objects");
        }
        List<T> list = arrayAsArrayList(values);
        Collections.shuffle(list);
        Collections.shuffle(list, random());
        return list.subList(0, size);
    }

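A standalone sketch of why the seeded overload matters (values illustrative): the no-argument `Collections.shuffle` draws from its own JVM-global `Random`, outside the test seed's control.

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.Random;

    class ShuffleDemo {
        public static void main(String[] args) {
            List<Integer> list = Arrays.asList(1, 2, 3, 4, 5);
            Collections.shuffle(list, new Random(42)); // deterministic for a given seed
            System.out.println(list);
            // Collections.shuffle(list) draws from a JVM-global Random instead,
            // so two runs with the same test seed could still differ.
        }
    }
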
@ -615,7 +615,7 @@
        sb.append("]");
        assertThat(count + " files exist that should have been cleaned:\n" + sb.toString(), count, equalTo(0));
    }


    /** Returns the suite failure marker: internal use only! */
    public static TestRuleMarkFailure getSuiteFailureMarker() {
        return suiteFailureMarker;

@ -447,10 +447,6 @@ public final class InternalTestCluster extends TestCluster {
            }
        }

        if (random.nextBoolean()) {
            builder.put(RecoverySettings.INDICES_RECOVERY_COMPRESS, random.nextBoolean());
        }

        if (random.nextBoolean()) {
            builder.put(NettyTransport.PING_SCHEDULE, RandomInts.randomIntBetween(random, 100, 2000) + "ms");
        }

@ -1554,7 +1550,7 @@
        for (int i = 0; i < numNodes; i++) {
            asyncs.add(startNodeAsync(settings, version));
        }

        return () -> {
            List<String> ids = new ArrayList<>();
            for (Async<String> async : asyncs) {