Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-03-24 17:09:48 +00:00)

commit b4b874f0d8
Merge branch 'master' into feature-suggest-refactoring
@ -307,6 +307,12 @@ class BuildPlugin implements Plugin<Project> {
/** Adds repositories used by ES dependencies */
static void configureRepositories(Project project) {
RepositoryHandler repos = project.repositories
if (System.getProperty("repos.mavenlocal") != null) {
// with -Drepos.mavenlocal=true we can force checking the local .m2 repo which is
// useful for development, i.e. bwc tests where we install stuff in the local repository
// such that we don't have to pass hardcoded files to gradle
repos.mavenLocal()
}
repos.mavenCentral()
repos.maven {
name 'sonatype-snapshots'

@ -23,8 +23,6 @@ import org.gradle.api.Project
|
||||
import org.gradle.api.file.FileCollection
|
||||
import org.gradle.api.tasks.Input
|
||||
|
||||
import java.time.LocalDateTime
|
||||
|
||||
/** Configuration for an elasticsearch cluster, used for integration tests. */
|
||||
class ClusterConfiguration {
|
||||
|
||||
@ -34,6 +32,12 @@ class ClusterConfiguration {
|
||||
@Input
|
||||
int numNodes = 1
|
||||
|
||||
@Input
|
||||
int numBwcNodes = 0
|
||||
|
||||
@Input
|
||||
String bwcVersion = null
|
||||
|
||||
@Input
|
||||
int httpPort = 0
@ -53,11 +53,50 @@ class ClusterFormationTasks {
|
||||
// no need to add cluster formation tasks if the task won't run!
|
||||
return
|
||||
}
|
||||
configureDistributionDependency(project, config.distribution)
|
||||
List<Task> startTasks = []
|
||||
File sharedDir = new File(project.buildDir, "cluster/shared")
|
||||
// first we remove everything in the shared cluster directory to ensure there are no leftovers in repos or anything
|
||||
// in theory this should not be necessary but repositories are only deleted in the cluster-state and not on-disk
|
||||
// such that snapshots survive failures / test runs and there is no simple way today to fix that.
|
||||
Task cleanup = project.tasks.create(name: "${task.name}#prepareCluster.cleanShared", type: Delete, dependsOn: task.dependsOn.collect()) {
|
||||
delete sharedDir
|
||||
doLast {
|
||||
sharedDir.mkdirs()
|
||||
}
|
||||
}
|
||||
List<Task> startTasks = [cleanup]
|
||||
List<NodeInfo> nodes = []
|
||||
if (config.numNodes < config.numBwcNodes) {
|
||||
throw new GradleException("numNodes must be >= numBwcNodes [${config.numNodes} < ${config.numBwcNodes}]")
|
||||
}
|
||||
if (config.numBwcNodes > 0 && config.bwcVersion == null) {
|
||||
throw new GradleException("bwcVersion must not be null if numBwcNodes is > 0")
|
||||
}
|
||||
// this is our current version distribution configuration we use for all kinds of REST tests etc.
|
||||
project.configurations {
|
||||
elasticsearchDistro
|
||||
}
|
||||
configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchDistro, VersionProperties.elasticsearch)
|
||||
if (config.bwcVersion != null && config.numBwcNodes > 0) {
|
||||
// if we have a cluster that has a BWC cluster we also need to configure a dependency on the BWC version
|
||||
// this version uses the same distribution etc. and only differs in the version we depend on.
|
||||
// from here on everything else works the same as if it's the current version, we fetch the BWC version
|
||||
// from mirrors using Gradle's built-in mechanism etc.
project.configurations {
|
||||
elasticsearchBwcDistro
|
||||
}
|
||||
configureDistributionDependency(project, config.distribution, project.configurations.elasticsearchBwcDistro, config.bwcVersion)
|
||||
}
|
||||
|
||||
for (int i = 0; i < config.numNodes; ++i) {
|
||||
NodeInfo node = new NodeInfo(config, i, project, task)
|
||||
// we start N nodes and out of these N nodes there might be M bwc nodes.
|
||||
// for each of those nodes we might have a different configuration
String elasticsearchVersion = VersionProperties.elasticsearch
|
||||
Configuration configuration = project.configurations.elasticsearchDistro
|
||||
if (i < config.numBwcNodes) {
|
||||
elasticsearchVersion = config.bwcVersion
|
||||
configuration = project.configurations.elasticsearchBwcDistro
|
||||
}
|
||||
NodeInfo node = new NodeInfo(config, i, project, task, elasticsearchVersion, sharedDir)
|
||||
if (i == 0) {
|
||||
if (config.seedNodePortsFile != null) {
|
||||
// we might allow this in the future to be set but for now we are the only authority to set this!
|
||||
@ -66,7 +105,7 @@ class ClusterFormationTasks {
|
||||
config.seedNodePortsFile = node.transportPortsFile;
|
||||
}
|
||||
nodes.add(node)
|
||||
startTasks.add(configureNode(project, task, node))
|
||||
startTasks.add(configureNode(project, task, cleanup, node, configuration))
|
||||
}
|
||||
|
||||
Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
|
||||
@ -77,20 +116,14 @@ class ClusterFormationTasks {
|
||||
}
|
||||
|
||||
/** Adds a dependency on the given distribution */
|
||||
static void configureDistributionDependency(Project project, String distro) {
|
||||
String elasticsearchVersion = VersionProperties.elasticsearch
|
||||
static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) {
|
||||
String packaging = distro
|
||||
if (distro == 'tar') {
|
||||
packaging = 'tar.gz'
|
||||
} else if (distro == 'integ-test-zip') {
|
||||
packaging = 'zip'
|
||||
}
|
||||
project.configurations {
|
||||
elasticsearchDistro
|
||||
}
|
||||
project.dependencies {
|
||||
elasticsearchDistro "org.elasticsearch.distribution.${distro}:elasticsearch:${elasticsearchVersion}@${packaging}"
|
||||
}
|
||||
project.dependencies.add(configuration.name, "org.elasticsearch.distribution.${distro}:elasticsearch:${elasticsearchVersion}@${packaging}")
|
||||
}
|
||||
|
||||
/**
|
||||
@ -110,10 +143,10 @@ class ClusterFormationTasks {
|
||||
*
|
||||
* @return a task which starts the node.
|
||||
*/
|
||||
static Task configureNode(Project project, Task task, NodeInfo node) {
|
||||
static Task configureNode(Project project, Task task, Object dependsOn, NodeInfo node, Configuration configuration) {
|
||||
|
||||
// tasks are chained so their execution order is maintained
|
||||
Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: task.dependsOn.collect()) {
|
||||
Task setup = project.tasks.create(name: taskName(task, node, 'clean'), type: Delete, dependsOn: dependsOn) {
|
||||
delete node.homeDir
|
||||
delete node.cwd
|
||||
doLast {
|
||||
@ -122,7 +155,7 @@ class ClusterFormationTasks {
|
||||
}
|
||||
setup = configureCheckPreviousTask(taskName(task, node, 'checkPrevious'), project, setup, node)
|
||||
setup = configureStopTask(taskName(task, node, 'stopPrevious'), project, setup, node)
|
||||
setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node)
|
||||
setup = configureExtractTask(taskName(task, node, 'extract'), project, setup, node, configuration)
|
||||
setup = configureWriteConfigTask(taskName(task, node, 'configure'), project, setup, node)
|
||||
setup = configureExtraConfigFilesTask(taskName(task, node, 'extraConfig'), project, setup, node)
|
||||
setup = configureCopyPluginsTask(taskName(task, node, 'copyPlugins'), project, setup, node)
|
||||
@ -158,27 +191,28 @@ class ClusterFormationTasks {
|
||||
}
|
||||
|
||||
/** Adds a task to extract the elasticsearch distribution */
|
||||
static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node) {
|
||||
List extractDependsOn = [project.configurations.elasticsearchDistro, setup]
|
||||
/* project.configurations.elasticsearchDistro.singleFile will be an
|
||||
external artifact if this is being run by a plugin not living in the
|
||||
elasticsearch source tree. If this is a plugin built in the
|
||||
elasticsearch source tree or this is a distro in the elasticsearch
|
||||
source tree then this should be the version of elasticsearch built
|
||||
by the source tree. If it isn't then Bad Things(TM) will happen. */
|
||||
static Task configureExtractTask(String name, Project project, Task setup, NodeInfo node, Configuration configuration) {
|
||||
List extractDependsOn = [configuration, setup]
|
||||
/* configuration.singleFile will be an external artifact if this is being run by a plugin not living in the
|
||||
elasticsearch source tree. If this is a plugin built in the elasticsearch source tree or this is a distro in
|
||||
the elasticsearch source tree then this should be the version of elasticsearch built by the source tree.
|
||||
If it isn't then Bad Things(TM) will happen. */
|
||||
Task extract
|
||||
|
||||
switch (node.config.distribution) {
|
||||
case 'integ-test-zip':
|
||||
case 'zip':
|
||||
extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) {
|
||||
from { project.zipTree(project.configurations.elasticsearchDistro.singleFile) }
|
||||
from {
|
||||
project.zipTree(configuration.singleFile)
|
||||
}
|
||||
into node.baseDir
|
||||
}
|
||||
break;
|
||||
case 'tar':
|
||||
extract = project.tasks.create(name: name, type: Copy, dependsOn: extractDependsOn) {
|
||||
from {
|
||||
project.tarTree(project.resources.gzip(project.configurations.elasticsearchDistro.singleFile))
|
||||
project.tarTree(project.resources.gzip(configuration.singleFile))
|
||||
}
|
||||
into node.baseDir
|
||||
}
|
||||
@ -187,7 +221,7 @@ class ClusterFormationTasks {
|
||||
File rpmDatabase = new File(node.baseDir, 'rpm-database')
|
||||
File rpmExtracted = new File(node.baseDir, 'rpm-extracted')
|
||||
/* Delay reading the location of the rpm file until task execution */
|
||||
Object rpm = "${ -> project.configurations.elasticsearchDistro.singleFile}"
|
||||
Object rpm = "${ -> configuration.singleFile}"
|
||||
extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
|
||||
commandLine 'rpm', '--badreloc', '--nodeps', '--noscripts', '--notriggers',
|
||||
'--dbpath', rpmDatabase,
|
||||
@ -202,7 +236,7 @@ class ClusterFormationTasks {
|
||||
case 'deb':
|
||||
/* Delay reading the location of the deb file until task execution */
|
||||
File debExtracted = new File(node.baseDir, 'deb-extracted')
|
||||
Object deb = "${ -> project.configurations.elasticsearchDistro.singleFile}"
|
||||
Object deb = "${ -> configuration.singleFile}"
|
||||
extract = project.tasks.create(name: name, type: LoggedExec, dependsOn: extractDependsOn) {
|
||||
commandLine 'dpkg-deb', '-x', deb, debExtracted
|
||||
doFirst {
|
||||
@ -221,8 +255,8 @@ class ClusterFormationTasks {
|
||||
Map esConfig = [
|
||||
'cluster.name' : node.clusterName,
|
||||
'pidfile' : node.pidFile,
|
||||
'path.repo' : "${node.homeDir}/repo",
|
||||
'path.shared_data' : "${node.homeDir}/../",
|
||||
'path.repo' : "${node.sharedDir}/repo",
|
||||
'path.shared_data' : "${node.sharedDir}/",
|
||||
// Define a node attribute so we can test that it exists
|
||||
'node.testattr' : 'test',
|
||||
'repositories.url.allowed_urls': 'http://snapshot.test*'
|
||||
|
@ -40,6 +40,9 @@ class NodeInfo {
|
||||
/** root directory all node files and operations happen under */
|
||||
File baseDir
|
||||
|
||||
/** shared data directory all nodes share */
|
||||
File sharedDir
|
||||
|
||||
/** the pid file the node will use */
|
||||
File pidFile
|
||||
|
||||
@ -89,14 +92,15 @@ class NodeInfo {
|
||||
ByteArrayOutputStream buffer = new ByteArrayOutputStream()
|
||||
|
||||
/** Creates a node to run as part of a cluster for the given task */
|
||||
NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task) {
|
||||
NodeInfo(ClusterConfiguration config, int nodeNum, Project project, Task task, String nodeVersion, File sharedDir) {
|
||||
this.config = config
|
||||
this.nodeNum = nodeNum
|
||||
this.sharedDir = sharedDir
|
||||
clusterName = "${task.path.replace(':', '_').substring(1)}"
|
||||
baseDir = new File(project.buildDir, "cluster/${task.name} node${nodeNum}")
|
||||
pidFile = new File(baseDir, 'es.pid')
|
||||
homeDir = homeDir(baseDir, config.distribution)
|
||||
confDir = confDir(baseDir, config.distribution)
|
||||
homeDir = homeDir(baseDir, config.distribution, nodeVersion)
|
||||
confDir = confDir(baseDir, config.distribution, nodeVersion)
|
||||
configFile = new File(confDir, 'elasticsearch.yml')
|
||||
// even for rpm/deb, the logs are under home because we don't start with real services
File logsDir = new File(homeDir, 'logs')
|
||||
@ -181,13 +185,13 @@ class NodeInfo {
|
||||
}
|
||||
|
||||
/** Returns the directory elasticsearch home is contained in for the given distribution */
|
||||
static File homeDir(File baseDir, String distro) {
|
||||
static File homeDir(File baseDir, String distro, String nodeVersion) {
|
||||
String path
|
||||
switch (distro) {
|
||||
case 'integ-test-zip':
|
||||
case 'zip':
|
||||
case 'tar':
|
||||
path = "elasticsearch-${VersionProperties.elasticsearch}"
|
||||
path = "elasticsearch-${nodeVersion}"
|
||||
break
|
||||
case 'rpm':
|
||||
case 'deb':
|
||||
@ -199,12 +203,12 @@ class NodeInfo {
|
||||
return new File(baseDir, path)
|
||||
}
|
||||
|
||||
static File confDir(File baseDir, String distro) {
|
||||
static File confDir(File baseDir, String distro, String nodeVersion) {
|
||||
switch (distro) {
|
||||
case 'integ-test-zip':
|
||||
case 'zip':
|
||||
case 'tar':
|
||||
return new File(homeDir(baseDir, distro), 'config')
|
||||
return new File(homeDir(baseDir, distro, nodeVersion), 'config')
|
||||
case 'rpm':
|
||||
case 'deb':
|
||||
return new File(baseDir, "${distro}-extracted/etc/elasticsearch")
|
||||
|
@ -231,8 +231,7 @@ public class NodeInfo extends BaseNodeResponse {
|
||||
plugins.readFrom(in);
|
||||
}
|
||||
if (in.readBoolean()) {
|
||||
ingest = new IngestInfo();
|
||||
ingest.readFrom(in);
|
||||
ingest = new IngestInfo(in);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -235,7 +235,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
|
||||
breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
|
||||
scriptStats = in.readOptionalStreamable(ScriptStats::new);
|
||||
discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null));
|
||||
ingestStats = in.readOptionalWritable(IngestStats.PROTO::readFrom);
|
||||
ingestStats = in.readOptionalWritable(IngestStats::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -79,7 +79,7 @@ public final class ClusterStateHealth implements Iterable<ClusterIndexHealth>, S
|
||||
* @param clusterState The current cluster state. Must not be null.
|
||||
*/
|
||||
public ClusterStateHealth(ClusterState clusterState) {
|
||||
this(clusterState, clusterState.metaData().concreteAllIndices());
|
||||
this(clusterState, clusterState.metaData().getConcreteAllIndices());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -432,7 +432,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
|
||||
if (routing != null) {
|
||||
Set<String> r = Strings.splitStringByCommaToSet(routing);
|
||||
Map<String, Set<String>> routings = new HashMap<>();
|
||||
String[] concreteIndices = metaData.concreteAllIndices();
|
||||
String[] concreteIndices = metaData.getConcreteAllIndices();
|
||||
for (String index : concreteIndices) {
|
||||
routings.put(index, r);
|
||||
}
|
||||
@ -472,7 +472,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
|
||||
*/
|
||||
boolean isPatternMatchingAllIndices(MetaData metaData, String[] indicesOrAliases, String[] concreteIndices) {
|
||||
// if we end up matching on all indices, check if it's a wildcard parameter, or a "-something" structure
|
||||
if (concreteIndices.length == metaData.concreteAllIndices().length && indicesOrAliases.length > 0) {
|
||||
if (concreteIndices.length == metaData.getConcreteAllIndices().length && indicesOrAliases.length > 0) {
|
||||
|
||||
//we might have something like /-test1,+test1 that would identify all indices
|
||||
//or something like /-test1 with test1 index missing and IndicesOptions.lenient()
|
||||
@ -728,11 +728,11 @@ public class IndexNameExpressionResolver extends AbstractComponent {
|
||||
|
||||
private List<String> resolveEmptyOrTrivialWildcard(IndicesOptions options, MetaData metaData, boolean assertEmpty) {
|
||||
if (options.expandWildcardsOpen() && options.expandWildcardsClosed()) {
|
||||
return Arrays.asList(metaData.concreteAllIndices());
|
||||
return Arrays.asList(metaData.getConcreteAllIndices());
|
||||
} else if (options.expandWildcardsOpen()) {
|
||||
return Arrays.asList(metaData.concreteAllOpenIndices());
|
||||
return Arrays.asList(metaData.getConcreteAllOpenIndices());
|
||||
} else if (options.expandWildcardsClosed()) {
|
||||
return Arrays.asList(metaData.concreteAllClosedIndices());
|
||||
return Arrays.asList(metaData.getConcreteAllClosedIndices());
|
||||
} else {
|
||||
assert assertEmpty : "Shouldn't end up here";
|
||||
return Collections.emptyList();
|
||||
|
@ -370,26 +370,14 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
||||
/**
|
||||
* Returns all the concrete indices.
|
||||
*/
|
||||
public String[] concreteAllIndices() {
|
||||
return allIndices;
|
||||
}
|
||||
|
||||
public String[] getConcreteAllIndices() {
|
||||
return concreteAllIndices();
|
||||
}
|
||||
|
||||
public String[] concreteAllOpenIndices() {
|
||||
return allOpenIndices;
|
||||
return allIndices;
|
||||
}
|
||||
|
||||
public String[] getConcreteAllOpenIndices() {
|
||||
return allOpenIndices;
|
||||
}
|
||||
|
||||
public String[] concreteAllClosedIndices() {
|
||||
return allClosedIndices;
|
||||
}
|
||||
|
||||
public String[] getConcreteAllClosedIndices() {
|
||||
return allClosedIndices;
|
||||
}
|
||||
@ -795,9 +783,9 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
||||
metaData.getIndices(),
|
||||
metaData.getTemplates(),
|
||||
metaData.getCustoms(),
|
||||
metaData.concreteAllIndices(),
|
||||
metaData.concreteAllOpenIndices(),
|
||||
metaData.concreteAllClosedIndices(),
|
||||
metaData.getConcreteAllIndices(),
|
||||
metaData.getConcreteAllOpenIndices(),
|
||||
metaData.getConcreteAllClosedIndices(),
|
||||
metaData.getAliasAndIndexLookup());
|
||||
} else {
|
||||
// No changes:
|
||||
|
@ -202,8 +202,12 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
||||
@Override
|
||||
synchronized protected void doStop() {
|
||||
for (NotifyTimeout onGoingTimeout : onGoingTimeouts) {
|
||||
onGoingTimeout.cancel();
|
||||
onGoingTimeout.listener.onClose();
|
||||
try {
|
||||
onGoingTimeout.cancel();
|
||||
onGoingTimeout.listener.onClose();
|
||||
} catch (Exception ex) {
|
||||
logger.debug("failed to notify listeners on shutdown", ex);
|
||||
}
|
||||
}
|
||||
ThreadPool.terminate(updateTasksExecutor, 10, TimeUnit.SECONDS);
|
||||
remove(localNodeMasterListeners);
|
||||
|
@ -68,6 +68,7 @@ import java.util.HashMap;
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
import static org.elasticsearch.ElasticsearchException.readException;
|
||||
|
@ -0,0 +1,154 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.util;
|
||||
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.gateway.MetaDataStateFormat;
|
||||
import org.elasticsearch.gateway.MetaStateService;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.NoSuchFileException;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
|
||||
/**
|
||||
* Renames index folders from {index.name} to {index.uuid}
|
||||
*/
|
||||
public class IndexFolderUpgrader {
|
||||
private final NodeEnvironment nodeEnv;
|
||||
private final Settings settings;
|
||||
private final ESLogger logger = Loggers.getLogger(IndexFolderUpgrader.class);
|
||||
private final MetaDataStateFormat<IndexMetaData> indexStateFormat = readOnlyIndexMetaDataStateFormat();
|
||||
|
||||
/**
|
||||
* Creates a new upgrader instance
|
||||
* @param settings node settings
|
||||
* @param nodeEnv the node env to operate on
|
||||
*/
|
||||
IndexFolderUpgrader(Settings settings, NodeEnvironment nodeEnv) {
|
||||
this.settings = settings;
|
||||
this.nodeEnv = nodeEnv;
|
||||
}
|
||||
|
||||
/**
|
||||
* Moves the index folder found in <code>source</code> to <code>target</code>
|
||||
*/
|
||||
void upgrade(final Index index, final Path source, final Path target) throws IOException {
|
||||
boolean success = false;
|
||||
try {
|
||||
Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
|
||||
success = true;
|
||||
} catch (NoSuchFileException | FileNotFoundException exception) {
|
||||
// thrown when the source is non-existent because the folder was renamed
|
||||
// by another node (shared FS) after we checked if the target exists
|
||||
logger.error("multiple nodes trying to upgrade [{}] in parallel, retry upgrading with single node",
|
||||
exception, target);
|
||||
throw exception;
|
||||
} finally {
|
||||
if (success) {
|
||||
logger.info("{} moved from [{}] to [{}]", index, source, target);
|
||||
logger.trace("{} syncing directory [{}]", index, target);
|
||||
IOUtils.fsync(target, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Renames <code>indexFolderName</code> index folders found in node paths and custom path
|
||||
* iff {@link #needsUpgrade(Index, String)} is true.
|
||||
* Index folders in custom paths are renamed first, followed by index folders in each node path.
*/
|
||||
void upgrade(final String indexFolderName) throws IOException {
|
||||
for (NodeEnvironment.NodePath nodePath : nodeEnv.nodePaths()) {
|
||||
final Path indexFolderPath = nodePath.indicesPath.resolve(indexFolderName);
|
||||
final IndexMetaData indexMetaData = indexStateFormat.loadLatestState(logger, indexFolderPath);
|
||||
if (indexMetaData != null) {
|
||||
final Index index = indexMetaData.getIndex();
|
||||
if (needsUpgrade(index, indexFolderName)) {
|
||||
logger.info("{} upgrading [{}] to new naming convention", index, indexFolderPath);
|
||||
final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings);
|
||||
if (indexSettings.hasCustomDataPath()) {
|
||||
// we rename index folders in the custom path before renaming them in any node path
// to have the index state under a not-yet-upgraded index folder, which we use to
// continue renaming after an incomplete upgrade.
final Path customLocationSource = nodeEnv.resolveBaseCustomLocation(indexSettings)
|
||||
.resolve(indexFolderName);
|
||||
final Path customLocationTarget = customLocationSource.resolveSibling(index.getUUID());
|
||||
// we rename the folder in the custom path only the first time we encounter a state
// in a node path that needs upgrading; it is a no-op for subsequent node paths
if (Files.exists(customLocationSource) // might not exist if no data was written for this index
|
||||
&& Files.exists(customLocationTarget) == false) {
|
||||
upgrade(index, customLocationSource, customLocationTarget);
|
||||
} else {
|
||||
logger.info("[{}] no upgrade needed - already upgraded", customLocationTarget);
|
||||
}
|
||||
}
|
||||
upgrade(index, indexFolderPath, indexFolderPath.resolveSibling(index.getUUID()));
|
||||
} else {
|
||||
logger.debug("[{}] no upgrade needed - already upgraded", indexFolderPath);
|
||||
}
|
||||
} else {
|
||||
logger.warn("[{}] no index state found - ignoring", indexFolderPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Upgrades all indices found under <code>nodeEnv</code>. Already upgraded indices are ignored.
|
||||
*/
|
||||
public static void upgradeIndicesIfNeeded(final Settings settings, final NodeEnvironment nodeEnv) throws IOException {
|
||||
final IndexFolderUpgrader upgrader = new IndexFolderUpgrader(settings, nodeEnv);
|
||||
for (String indexFolderName : nodeEnv.availableIndexFolders()) {
|
||||
upgrader.upgrade(indexFolderName);
|
||||
}
|
||||
}
|
||||
|
||||
static boolean needsUpgrade(Index index, String indexFolderName) {
|
||||
return indexFolderName.equals(index.getUUID()) == false;
|
||||
}
|
||||
|
||||
static MetaDataStateFormat<IndexMetaData> readOnlyIndexMetaDataStateFormat() {
|
||||
// NOTE: XContentType param is not used as we use the format read from the serialized index state
|
||||
return new MetaDataStateFormat<IndexMetaData>(XContentType.SMILE, MetaStateService.INDEX_STATE_FILE_PREFIX) {
|
||||
|
||||
@Override
|
||||
public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException {
|
||||
throw new UnsupportedOperationException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexMetaData fromXContent(XContentParser parser) throws IOException {
|
||||
return IndexMetaData.Builder.fromXContent(parser);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
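The new IndexFolderUpgrader is only reachable through its static entry point, upgradeIndicesIfNeeded. As a hypothetical illustration of how a caller wires it in at startup (the GatewayMetaState hunk later in this commit adds exactly this call; settings and nodeEnv are assumed to come from the node bootstrap):

// Sketch only; mirrors the call added to GatewayMetaState further down in this diff.
import java.io.IOException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.IndexFolderUpgrader;
import org.elasticsearch.env.NodeEnvironment;

class StartupUpgradeSketch {
    static void upgradeOnDiskLayout(Settings settings, NodeEnvironment nodeEnv) throws IOException {
        // renames every ${data.paths}/nodes/{node.id}/indices/{index.name} folder
        // to .../indices/{index.uuid}; already-upgraded folders are left alone
        IndexFolderUpgrader.upgradeIndicesIfNeeded(settings, nodeEnv);
    }
}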
@ -70,7 +70,6 @@ import java.util.Set;
|
||||
import java.util.concurrent.Semaphore;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.Collections.unmodifiableSet;
|
||||
|
||||
@ -89,7 +88,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
* not running on Linux, or we hit an exception trying), True means the device possibly spins and False means it does not. */
|
||||
public final Boolean spins;
|
||||
|
||||
public NodePath(Path path, Environment environment) throws IOException {
|
||||
public NodePath(Path path) throws IOException {
|
||||
this.path = path;
|
||||
this.indicesPath = path.resolve(INDICES_FOLDER);
|
||||
this.fileStore = Environment.getFileStore(path);
|
||||
@ -102,16 +101,18 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
|
||||
/**
|
||||
* Resolves the given shards directory against this NodePath
|
||||
* ${data.paths}/nodes/{node.id}/indices/{index.uuid}/{shard.id}
|
||||
*/
|
||||
public Path resolve(ShardId shardId) {
|
||||
return resolve(shardId.getIndex()).resolve(Integer.toString(shardId.id()));
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolves the given indexes directory against this NodePath
|
||||
* Resolves index directory against this NodePath
|
||||
* ${data.paths}/nodes/{node.id}/indices/{index.uuid}
|
||||
*/
|
||||
public Path resolve(Index index) {
|
||||
return indicesPath.resolve(index.getName());
|
||||
return indicesPath.resolve(index.getUUID());
|
||||
}
|
||||
|
||||
@Override
|
||||
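The resolve overloads above capture the layout change in this commit: index directories are now addressed by UUID instead of name, i.e. ${data.paths}/nodes/{node.id}/indices/{index.uuid}/{shard.id}. A small illustrative fragment, not a standalone program; the index name, UUID and node path are made up:

// Illustration only: "twitter" and "A1b2C3d4E5f6G7h8I9j0kA" are invented values.
Index index = new Index("twitter", "A1b2C3d4E5f6G7h8I9j0kA");
ShardId shardId = new ShardId(index, 0);
Path indexDir = nodePath.resolve(index);   // .../nodes/0/indices/A1b2C3d4E5f6G7h8I9j0kA
Path shardDir = nodePath.resolve(shardId); // .../nodes/0/indices/A1b2C3d4E5f6G7h8I9j0kA/0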
@ -131,7 +132,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
|
||||
private final int localNodeId;
|
||||
private final AtomicBoolean closed = new AtomicBoolean(false);
|
||||
private final Map<ShardLockKey, InternalShardLock> shardLocks = new HashMap<>();
|
||||
private final Map<ShardId, InternalShardLock> shardLocks = new HashMap<>();
|
||||
|
||||
/**
|
||||
* Maximum number of data nodes that should run in an environment.
|
||||
@ -186,7 +187,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
|
||||
try {
|
||||
locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME);
|
||||
nodePaths[dirIndex] = new NodePath(dir, environment);
|
||||
nodePaths[dirIndex] = new NodePath(dir);
|
||||
localNodeId = possibleLockId;
|
||||
} catch (LockObtainFailedException ex) {
|
||||
logger.trace("failed to obtain node lock on {}", dir.toAbsolutePath());
|
||||
@ -445,11 +446,11 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
* @param indexSettings settings for the index being deleted
|
||||
*/
|
||||
public void deleteIndexDirectoryUnderLock(Index index, IndexSettings indexSettings) throws IOException {
|
||||
final Path[] indexPaths = indexPaths(index.getName());
|
||||
final Path[] indexPaths = indexPaths(index);
|
||||
logger.trace("deleting index {} directory, paths({}): [{}]", index, indexPaths.length, indexPaths);
|
||||
IOUtils.rm(indexPaths);
|
||||
if (indexSettings.hasCustomDataPath()) {
|
||||
Path customLocation = resolveCustomLocation(indexSettings, index.getName());
|
||||
Path customLocation = resolveIndexCustomLocation(indexSettings);
|
||||
logger.trace("deleting custom index {} directory [{}]", index, customLocation);
|
||||
IOUtils.rm(customLocation);
|
||||
}
|
||||
@ -517,17 +518,16 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
*/
|
||||
public ShardLock shardLock(final ShardId shardId, long lockTimeoutMS) throws IOException {
|
||||
logger.trace("acquiring node shardlock on [{}], timeout [{}]", shardId, lockTimeoutMS);
|
||||
final ShardLockKey shardLockKey = new ShardLockKey(shardId);
|
||||
final InternalShardLock shardLock;
|
||||
final boolean acquired;
|
||||
synchronized (shardLocks) {
|
||||
if (shardLocks.containsKey(shardLockKey)) {
|
||||
shardLock = shardLocks.get(shardLockKey);
|
||||
if (shardLocks.containsKey(shardId)) {
|
||||
shardLock = shardLocks.get(shardId);
|
||||
shardLock.incWaitCount();
|
||||
acquired = false;
|
||||
} else {
|
||||
shardLock = new InternalShardLock(shardLockKey);
|
||||
shardLocks.put(shardLockKey, shardLock);
|
||||
shardLock = new InternalShardLock(shardId);
|
||||
shardLocks.put(shardId, shardLock);
|
||||
acquired = true;
|
||||
}
|
||||
}
|
||||
@ -547,7 +547,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
@Override
|
||||
protected void closeInternal() {
|
||||
shardLock.release();
|
||||
logger.trace("released shard lock for [{}]", shardLockKey);
|
||||
logger.trace("released shard lock for [{}]", shardId);
|
||||
}
|
||||
};
|
||||
}
|
||||
@ -559,51 +559,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
*/
|
||||
public Set<ShardId> lockedShards() {
|
||||
synchronized (shardLocks) {
|
||||
Set<ShardId> lockedShards = shardLocks.keySet().stream()
|
||||
.map(shardLockKey -> new ShardId(new Index(shardLockKey.indexName, "_na_"), shardLockKey.shardId)).collect(Collectors.toSet());
|
||||
return unmodifiableSet(lockedShards);
|
||||
}
|
||||
}
|
||||
|
||||
// a key for the shard lock. we can't use shardIds, because the contain
|
||||
// the index uuid, but we want the lock semantics to the same as we map indices to disk folders, i.e., without the uuid (for now).
|
||||
private final class ShardLockKey {
|
||||
final String indexName;
|
||||
final int shardId;
|
||||
|
||||
public ShardLockKey(final ShardId shardId) {
|
||||
this.indexName = shardId.getIndexName();
|
||||
this.shardId = shardId.id();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "[" + indexName + "][" + shardId + "]";
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object o) {
|
||||
if (this == o) {
|
||||
return true;
|
||||
}
|
||||
if (o == null || getClass() != o.getClass()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
ShardLockKey that = (ShardLockKey) o;
|
||||
|
||||
if (shardId != that.shardId) {
|
||||
return false;
|
||||
}
|
||||
return indexName.equals(that.indexName);
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = indexName.hashCode();
|
||||
result = 31 * result + shardId;
|
||||
return result;
|
||||
return unmodifiableSet(new HashSet<>(shardLocks.keySet()));
|
||||
}
|
||||
}
|
||||
|
||||
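With ShardLockKey gone, the lock map above is keyed by the full ShardId, which now carries the index UUID, so same-named indices with different UUIDs no longer share a lock. A hedged usage fragment (the 5000 ms timeout is arbitrary, and ShardLock is assumed to be usable as a closeable resource, as elsewhere in this codebase):

// Illustration only; assumes an enclosing method that may throw IOException.
try (ShardLock lock = nodeEnv.shardLock(shardId, 5000)) {
    // shard files may be modified safely while the lock is held
}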
@ -616,10 +572,10 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
*/
|
||||
private final Semaphore mutex = new Semaphore(1);
|
||||
private int waitCount = 1; // guarded by shardLocks
|
||||
private final ShardLockKey lockKey;
|
||||
private final ShardId shardId;
|
||||
|
||||
InternalShardLock(ShardLockKey id) {
|
||||
lockKey = id;
|
||||
InternalShardLock(ShardId shardId) {
|
||||
this.shardId = shardId;
|
||||
mutex.acquireUninterruptibly();
|
||||
}
|
||||
|
||||
@ -639,10 +595,10 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
synchronized (shardLocks) {
|
||||
assert waitCount > 0 : "waitCount is " + waitCount + " but should be > 0";
|
||||
--waitCount;
|
||||
logger.trace("shard lock wait count for [{}] is now [{}]", lockKey, waitCount);
|
||||
logger.trace("shard lock wait count for {} is now [{}]", shardId, waitCount);
|
||||
if (waitCount == 0) {
|
||||
logger.trace("last shard lock wait decremented, removing lock for [{}]", lockKey);
|
||||
InternalShardLock remove = shardLocks.remove(lockKey);
|
||||
logger.trace("last shard lock wait decremented, removing lock for {}", shardId);
|
||||
InternalShardLock remove = shardLocks.remove(shardId);
|
||||
assert remove != null : "Removed lock was null";
|
||||
}
|
||||
}
|
||||
@ -651,11 +607,11 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
void acquire(long timeoutInMillis) throws LockObtainFailedException{
|
||||
try {
|
||||
if (mutex.tryAcquire(timeoutInMillis, TimeUnit.MILLISECONDS) == false) {
|
||||
throw new LockObtainFailedException("Can't lock shard " + lockKey + ", timed out after " + timeoutInMillis + "ms");
|
||||
throw new LockObtainFailedException("Can't lock shard " + shardId + ", timed out after " + timeoutInMillis + "ms");
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
throw new LockObtainFailedException("Can't lock shard " + lockKey + ", interrupted", e);
|
||||
throw new LockObtainFailedException("Can't lock shard " + shardId + ", interrupted", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -698,11 +654,11 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
/**
|
||||
* Returns all index paths.
|
||||
*/
|
||||
public Path[] indexPaths(String indexName) {
|
||||
public Path[] indexPaths(Index index) {
|
||||
assert assertEnvIsLocked();
|
||||
Path[] indexPaths = new Path[nodePaths.length];
|
||||
for (int i = 0; i < nodePaths.length; i++) {
|
||||
indexPaths[i] = nodePaths[i].indicesPath.resolve(indexName);
|
||||
indexPaths[i] = nodePaths[i].resolve(index);
|
||||
}
|
||||
return indexPaths;
|
||||
}
|
||||
@ -725,25 +681,47 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
return shardLocations;
|
||||
}
|
||||
|
||||
public Set<String> findAllIndices() throws IOException {
|
||||
/**
|
||||
* Returns all folder names in ${data.paths}/nodes/{node.id}/indices folder
|
||||
*/
|
||||
public Set<String> availableIndexFolders() throws IOException {
|
||||
if (nodePaths == null || locks == null) {
|
||||
throw new IllegalStateException("node is not configured to store local location");
|
||||
}
|
||||
assert assertEnvIsLocked();
|
||||
Set<String> indices = new HashSet<>();
|
||||
Set<String> indexFolders = new HashSet<>();
|
||||
for (NodePath nodePath : nodePaths) {
|
||||
Path indicesLocation = nodePath.indicesPath;
|
||||
if (Files.isDirectory(indicesLocation)) {
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indicesLocation)) {
|
||||
for (Path index : stream) {
|
||||
if (Files.isDirectory(index)) {
|
||||
indices.add(index.getFileName().toString());
|
||||
indexFolders.add(index.getFileName().toString());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return indices;
|
||||
return indexFolders;
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolves all existing paths to <code>indexFolderName</code> in ${data.paths}/nodes/{node.id}/indices
|
||||
*/
|
||||
public Path[] resolveIndexFolder(String indexFolderName) throws IOException {
|
||||
if (nodePaths == null || locks == null) {
|
||||
throw new IllegalStateException("node is not configured to store local location");
|
||||
}
|
||||
assert assertEnvIsLocked();
|
||||
List<Path> paths = new ArrayList<>(nodePaths.length);
|
||||
for (NodePath nodePath : nodePaths) {
|
||||
Path indexFolder = nodePath.indicesPath.resolve(indexFolderName);
|
||||
if (Files.exists(indexFolder)) {
|
||||
paths.add(indexFolder);
|
||||
}
|
||||
}
|
||||
return paths.toArray(new Path[paths.size()]);
|
||||
}
|
||||
|
||||
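availableIndexFolders() replaces the old name-based findAllIndices(), and the new resolveIndexFolder(String) resolves a UUID-named folder across every data path. A hedged usage sketch (hypothetical caller, error handling omitted):

// Hypothetical caller; folder names are index UUIDs, not index names.
for (String indexFolderName : nodeEnv.availableIndexFolders()) {
    Path[] locations = nodeEnv.resolveIndexFolder(indexFolderName); // one entry per data path containing it
    // recovering the index name and settings requires loading the state from these
    // locations, as MetaStateService#loadIndicesStates does later in this commit
}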
/**
|
||||
@ -761,13 +739,13 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
}
|
||||
assert assertEnvIsLocked();
|
||||
final Set<ShardId> shardIds = new HashSet<>();
|
||||
String indexName = index.getName();
|
||||
final String indexUniquePathId = index.getUUID();
|
||||
for (final NodePath nodePath : nodePaths) {
|
||||
Path location = nodePath.indicesPath;
|
||||
if (Files.isDirectory(location)) {
|
||||
try (DirectoryStream<Path> indexStream = Files.newDirectoryStream(location)) {
|
||||
for (Path indexPath : indexStream) {
|
||||
if (indexName.equals(indexPath.getFileName().toString())) {
|
||||
if (indexUniquePathId.equals(indexPath.getFileName().toString())) {
|
||||
shardIds.addAll(findAllShardsForIndex(indexPath, index));
|
||||
}
|
||||
}
|
||||
@ -778,7 +756,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
}
|
||||
|
||||
private static Set<ShardId> findAllShardsForIndex(Path indexPath, Index index) throws IOException {
|
||||
assert indexPath.getFileName().toString().equals(index.getName());
|
||||
assert indexPath.getFileName().toString().equals(index.getUUID());
|
||||
Set<ShardId> shardIds = new HashSet<>();
|
||||
if (Files.isDirectory(indexPath)) {
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
|
||||
@ -861,7 +839,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
*
|
||||
* @param indexSettings settings for the index
|
||||
*/
|
||||
private Path resolveCustomLocation(IndexSettings indexSettings) {
|
||||
public Path resolveBaseCustomLocation(IndexSettings indexSettings) {
|
||||
String customDataDir = indexSettings.customDataPath();
|
||||
if (customDataDir != null) {
|
||||
// This assert is here because this case should be caught by MetaDataCreateIndexService
|
||||
@ -882,10 +860,9 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
* the root path for the index.
|
||||
*
|
||||
* @param indexSettings settings for the index
|
||||
* @param indexName index to resolve the path for
|
||||
*/
|
||||
private Path resolveCustomLocation(IndexSettings indexSettings, final String indexName) {
|
||||
return resolveCustomLocation(indexSettings).resolve(indexName);
|
||||
private Path resolveIndexCustomLocation(IndexSettings indexSettings) {
|
||||
return resolveBaseCustomLocation(indexSettings).resolve(indexSettings.getUUID());
|
||||
}
|
||||
|
||||
/**
|
||||
@ -897,7 +874,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
* @param shardId shard to resolve the path to
|
||||
*/
|
||||
public Path resolveCustomLocation(IndexSettings indexSettings, final ShardId shardId) {
|
||||
return resolveCustomLocation(indexSettings, shardId.getIndexName()).resolve(Integer.toString(shardId.id()));
|
||||
return resolveIndexCustomLocation(indexSettings).resolve(Integer.toString(shardId.id()));
|
||||
}
|
||||
|
||||
/**
|
||||
@ -921,22 +898,24 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
|
||||
for (Path path : nodeDataPaths()) { // check node-paths are writable
|
||||
tryWriteTempFile(path);
|
||||
}
|
||||
for (String index : this.findAllIndices()) {
|
||||
for (Path path : this.indexPaths(index)) { // check index paths are writable
|
||||
Path statePath = path.resolve(MetaDataStateFormat.STATE_DIR_NAME);
|
||||
tryWriteTempFile(statePath);
|
||||
tryWriteTempFile(path);
|
||||
}
|
||||
for (ShardId shardID : this.findAllShardIds(new Index(index, IndexMetaData.INDEX_UUID_NA_VALUE))) {
|
||||
Path[] paths = this.availableShardPaths(shardID);
|
||||
for (Path path : paths) { // check shard paths are writable
|
||||
Path indexDir = path.resolve(ShardPath.INDEX_FOLDER_NAME);
|
||||
Path statePath = path.resolve(MetaDataStateFormat.STATE_DIR_NAME);
|
||||
Path translogDir = path.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
|
||||
tryWriteTempFile(indexDir);
|
||||
tryWriteTempFile(translogDir);
|
||||
tryWriteTempFile(statePath);
|
||||
tryWriteTempFile(path);
|
||||
for (String indexFolderName : this.availableIndexFolders()) {
|
||||
for (Path indexPath : this.resolveIndexFolder(indexFolderName)) { // check index paths are writable
|
||||
Path indexStatePath = indexPath.resolve(MetaDataStateFormat.STATE_DIR_NAME);
|
||||
tryWriteTempFile(indexStatePath);
|
||||
tryWriteTempFile(indexPath);
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(indexPath)) {
|
||||
for (Path shardPath : stream) {
|
||||
String fileName = shardPath.getFileName().toString();
|
||||
if (Files.isDirectory(shardPath) && fileName.chars().allMatch(Character::isDigit)) {
|
||||
Path indexDir = shardPath.resolve(ShardPath.INDEX_FOLDER_NAME);
|
||||
Path statePath = shardPath.resolve(MetaDataStateFormat.STATE_DIR_NAME);
|
||||
Path translogDir = shardPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
|
||||
tryWriteTempFile(indexDir);
|
||||
tryWriteTempFile(translogDir);
|
||||
tryWriteTempFile(statePath);
|
||||
tryWriteTempFile(shardPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
package org.elasticsearch.gateway;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
@ -26,12 +27,17 @@ import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.Index;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
@ -47,7 +53,7 @@ public class DanglingIndicesState extends AbstractComponent {
|
||||
private final MetaStateService metaStateService;
|
||||
private final LocalAllocateDangledIndices allocateDangledIndices;
|
||||
|
||||
private final Map<String, IndexMetaData> danglingIndices = ConcurrentCollections.newConcurrentMap();
|
||||
private final Map<Index, IndexMetaData> danglingIndices = ConcurrentCollections.newConcurrentMap();
|
||||
|
||||
@Inject
|
||||
public DanglingIndicesState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService,
|
||||
@ -74,7 +80,7 @@ public class DanglingIndicesState extends AbstractComponent {
|
||||
/**
|
||||
* The current set of dangling indices.
|
||||
*/
|
||||
Map<String, IndexMetaData> getDanglingIndices() {
|
||||
Map<Index, IndexMetaData> getDanglingIndices() {
|
||||
// This might be a good use case for CopyOnWriteHashMap
|
||||
return unmodifiableMap(new HashMap<>(danglingIndices));
|
||||
}
|
||||
@ -83,10 +89,16 @@ public class DanglingIndicesState extends AbstractComponent {
|
||||
* Cleans dangling indices if they are already allocated on the provided meta data.
|
||||
*/
|
||||
void cleanupAllocatedDangledIndices(MetaData metaData) {
|
||||
for (String danglingIndex : danglingIndices.keySet()) {
|
||||
if (metaData.hasIndex(danglingIndex)) {
|
||||
logger.debug("[{}] no longer dangling (created), removing from dangling list", danglingIndex);
|
||||
danglingIndices.remove(danglingIndex);
|
||||
for (Index index : danglingIndices.keySet()) {
|
||||
final IndexMetaData indexMetaData = metaData.index(index);
|
||||
if (indexMetaData != null && indexMetaData.getIndex().getName().equals(index.getName())) {
|
||||
if (indexMetaData.getIndex().getUUID().equals(index.getUUID()) == false) {
|
||||
logger.warn("[{}] can not be imported as a dangling index, as there is already another index " +
|
||||
"with the same name but a different uuid. local index will be ignored (but not deleted)", index);
|
||||
} else {
|
||||
logger.debug("[{}] no longer dangling (created), removing from dangling list", index);
|
||||
}
|
||||
danglingIndices.remove(index);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -104,36 +116,30 @@ public class DanglingIndicesState extends AbstractComponent {
|
||||
* that have state on disk, but are not part of the provided meta data, or not detected
|
||||
* as dangled already.
|
||||
*/
|
||||
Map<String, IndexMetaData> findNewDanglingIndices(MetaData metaData) {
|
||||
final Set<String> indices;
|
||||
Map<Index, IndexMetaData> findNewDanglingIndices(MetaData metaData) {
|
||||
final Set<String> excludeIndexPathIds = new HashSet<>(metaData.indices().size() + danglingIndices.size());
|
||||
for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
|
||||
excludeIndexPathIds.add(cursor.value.getIndex().getUUID());
|
||||
}
|
||||
excludeIndexPathIds.addAll(danglingIndices.keySet().stream().map(Index::getUUID).collect(Collectors.toList()));
|
||||
try {
|
||||
indices = nodeEnv.findAllIndices();
|
||||
} catch (Throwable e) {
|
||||
final List<IndexMetaData> indexMetaDataList = metaStateService.loadIndicesStates(excludeIndexPathIds::contains);
|
||||
Map<Index, IndexMetaData> newIndices = new HashMap<>(indexMetaDataList.size());
|
||||
for (IndexMetaData indexMetaData : indexMetaDataList) {
|
||||
if (metaData.hasIndex(indexMetaData.getIndex().getName())) {
|
||||
logger.warn("[{}] can not be imported as a dangling index, as index with same name already exists in cluster metadata",
|
||||
indexMetaData.getIndex());
|
||||
} else {
|
||||
logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state",
|
||||
indexMetaData.getIndex());
|
||||
newIndices.put(indexMetaData.getIndex(), indexMetaData);
|
||||
}
|
||||
}
|
||||
return newIndices;
|
||||
} catch (IOException e) {
|
||||
logger.warn("failed to list dangling indices", e);
|
||||
return emptyMap();
|
||||
}
|
||||
|
||||
Map<String, IndexMetaData> newIndices = new HashMap<>();
|
||||
for (String indexName : indices) {
|
||||
if (metaData.hasIndex(indexName) == false && danglingIndices.containsKey(indexName) == false) {
|
||||
try {
|
||||
IndexMetaData indexMetaData = metaStateService.loadIndexState(indexName);
|
||||
if (indexMetaData != null) {
|
||||
logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state", indexName);
|
||||
if (!indexMetaData.getIndex().getName().equals(indexName)) {
|
||||
logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name", indexName, indexMetaData.getIndex());
|
||||
indexMetaData = IndexMetaData.builder(indexMetaData).index(indexName).build();
|
||||
}
|
||||
newIndices.put(indexName, indexMetaData);
|
||||
} else {
|
||||
logger.debug("[{}] dangling index directory detected, but no state found", indexName);
|
||||
}
|
||||
} catch (Throwable t) {
|
||||
logger.warn("[{}] failed to load index state for detected dangled index", t, indexName);
|
||||
}
|
||||
}
|
||||
}
|
||||
return newIndices;
|
||||
}
|
||||
|
||||
/**
|
||||
|
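The rewritten findNewDanglingIndices above no longer probes folders by index name; it collects the index UUIDs that are already accounted for (cluster metadata plus indices already tracked as dangling) and passes them to MetaStateService#loadIndicesStates as an exclusion predicate. Condensed into one place, the flow is roughly this (a sketch, not a drop-in replacement):

// Sketch of the new detection flow; names follow the diff.
Set<String> excludeIndexPathIds = new HashSet<>();
for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
    excludeIndexPathIds.add(cursor.value.getIndex().getUUID());   // already in the cluster state
}
danglingIndices.keySet().forEach(index -> excludeIndexPathIds.add(index.getUUID())); // already dangling
List<IndexMetaData> onDisk = metaStateService.loadIndicesStates(excludeIndexPathIds::contains);
// whatever comes back exists on disk but is unknown to the cluster: a dangling-index
// candidate, unless an index with the same name (different UUID) is already in the metadata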
@ -34,6 +34,7 @@ import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.IndexFolderUpgrader;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.Index;
|
||||
|
||||
@ -86,6 +87,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
||||
try {
|
||||
ensureNoPre019State();
|
||||
pre20Upgrade();
|
||||
IndexFolderUpgrader.upgradeIndicesIfNeeded(settings, nodeEnv);
|
||||
long startNS = System.nanoTime();
|
||||
metaStateService.loadFullState();
|
||||
logger.debug("took {} to load state", TimeValue.timeValueMillis(TimeValue.nsecToMSec(System.nanoTime() - startNS)));
|
||||
@ -130,7 +132,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
||||
for (IndexMetaData indexMetaData : newMetaData) {
|
||||
IndexMetaData indexMetaDataOnDisk = null;
|
||||
if (indexMetaData.getState().equals(IndexMetaData.State.CLOSE)) {
|
||||
indexMetaDataOnDisk = metaStateService.loadIndexState(indexMetaData.getIndex().getName());
|
||||
indexMetaDataOnDisk = metaStateService.loadIndexState(indexMetaData.getIndex());
|
||||
}
|
||||
if (indexMetaDataOnDisk != null) {
|
||||
newPreviouslyWrittenIndices.add(indexMetaDataOnDisk.getIndex());
|
||||
@ -158,7 +160,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
||||
// check and write changes in indices
|
||||
for (IndexMetaWriteInfo indexMetaWrite : writeInfo) {
|
||||
try {
|
||||
metaStateService.writeIndex(indexMetaWrite.reason, indexMetaWrite.newMetaData, indexMetaWrite.previousMetaData);
|
||||
metaStateService.writeIndex(indexMetaWrite.reason, indexMetaWrite.newMetaData);
|
||||
} catch (Throwable e) {
|
||||
success = false;
|
||||
}
|
||||
@ -166,7 +168,6 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
||||
}
|
||||
|
||||
danglingIndicesState.processDanglingIndices(newMetaData);
|
||||
|
||||
if (success) {
|
||||
previousMetaData = newMetaData;
|
||||
previouslyWrittenIndices = unmodifiableSet(relevantIndices);
|
||||
@ -233,7 +234,8 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
||||
// We successfully checked all indices for backward compatibility and found no non-upgradable indices, which
|
||||
// means the upgrade can continue. Now it's safe to overwrite index metadata with the new version.
|
||||
for (IndexMetaData indexMetaData : updateIndexMetaData) {
|
||||
metaStateService.writeIndex("upgrade", indexMetaData, null);
|
||||
// since we still haven't upgraded the index folders, we write index state in the old folder
|
||||
metaStateService.writeIndex("upgrade", indexMetaData, nodeEnv.resolveIndexFolder(indexMetaData.getIndex().getName()));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -33,9 +33,12 @@ import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.Index;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
/**
|
||||
* Handles writing and loading both {@link MetaData} and {@link IndexMetaData}
|
||||
@ -45,7 +48,7 @@ public class MetaStateService extends AbstractComponent {
|
||||
static final String FORMAT_SETTING = "gateway.format";
|
||||
|
||||
static final String GLOBAL_STATE_FILE_PREFIX = "global-";
|
||||
private static final String INDEX_STATE_FILE_PREFIX = "state-";
|
||||
public static final String INDEX_STATE_FILE_PREFIX = "state-";
|
||||
|
||||
private final NodeEnvironment nodeEnv;
|
||||
|
||||
@ -91,14 +94,12 @@ public class MetaStateService extends AbstractComponent {
|
||||
} else {
|
||||
metaDataBuilder = MetaData.builder();
|
||||
}
|
||||
|
||||
final Set<String> indices = nodeEnv.findAllIndices();
|
||||
for (String index : indices) {
|
||||
IndexMetaData indexMetaData = loadIndexState(index);
|
||||
if (indexMetaData == null) {
|
||||
logger.debug("[{}] failed to find metadata for existing index location", index);
|
||||
} else {
|
||||
for (String indexFolderName : nodeEnv.availableIndexFolders()) {
|
||||
IndexMetaData indexMetaData = indexStateFormat.loadLatestState(logger, nodeEnv.resolveIndexFolder(indexFolderName));
|
||||
if (indexMetaData != null) {
|
||||
metaDataBuilder.put(indexMetaData, false);
|
||||
} else {
|
||||
logger.debug("[{}] failed to find metadata for existing index location", indexFolderName);
|
||||
}
|
||||
}
|
||||
return metaDataBuilder.build();
|
||||
@ -108,10 +109,35 @@ public class MetaStateService extends AbstractComponent {
|
||||
* Loads the index state for the provided index name, returning null if it doesn't exist.
|
||||
*/
|
||||
@Nullable
|
||||
IndexMetaData loadIndexState(String index) throws IOException {
|
||||
IndexMetaData loadIndexState(Index index) throws IOException {
|
||||
return indexStateFormat.loadLatestState(logger, nodeEnv.indexPaths(index));
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads all indices states available on disk
|
||||
*/
|
||||
List<IndexMetaData> loadIndicesStates(Predicate<String> excludeIndexPathIdsPredicate) throws IOException {
|
||||
List<IndexMetaData> indexMetaDataList = new ArrayList<>();
|
||||
for (String indexFolderName : nodeEnv.availableIndexFolders()) {
|
||||
if (excludeIndexPathIdsPredicate.test(indexFolderName)) {
|
||||
continue;
|
||||
}
|
||||
IndexMetaData indexMetaData = indexStateFormat.loadLatestState(logger,
|
||||
nodeEnv.resolveIndexFolder(indexFolderName));
|
||||
if (indexMetaData != null) {
|
||||
final String indexPathId = indexMetaData.getIndex().getUUID();
|
||||
if (indexFolderName.equals(indexPathId)) {
|
||||
indexMetaDataList.add(indexMetaData);
|
||||
} else {
|
||||
throw new IllegalStateException("[" + indexFolderName + "] invalid index folder name, rename to [" + indexPathId + "]");
|
||||
}
|
||||
} else {
|
||||
logger.debug("[{}] failed to find metadata for existing index location", indexFolderName);
|
||||
}
|
||||
}
|
||||
return indexMetaDataList;
|
||||
}
|
||||
|
||||
/**
|
||||
* Loads the global state, *without* index state, see {@link #loadFullState()} for that.
|
||||
*/
|
||||
@ -129,13 +155,22 @@ public class MetaStateService extends AbstractComponent {
/**
* Writes the index state.
*/
void writeIndex(String reason, IndexMetaData indexMetaData, @Nullable IndexMetaData previousIndexMetaData) throws Exception {
logger.trace("[{}] writing state, reason [{}]", indexMetaData.getIndex(), reason);
void writeIndex(String reason, IndexMetaData indexMetaData) throws IOException {
writeIndex(reason, indexMetaData, nodeEnv.indexPaths(indexMetaData.getIndex()));
}

/**
* Writes the index state in <code>locations</code>, use {@link #writeGlobalState(String, MetaData)}
* to write index state in index paths
*/
void writeIndex(String reason, IndexMetaData indexMetaData, Path[] locations) throws IOException {
final Index index = indexMetaData.getIndex();
logger.trace("[{}] writing state, reason [{}]", index, reason);
try {
indexStateFormat.write(indexMetaData, indexMetaData.getVersion(), nodeEnv.indexPaths(indexMetaData.getIndex().getName()));
indexStateFormat.write(indexMetaData, indexMetaData.getVersion(), locations);
} catch (Throwable ex) {
logger.warn("[{}]: failed to write index state", ex, indexMetaData.getIndex());
throw new IOException("failed to write state for [" + indexMetaData.getIndex() + "]", ex);
logger.warn("[{}]: failed to write index state", ex, index);
throw new IOException("failed to write state for [" + index + "]", ex);
}
}

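Aside (not part of the diff): a minimal usage sketch of the refactored MetaStateService API above, assuming a metaStateService instance in the same package (the methods are package-private); the method name below is hypothetical.

// Loads per-index metadata from UUID-named folders and rewrites one index's state.
List<IndexMetaData> loadAndRewrite(MetaStateService metaStateService, IndexMetaData indexMetaData) throws IOException {
    // loadIndicesStates() skips folder names matched by the predicate and only accepts
    // folders whose name equals the index UUID stored in the metadata.
    List<IndexMetaData> states = metaStateService.loadIndicesStates(folderName -> false);
    // writeIndex() now resolves the target locations itself via nodeEnv.indexPaths(index).
    metaStateService.writeIndex("usage sketch", indexMetaData);
    return states;
}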
@ -29,7 +29,6 @@ import java.io.IOException;
|
||||
import java.nio.file.FileStore;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
public final class ShardPath {
|
||||
@ -37,22 +36,20 @@ public final class ShardPath {
public static final String TRANSLOG_FOLDER_NAME = "translog";

private final Path path;
private final String indexUUID;
private final ShardId shardId;
private final Path shardStatePath;
private final boolean isCustomDataPath;

public ShardPath(boolean isCustomDataPath, Path dataPath, Path shardStatePath, String indexUUID, ShardId shardId) {
public ShardPath(boolean isCustomDataPath, Path dataPath, Path shardStatePath, ShardId shardId) {
assert dataPath.getFileName().toString().equals(Integer.toString(shardId.id())) : "dataPath must end with the shard ID but didn't: " + dataPath.toString();
assert shardStatePath.getFileName().toString().equals(Integer.toString(shardId.id())) : "shardStatePath must end with the shard ID but didn't: " + dataPath.toString();
assert dataPath.getParent().getFileName().toString().equals(shardId.getIndexName()) : "dataPath must end with index/shardID but didn't: " + dataPath.toString();
assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndexName()) : "shardStatePath must end with index/shardID but didn't: " + dataPath.toString();
assert dataPath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : "dataPath must end with index path id but didn't: " + dataPath.toString();
assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : "shardStatePath must end with index path id but didn't: " + dataPath.toString();
if (isCustomDataPath && dataPath.equals(shardStatePath)) {
throw new IllegalArgumentException("shard state path must be different to the data path when using custom data paths");
}
this.isCustomDataPath = isCustomDataPath;
this.path = dataPath;
this.indexUUID = indexUUID;
this.shardId = shardId;
this.shardStatePath = shardStatePath;
}
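Aside (not part of the diff): with the constructor above, callers no longer pass the index UUID; it is derived from the shard id, and the parent folder of the path is expected to be the UUID. A rough sketch, where dataDir stands in for a node data path:

// Folder layout is .../indices/<index uuid>/<shard id>, matching the new assertions.
ShardId shardId = new ShardId(new Index("test", "0xdeadbeef"), 0);
Path dataPath = dataDir.resolve("indices").resolve(shardId.getIndex().getUUID()).resolve("0");
ShardPath shardPath = new ShardPath(false, dataPath, dataPath, shardId);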
@ -73,10 +70,6 @@ public final class ShardPath {
|
||||
return Files.exists(path);
|
||||
}
|
||||
|
||||
public String getIndexUUID() {
|
||||
return indexUUID;
|
||||
}
|
||||
|
||||
public ShardId getShardId() {
|
||||
return shardId;
|
||||
}
|
||||
@ -144,7 +137,7 @@ public final class ShardPath {
|
||||
dataPath = statePath;
|
||||
}
|
||||
logger.debug("{} loaded data path [{}], state path [{}]", shardId, dataPath, statePath);
|
||||
return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, indexUUID, shardId);
|
||||
return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, shardId);
|
||||
}
|
||||
}
|
||||
|
||||
@ -168,34 +161,6 @@ public final class ShardPath {
|
||||
}
|
||||
}
|
||||
|
||||
/** Maps each path.data path to a "guess" of how many bytes the shards allocated to that path might additionally use over their
|
||||
* lifetime; we do this so a bunch of newly allocated shards won't just all go the path with the most free space at this moment. */
|
||||
private static Map<Path,Long> getEstimatedReservedBytes(NodeEnvironment env, long avgShardSizeInBytes, Iterable<IndexShard> shards) throws IOException {
|
||||
long totFreeSpace = 0;
|
||||
for (NodeEnvironment.NodePath nodePath : env.nodePaths()) {
|
||||
totFreeSpace += nodePath.fileStore.getUsableSpace();
|
||||
}
|
||||
|
||||
// Very rough heuristic of how much disk space we expect the shard will use over its lifetime, the max of current average
|
||||
// shard size across the cluster and 5% of the total available free space on this node:
|
||||
long estShardSizeInBytes = Math.max(avgShardSizeInBytes, (long) (totFreeSpace/20.0));
|
||||
|
||||
// Collate predicted (guessed!) disk usage on each path.data:
|
||||
Map<Path,Long> reservedBytes = new HashMap<>();
|
||||
for (IndexShard shard : shards) {
|
||||
Path dataPath = NodeEnvironment.shardStatePathToDataPath(shard.shardPath().getShardStatePath());
|
||||
|
||||
// Remove indices/<index>/<shardID> subdirs from the statePath to get back to the path.data/<lockID>:
|
||||
Long curBytes = reservedBytes.get(dataPath);
|
||||
if (curBytes == null) {
|
||||
curBytes = 0L;
|
||||
}
|
||||
reservedBytes.put(dataPath, curBytes + estShardSizeInBytes);
|
||||
}
|
||||
|
||||
return reservedBytes;
|
||||
}
|
||||
|
||||
public static ShardPath selectNewPathForShard(NodeEnvironment env, ShardId shardId, IndexSettings indexSettings,
|
||||
long avgShardSizeInBytes, Map<Path,Integer> dataPathToShardCount) throws IOException {
|
||||
|
||||
@ -206,7 +171,6 @@ public final class ShardPath {
|
||||
dataPath = env.resolveCustomLocation(indexSettings, shardId);
|
||||
statePath = env.nodePaths()[0].resolve(shardId);
|
||||
} else {
|
||||
|
||||
long totFreeSpace = 0;
|
||||
for (NodeEnvironment.NodePath nodePath : env.nodePaths()) {
|
||||
totFreeSpace += nodePath.fileStore.getUsableSpace();
|
||||
@ -241,9 +205,7 @@ public final class ShardPath {
|
||||
statePath = bestPath.resolve(shardId);
|
||||
dataPath = statePath;
|
||||
}
|
||||
|
||||
final String indexUUID = indexSettings.getUUID();
|
||||
return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, indexUUID, shardId);
|
||||
return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, shardId);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -258,9 +220,6 @@ public final class ShardPath {
|
||||
if (shardId != null ? !shardId.equals(shardPath.shardId) : shardPath.shardId != null) {
|
||||
return false;
|
||||
}
|
||||
if (indexUUID != null ? !indexUUID.equals(shardPath.indexUUID) : shardPath.indexUUID != null) {
|
||||
return false;
|
||||
}
|
||||
if (path != null ? !path.equals(shardPath.path) : shardPath.path != null) {
|
||||
return false;
|
||||
}
|
||||
@ -271,7 +230,6 @@ public final class ShardPath {
|
||||
@Override
|
||||
public int hashCode() {
|
||||
int result = path != null ? path.hashCode() : 0;
|
||||
result = 31 * result + (indexUUID != null ? indexUUID.hashCode() : 0);
|
||||
result = 31 * result + (shardId != null ? shardId.hashCode() : 0);
|
||||
return result;
|
||||
}
|
||||
@ -280,7 +238,6 @@ public final class ShardPath {
|
||||
public String toString() {
|
||||
return "ShardPath{" +
|
||||
"path=" + path +
|
||||
", indexUUID='" + indexUUID + '\'' +
|
||||
", shard=" + shardId +
|
||||
'}';
|
||||
}
|
||||
|
@ -531,7 +531,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
}
// this is a pure protection to make sure this index doesn't get re-imported as a dangling index.
// we should in the future write a tombstone rather than wiping the metadata.
MetaDataStateFormat.deleteMetaState(nodeEnv.indexPaths(index.getName()));
MetaDataStateFormat.deleteMetaState(nodeEnv.indexPaths(index));
}
}

@ -31,12 +31,18 @@ import java.util.Map;
import java.util.concurrent.TimeUnit;

public class IngestStats implements Writeable<IngestStats>, ToXContent {

public final static IngestStats PROTO = new IngestStats(null, null);

private final Stats totalStats;
private final Map<String, Stats> statsPerPipeline;

public IngestStats(StreamInput in) throws IOException {
this.totalStats = new Stats(in);
int size = in.readVInt();
this.statsPerPipeline = new HashMap<>(size);
for (int i = 0; i < size; i++) {
statsPerPipeline.put(in.readString(), new Stats(in));
}
}

public IngestStats(Stats totalStats, Map<String, Stats> statsPerPipeline) {
this.totalStats = totalStats;
this.statsPerPipeline = statsPerPipeline;
@ -58,16 +64,7 @@ public class IngestStats implements Writeable<IngestStats>, ToXContent {

@Override
public IngestStats readFrom(StreamInput in) throws IOException {
Stats totalStats = Stats.PROTO.readFrom(in);
totalStats.readFrom(in);
int size = in.readVInt();
Map<String, Stats> statsPerPipeline = new HashMap<>(size);
for (int i = 0; i < size; i++) {
Stats stats = Stats.PROTO.readFrom(in);
statsPerPipeline.put(in.readString(), stats);
stats.readFrom(in);
}
return new IngestStats(totalStats, statsPerPipeline);
return new IngestStats(in);
}

@Override
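Aside (not part of the diff): the hunks above replace the prototype-based PROTO.readFrom() pattern with read-from-stream constructors, so deserialization is a plain constructor call and the fields can stay final. A minimal sketch, assuming in is a StreamInput positioned at a serialized IngestStats:

// Old pattern (removed): IngestStats stats = IngestStats.PROTO.readFrom(in);
IngestStats readIngestStats(StreamInput in) throws IOException {
    return new IngestStats(in); // nested Stats objects are read the same way via new Stats(in)
}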
@ -99,13 +96,18 @@ public class IngestStats implements Writeable<IngestStats>, ToXContent {
|
||||
|
||||
public static class Stats implements Writeable<Stats>, ToXContent {
|
||||
|
||||
private final static Stats PROTO = new Stats(0, 0, 0, 0);
|
||||
|
||||
private final long ingestCount;
|
||||
private final long ingestTimeInMillis;
|
||||
private final long ingestCurrent;
|
||||
private final long ingestFailedCount;
|
||||
|
||||
public Stats(StreamInput in) throws IOException {
|
||||
ingestCount = in.readVLong();
|
||||
ingestTimeInMillis = in.readVLong();
|
||||
ingestCurrent = in.readVLong();
|
||||
ingestFailedCount = in.readVLong();
|
||||
}
|
||||
|
||||
public Stats(long ingestCount, long ingestTimeInMillis, long ingestCurrent, long ingestFailedCount) {
|
||||
this.ingestCount = ingestCount;
|
||||
this.ingestTimeInMillis = ingestTimeInMillis;
|
||||
@ -144,11 +146,7 @@ public class IngestStats implements Writeable<IngestStats>, ToXContent {
|
||||
|
||||
@Override
|
||||
public Stats readFrom(StreamInput in) throws IOException {
|
||||
long ingestCount = in.readVLong();
|
||||
long ingestTimeInMillis = in.readVLong();
|
||||
long ingestCurrent = in.readVLong();
|
||||
long ingestFailedCount = in.readVLong();
|
||||
return new Stats(ingestCount, ingestTimeInMillis, ingestCurrent, ingestFailedCount);
|
||||
return new Stats(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -22,6 +22,7 @@ package org.elasticsearch.ingest.core;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Streamable;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
|
||||
@ -32,17 +33,22 @@ import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.TreeSet;
|
||||
|
||||
public class IngestInfo implements Streamable, ToXContent {
|
||||
public class IngestInfo implements Writeable<IngestInfo>, ToXContent {
|
||||
|
||||
private Set<ProcessorInfo> processors;
|
||||
private final Set<ProcessorInfo> processors;
|
||||
|
||||
public IngestInfo() {
|
||||
processors = Collections.emptySet();
|
||||
public IngestInfo(StreamInput in) throws IOException {
|
||||
this(Collections.emptyList());
|
||||
final int size = in.readVInt();
|
||||
for (int i = 0; i < size; i++) {
|
||||
processors.add(new ProcessorInfo(in));
|
||||
}
|
||||
}
|
||||
|
||||
public IngestInfo(List<ProcessorInfo> processors) {
|
||||
this.processors = new LinkedHashSet<>(processors);
|
||||
this.processors = new TreeSet<>(processors); // we use a treeset here to have a test-able / predictable order
|
||||
}
|
||||
|
||||
public Iterable<ProcessorInfo> getProcessors() {
|
||||
@ -54,15 +60,8 @@ public class IngestInfo implements Streamable, ToXContent {
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
int size = in.readVInt();
|
||||
Set<ProcessorInfo> processors = new LinkedHashSet<>(size);
|
||||
for (int i = 0; i < size; i++) {
|
||||
ProcessorInfo info = new ProcessorInfo();
|
||||
info.readFrom(in);
|
||||
processors.add(info);
|
||||
}
|
||||
this.processors = processors;
|
||||
public IngestInfo readFrom(StreamInput in) throws IOException {
|
||||
return new IngestInfo(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -22,16 +22,18 @@ package org.elasticsearch.ingest.core;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

public class ProcessorInfo implements Streamable, ToXContent {
public class ProcessorInfo implements Writeable<ProcessorInfo>, ToXContent, Comparable<ProcessorInfo> {

private String type;
private final String type;

ProcessorInfo() {
public ProcessorInfo(StreamInput input) throws IOException {
type = input.readString();
}

public ProcessorInfo(String type) {
@ -46,8 +48,8 @@ public class ProcessorInfo implements Streamable, ToXContent {
}

@Override
public void readFrom(StreamInput in) throws IOException {
this.type = in.readString();
public ProcessorInfo readFrom(StreamInput in) throws IOException {
return new ProcessorInfo(in);
}

@Override
@ -78,4 +80,9 @@ public class ProcessorInfo implements Streamable, ToXContent {
public int hashCode() {
return type.hashCode();
}

@Override
public int compareTo(ProcessorInfo o) {
return type.compareTo(o.type);
}
}

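Aside (not part of the diff): ProcessorInfo becomes Comparable so that IngestInfo can hold its processors in a TreeSet with a predictable, test-able order, as the comment in the IngestInfo hunk above says. A small sketch, assuming the existing getType() accessor:

IngestInfo info = new IngestInfo(Arrays.asList(
        new ProcessorInfo("set"), new ProcessorInfo("append"), new ProcessorInfo("date")));
for (ProcessorInfo processor : info.getProcessors()) {
    System.out.println(processor.getType()); // prints append, date, set - sorted by type
}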
@ -19,15 +19,33 @@
|
||||
|
||||
package org.elasticsearch.search.sort;
|
||||
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.lucene.BytesRefs;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
import org.elasticsearch.index.query.QueryParseContext;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* A sort builder to sort based on a document field.
|
||||
*/
|
||||
public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
|
||||
public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> implements SortBuilderParser<FieldSortBuilder> {
|
||||
static final FieldSortBuilder PROTOTYPE = new FieldSortBuilder("");
|
||||
public static final String NAME = "field_sort";
|
||||
public static final ParseField NESTED_PATH = new ParseField("nested_path");
|
||||
public static final ParseField NESTED_FILTER = new ParseField("nested_filter");
|
||||
public static final ParseField MISSING = new ParseField("missing");
|
||||
public static final ParseField ORDER = new ParseField("order");
|
||||
public static final ParseField REVERSE = new ParseField("reverse");
|
||||
public static final ParseField SORT_MODE = new ParseField("mode");
|
||||
public static final ParseField UNMAPPED_TYPE = new ParseField("unmapped_type");
|
||||
|
||||
private final String fieldName;
|
||||
|
||||
@ -41,10 +59,22 @@ public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
|
||||
|
||||
private String nestedPath;
|
||||
|
||||
/** Copy constructor. */
|
||||
public FieldSortBuilder(FieldSortBuilder template) {
|
||||
this(template.fieldName);
|
||||
this.order(template.order());
|
||||
this.missing(template.missing());
|
||||
this.unmappedType(template.unmappedType());
|
||||
this.sortMode(template.sortMode());
|
||||
this.setNestedFilter(template.getNestedFilter());
|
||||
this.setNestedPath(template.getNestedPath());
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a new sort based on a document field.
|
||||
*
|
||||
* @param fieldName The field name.
|
||||
* @param fieldName
|
||||
* The field name.
|
||||
*/
|
||||
public FieldSortBuilder(String fieldName) {
|
||||
if (fieldName == null) {
|
||||
@ -53,21 +83,39 @@ public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
|
||||
this.fieldName = fieldName;
|
||||
}
|
||||
|
||||
/** Returns the document field this sort should be based on. */
|
||||
public String getFieldName() {
|
||||
return this.fieldName;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the value when a field is missing in a doc. Can also be set to <tt>_last</tt> or
|
||||
* <tt>_first</tt> to sort missing last or first respectively.
|
||||
*/
|
||||
public FieldSortBuilder missing(Object missing) {
|
||||
this.missing = missing;
|
||||
if (missing instanceof String) {
|
||||
this.missing = BytesRefs.toBytesRef(missing);
|
||||
} else {
|
||||
this.missing = missing;
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
/** Returns the value used when a field is missing in a doc. */
|
||||
public Object missing() {
|
||||
if (missing instanceof BytesRef) {
|
||||
return ((BytesRef) missing).utf8ToString();
|
||||
}
|
||||
return missing;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the type to use in case the current field is not mapped in an index.
|
||||
* Specifying a type tells Elasticsearch what type the sort values should have, which is important
|
||||
* for cross-index search, if there are sort fields that exist on some indices only.
|
||||
* If the unmapped type is <tt>null</tt> then query execution will fail if one or more indices
|
||||
* don't have a mapping for the current field.
|
||||
* Specifying a type tells Elasticsearch what type the sort values should
|
||||
* have, which is important for cross-index search, if there are sort fields
|
||||
* that exist on some indices only. If the unmapped type is <tt>null</tt>
|
||||
* then query execution will fail if one or more indices don't have a
|
||||
* mapping for the current field.
|
||||
*/
|
||||
public FieldSortBuilder unmappedType(String type) {
|
||||
this.unmappedType = type;
|
||||
@ -75,8 +123,19 @@ public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
|
||||
}
|
||||
|
||||
/**
|
||||
* Defines what values to pick in the case a document contains multiple values for the targeted sort field.
|
||||
* Possible values: min, max, sum and avg
|
||||
* Returns the type to use in case the current field is not mapped in an
|
||||
* index.
|
||||
*/
|
||||
public String unmappedType() {
|
||||
return this.unmappedType;
|
||||
}
|
||||
|
||||
/**
|
||||
* Defines what values to pick in the case a document contains multiple
|
||||
* values for the targeted sort field. Possible values: min, max, sum and
|
||||
* avg
|
||||
*
|
||||
* TODO would love to see an enum here
|
||||
* <p>
|
||||
* The last two values are only applicable for number based fields.
|
||||
*/
|
||||
@ -86,44 +145,217 @@ public class FieldSortBuilder extends SortBuilder<FieldSortBuilder> {
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the nested filter that the nested objects should match with in order to be taken into account
|
||||
* for sorting.
|
||||
* Returns what values to pick in the case a document contains multiple
|
||||
* values for the targeted sort field.
|
||||
*/
|
||||
public String sortMode() {
|
||||
return this.sortMode;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the nested filter that the nested objects should match with in order
|
||||
* to be taken into account for sorting.
|
||||
*
|
||||
* TODO should the above getters and setters be deprecated/ changed in
|
||||
* favour of real getters and setters?
|
||||
*/
|
||||
public FieldSortBuilder setNestedFilter(QueryBuilder nestedFilter) {
|
||||
this.nestedFilter = nestedFilter;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the nested filter that the nested objects should match with in
|
||||
* order to be taken into account for sorting.
|
||||
*/
|
||||
public QueryBuilder getNestedFilter() {
|
||||
return this.nestedFilter;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the nested path if sorting occurs on a field that is inside a nested object. By default when sorting on a
|
||||
* field inside a nested object, the nearest upper nested object is selected as nested path.
|
||||
* Sets the nested path if sorting occurs on a field that is inside a nested
|
||||
* object. By default when sorting on a field inside a nested object, the
|
||||
* nearest upper nested object is selected as nested path.
|
||||
*/
|
||||
public FieldSortBuilder setNestedPath(String nestedPath) {
|
||||
this.nestedPath = nestedPath;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the nested path if sorting occurs in a field that is inside a
|
||||
* nested object.
|
||||
*/
|
||||
public String getNestedPath() {
|
||||
return this.nestedPath;
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject(fieldName);
|
||||
builder.field(ORDER_FIELD.getPreferredName(), order);
|
||||
if (missing != null) {
|
||||
builder.field("missing", missing);
|
||||
if (missing instanceof BytesRef) {
|
||||
builder.field(MISSING.getPreferredName(), ((BytesRef) missing).utf8ToString());
|
||||
} else {
|
||||
builder.field(MISSING.getPreferredName(), missing);
|
||||
}
|
||||
}
|
||||
if (unmappedType != null) {
|
||||
builder.field(SortParseElement.UNMAPPED_TYPE.getPreferredName(), unmappedType);
|
||||
builder.field(UNMAPPED_TYPE.getPreferredName(), unmappedType);
|
||||
}
|
||||
if (sortMode != null) {
|
||||
builder.field("mode", sortMode);
|
||||
builder.field(SORT_MODE.getPreferredName(), sortMode);
|
||||
}
|
||||
if (nestedFilter != null) {
|
||||
builder.field("nested_filter", nestedFilter, params);
|
||||
builder.field(NESTED_FILTER.getPreferredName(), nestedFilter, params);
|
||||
}
|
||||
if (nestedPath != null) {
|
||||
builder.field("nested_path", nestedPath);
|
||||
builder.field(NESTED_PATH.getPreferredName(), nestedPath);
|
||||
}
|
||||
builder.endObject();
|
||||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
if (this == other) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (other == null || getClass() != other.getClass()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
FieldSortBuilder builder = (FieldSortBuilder) other;
|
||||
return (Objects.equals(this.fieldName, builder.fieldName) && Objects.equals(this.nestedFilter, builder.nestedFilter)
|
||||
&& Objects.equals(this.nestedPath, builder.nestedPath) && Objects.equals(this.missing, builder.missing)
|
||||
&& Objects.equals(this.order, builder.order) && Objects.equals(this.sortMode, builder.sortMode)
|
||||
&& Objects.equals(this.unmappedType, builder.unmappedType));
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Objects.hash(this.fieldName, this.nestedFilter, this.nestedPath, this.missing, this.order, this.sortMode, this.unmappedType);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getWriteableName() {
|
||||
return NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
out.writeString(this.fieldName);
|
||||
if (this.nestedFilter != null) {
|
||||
out.writeBoolean(true);
|
||||
out.writeQuery(this.nestedFilter);
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
out.writeOptionalString(this.nestedPath);
|
||||
out.writeGenericValue(this.missing);
|
||||
|
||||
if (this.order != null) {
|
||||
out.writeBoolean(true);
|
||||
this.order.writeTo(out);
|
||||
} else {
|
||||
out.writeBoolean(false);
|
||||
}
|
||||
|
||||
out.writeOptionalString(this.sortMode);
|
||||
out.writeOptionalString(this.unmappedType);
|
||||
}
|
||||
|
||||
@Override
|
||||
public FieldSortBuilder readFrom(StreamInput in) throws IOException {
|
||||
String fieldName = in.readString();
|
||||
FieldSortBuilder result = new FieldSortBuilder(fieldName);
|
||||
if (in.readBoolean()) {
|
||||
QueryBuilder query = in.readQuery();
|
||||
result.setNestedFilter(query);
|
||||
}
|
||||
result.setNestedPath(in.readOptionalString());
|
||||
result.missing(in.readGenericValue());
|
||||
|
||||
if (in.readBoolean()) {
|
||||
result.order(SortOrder.readOrderFrom(in));
|
||||
}
|
||||
result.sortMode(in.readOptionalString());
|
||||
result.unmappedType(in.readOptionalString());
|
||||
return result;
|
||||
}
|
||||
|
||||
@Override
|
||||
public FieldSortBuilder fromXContent(QueryParseContext context, String fieldName) throws IOException {
|
||||
XContentParser parser = context.parser();
|
||||
|
||||
QueryBuilder nestedFilter = null;
|
||||
String nestedPath = null;
|
||||
Object missing = null;
|
||||
SortOrder order = null;
|
||||
String sortMode = null;
|
||||
String unmappedType = null;
|
||||
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
if (context.parseFieldMatcher().match(currentFieldName, NESTED_FILTER)) {
|
||||
nestedFilter = context.parseInnerQueryBuilder();
|
||||
} else {
|
||||
throw new ParsingException(parser.getTokenLocation(), "Expected " + NESTED_FILTER.getPreferredName() + " element.");
|
||||
}
|
||||
} else if (token.isValue()) {
|
||||
if (context.parseFieldMatcher().match(currentFieldName, NESTED_PATH)) {
|
||||
nestedPath = parser.text();
|
||||
} else if (context.parseFieldMatcher().match(currentFieldName, MISSING)) {
|
||||
missing = parser.objectBytes();
|
||||
} else if (context.parseFieldMatcher().match(currentFieldName, REVERSE)) {
|
||||
if (parser.booleanValue()) {
|
||||
order = SortOrder.DESC;
|
||||
}
|
||||
// else we keep the default ASC
|
||||
} else if (context.parseFieldMatcher().match(currentFieldName, ORDER)) {
|
||||
String sortOrder = parser.text();
|
||||
if ("asc".equals(sortOrder)) {
|
||||
order = SortOrder.ASC;
|
||||
} else if ("desc".equals(sortOrder)) {
|
||||
order = SortOrder.DESC;
|
||||
} else {
|
||||
throw new IllegalStateException("Sort order " + sortOrder + " not supported.");
|
||||
}
|
||||
} else if (context.parseFieldMatcher().match(currentFieldName, SORT_MODE)) {
|
||||
sortMode = parser.text();
|
||||
} else if (context.parseFieldMatcher().match(currentFieldName, UNMAPPED_TYPE)) {
|
||||
unmappedType = parser.text();
|
||||
} else {
|
||||
throw new IllegalArgumentException("Option " + currentFieldName + " not supported.");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
FieldSortBuilder builder = new FieldSortBuilder(fieldName);
|
||||
if (nestedFilter != null) {
|
||||
builder.setNestedFilter(nestedFilter);
|
||||
}
|
||||
if (nestedPath != null) {
|
||||
builder.setNestedPath(nestedPath);
|
||||
}
|
||||
if (missing != null) {
|
||||
builder.missing(missing);
|
||||
}
|
||||
if (order != null) {
|
||||
builder.order(order);
|
||||
}
|
||||
if (sortMode != null) {
|
||||
builder.sortMode(sortMode);
|
||||
}
|
||||
if (unmappedType != null) {
|
||||
builder.unmappedType(unmappedType);
|
||||
}
|
||||
return builder;
|
||||
}
|
||||
}
|
||||
|
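Aside (not part of the diff): the rewritten FieldSortBuilder above keeps its fluent setters, so a sort can be configured in code before being serialized or rendered to x-content; the field names and values below are illustrative only.

FieldSortBuilder sort = new FieldSortBuilder("price")
        .order(SortOrder.DESC)
        .missing("_last")        // stored internally as a BytesRef, see missing(Object) above
        .unmappedType("double")  // avoids failures on indices that don't map the field
        .sortMode("min");
sort.setNestedPath("offers");    // nested options have plain setters that also return the builder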
@ -23,11 +23,9 @@ import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.geo.GeoDistance;
|
||||
import org.elasticsearch.common.geo.GeoPoint;
|
||||
import org.elasticsearch.common.geo.GeoUtils;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteable;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.unit.DistanceUnit;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
@ -44,8 +42,7 @@ import java.util.Objects;
|
||||
/**
|
||||
* A geo distance based sorting on a geo point like field.
|
||||
*/
|
||||
public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder>
|
||||
implements ToXContent, NamedWriteable<GeoDistanceSortBuilder>, SortElementParserTemp<GeoDistanceSortBuilder> {
|
||||
public class GeoDistanceSortBuilder extends SortBuilder<GeoDistanceSortBuilder> implements SortBuilderParser<GeoDistanceSortBuilder> {
|
||||
public static final String NAME = "_geo_distance";
|
||||
public static final boolean DEFAULT_COERCE = false;
|
||||
public static final boolean DEFAULT_IGNORE_MALFORMED = false;
|
||||
|
@ -22,7 +22,6 @@ package org.elasticsearch.search.sort;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.io.stream.NamedWriteable;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
@ -35,7 +34,7 @@ import java.util.Objects;
|
||||
/**
|
||||
* A sort builder allowing to sort by score.
|
||||
*/
|
||||
public class ScoreSortBuilder extends SortBuilder<ScoreSortBuilder> implements NamedWriteable<ScoreSortBuilder>,
|
||||
public class ScoreSortBuilder extends SortBuilder<ScoreSortBuilder> implements SortBuilderParser<ScoreSortBuilder>,
|
||||
SortElementParserTemp<ScoreSortBuilder> {
|
||||
|
||||
private static final String NAME = "_score";
|
||||
|
@ -0,0 +1,40 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.search.sort;

import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.index.query.QueryParseContext;

import java.io.IOException;

public interface SortBuilderParser<T extends ToXContent> extends NamedWriteable<T>, ToXContent {
/**
* Creates a new item from the json held by the {@link SortBuilderParser}
* in {@link org.elasticsearch.common.xcontent.XContent} format
*
* @param context
*            the input parse context. The state on the parser contained in
*            this context will be changed as a side effect of this method
*            call
* @return the new item
*/
SortBuilder fromXContent(QueryParseContext context, String elementName) throws IOException;
}
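Aside (not part of the diff): this interface is what lets each sort builder parse itself from x-content and travel over the wire as a NamedWriteable; FieldSortBuilder, ScoreSortBuilder and GeoDistanceSortBuilder above implement it. A hedged sketch of how a registered prototype might be used; the method below is hypothetical:

SortBuilder<?> parseSortEntry(SortBuilderParser<?> prototype, QueryParseContext context,
                              String elementName) throws IOException {
    // fromXContent consumes the parser state held by the context as a side effect
    return prototype.fromXContent(context, elementName);
}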
@ -20,6 +20,7 @@
|
||||
package org.elasticsearch.action.admin.indices.create;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.UnavailableShardsException;
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
|
||||
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
@ -29,6 +30,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.mapper.MapperParsingException;
|
||||
import org.elasticsearch.index.query.RangeQueryBuilder;
|
||||
@ -179,7 +181,6 @@ public class CreateIndexIT extends ESIntegTestCase {
|
||||
}
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/14932,https://github.com/elastic/elasticsearch/pull/15853" )
|
||||
public void testCreateAndDeleteIndexConcurrently() throws InterruptedException {
|
||||
createIndex("test");
|
||||
final AtomicInteger indexVersion = new AtomicInteger(0);
|
||||
@ -224,10 +225,14 @@ public class CreateIndexIT extends ESIntegTestCase {
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
try {
|
||||
synchronized (indexVersionLock) {
|
||||
client().prepareIndex("test", "test").setSource("index_version", indexVersion.get()).get();
|
||||
client().prepareIndex("test", "test").setSource("index_version", indexVersion.get())
|
||||
.setTimeout(TimeValue.timeValueSeconds(10)).get();
|
||||
}
|
||||
} catch (IndexNotFoundException inf) {
|
||||
// fine
|
||||
} catch (UnavailableShardsException ex) {
|
||||
assertEquals(ex.getCause().getClass(), IndexNotFoundException.class);
|
||||
// fine we run into a delete index while retrying
|
||||
}
|
||||
}
|
||||
latch.await();
|
||||
|
@ -41,6 +41,7 @@ import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.IndexFolderUpgrader;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
@ -105,6 +106,8 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
|
||||
|
||||
List<String> indexes;
|
||||
List<String> unsupportedIndexes;
|
||||
static String singleDataPathNodeName;
|
||||
static String multiDataPathNodeName;
|
||||
static Path singleDataPath;
|
||||
static Path[] multiDataPath;
|
||||
|
||||
@ -127,6 +130,8 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
|
||||
|
||||
@AfterClass
|
||||
public static void tearDownStatics() {
|
||||
singleDataPathNodeName = null;
|
||||
multiDataPathNodeName = null;
|
||||
singleDataPath = null;
|
||||
multiDataPath = null;
|
||||
}
|
||||
@ -157,7 +162,8 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
|
||||
InternalTestCluster.Async<String> multiDataPathNode = internalCluster().startNodeAsync(nodeSettings.build());
|
||||
|
||||
// find single data path dir
|
||||
Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, singleDataPathNode.get()).nodeDataPaths();
|
||||
singleDataPathNodeName = singleDataPathNode.get();
|
||||
Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, singleDataPathNodeName).nodeDataPaths();
|
||||
assertEquals(1, nodePaths.length);
|
||||
singleDataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER);
|
||||
assertFalse(Files.exists(singleDataPath));
|
||||
@ -165,7 +171,8 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
|
||||
logger.info("--> Single data path: {}", singleDataPath);
|
||||
|
||||
// find multi data path dirs
|
||||
nodePaths = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNode.get()).nodeDataPaths();
|
||||
multiDataPathNodeName = multiDataPathNode.get();
|
||||
nodePaths = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNodeName).nodeDataPaths();
|
||||
assertEquals(2, nodePaths.length);
|
||||
multiDataPath = new Path[] {nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER),
|
||||
nodePaths[1].resolve(NodeEnvironment.INDICES_FOLDER)};
|
||||
@ -178,6 +185,13 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
|
||||
replicas.get(); // wait for replicas
|
||||
}
|
||||
|
||||
void upgradeIndexFolder() throws Exception {
|
||||
final NodeEnvironment nodeEnvironment = internalCluster().getInstance(NodeEnvironment.class, singleDataPathNodeName);
|
||||
IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnvironment);
|
||||
final NodeEnvironment nodeEnv = internalCluster().getInstance(NodeEnvironment.class, multiDataPathNodeName);
|
||||
IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnv);
|
||||
}
|
||||
|
||||
String loadIndex(String indexFile) throws Exception {
|
||||
Path unzipDir = createTempDir();
|
||||
Path unzipDataDir = unzipDir.resolve("data");
|
||||
@ -296,6 +310,10 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
|
||||
void assertOldIndexWorks(String index) throws Exception {
|
||||
Version version = extractVersion(index);
|
||||
String indexName = loadIndex(index);
|
||||
// we explicitly upgrade the index folders as these indices
|
||||
// are imported as dangling indices and not available on
|
||||
// node startup
|
||||
upgradeIndexFolder();
|
||||
importIndex(indexName);
|
||||
assertIndexSanity(indexName, version);
|
||||
assertBasicSearchWorks(indexName);
|
||||
|
@ -92,22 +92,22 @@ public class DiskUsageTests extends ESTestCase {
}

public void testFillShardLevelInfo() {
final Index index = new Index("test", "_na_");
final Index index = new Index("test", "0xdeadbeef");
ShardRouting test_0 = ShardRouting.newUnassigned(index, 0, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
ShardRoutingHelper.initialize(test_0, "node1");
ShardRoutingHelper.moveToStarted(test_0);
Path test0Path = createTempDir().resolve("indices").resolve("test").resolve("0");
Path test0Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("0");
CommonStats commonStats0 = new CommonStats();
commonStats0.store = new StoreStats(100, 1);
ShardRouting test_1 = ShardRouting.newUnassigned(index, 1, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
ShardRoutingHelper.initialize(test_1, "node2");
ShardRoutingHelper.moveToStarted(test_1);
Path test1Path = createTempDir().resolve("indices").resolve("test").resolve("1");
Path test1Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("1");
CommonStats commonStats1 = new CommonStats();
commonStats1.store = new StoreStats(1000, 1);
ShardStats[] stats = new ShardStats[] {
new ShardStats(test_0, new ShardPath(false, test0Path, test0Path, "0xdeadbeef", test_0.shardId()), commonStats0 , null),
new ShardStats(test_1, new ShardPath(false, test1Path, test1Path, "0xdeadbeef", test_1.shardId()), commonStats1 , null)
new ShardStats(test_0, new ShardPath(false, test0Path, test0Path, test_0.shardId()), commonStats0 , null),
new ShardStats(test_1, new ShardPath(false, test1Path, test1Path, test_1.shardId()), commonStats1 , null)
};
ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<ShardRouting, String> routingToPath = ImmutableOpenMap.builder();

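Aside (not part of the diff): the test change above reflects the new on-disk layout in which index folders are named by index UUID instead of index name; nodeDataPath below is an assumed placeholder.

// old layout: <data path>/indices/<index name>/<shard id>
// new layout: <data path>/indices/<index uuid>/<shard id>
Index index = new Index("test", "0xdeadbeef");
Path shard0 = nodeDataPath.resolve("indices").resolve(index.getUUID()).resolve("0");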
@ -22,8 +22,10 @@ package org.elasticsearch.cluster.allocation;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
|
||||
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.health.ClusterHealthStatus;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.ShardRoutingState;
|
||||
@ -42,6 +44,7 @@ import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
||||
@ -226,9 +229,10 @@ public class ClusterRerouteIT extends ESIntegTestCase {
|
||||
assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).id()).get(0).state(), equalTo(ShardRoutingState.STARTED));
|
||||
|
||||
client().prepareIndex("test", "type", "1").setSource("field", "value").setRefresh(true).execute().actionGet();
|
||||
final Index index = resolveIndex("test");
|
||||
|
||||
logger.info("--> closing all nodes");
|
||||
Path[] shardLocation = internalCluster().getInstance(NodeEnvironment.class, node_1).availableShardPaths(new ShardId("test", "_na_", 0));
|
||||
Path[] shardLocation = internalCluster().getInstance(NodeEnvironment.class, node_1).availableShardPaths(new ShardId(index, 0));
|
||||
assertThat(FileSystemUtils.exists(shardLocation), equalTo(true)); // make sure the data is there!
|
||||
internalCluster().closeNonSharedNodes(false); // don't wipe data directories the index needs to be there!
|
||||
|
||||
|
@ -0,0 +1,366 @@
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.common.util;
|
||||
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.bwcompat.OldIndexBackwardsCompatibilityIT;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.routing.AllocationId;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.gateway.MetaDataStateFormat;
|
||||
import org.elasticsearch.gateway.MetaStateService;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.shard.ShardPath;
|
||||
import org.elasticsearch.index.shard.ShardStateMetaData;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.BufferedWriter;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.URISyntaxException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.DirectoryStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.hamcrest.core.Is.is;
|
||||
|
||||
@LuceneTestCase.SuppressFileSystems("ExtrasFS")
|
||||
public class IndexFolderUpgraderTests extends ESTestCase {
|
||||
|
||||
private static MetaDataStateFormat<IndexMetaData> indexMetaDataStateFormat =
|
||||
new MetaDataStateFormat<IndexMetaData>(XContentType.SMILE, MetaStateService.INDEX_STATE_FILE_PREFIX) {
|
||||
|
||||
@Override
|
||||
public void toXContent(XContentBuilder builder, IndexMetaData state) throws IOException {
|
||||
IndexMetaData.Builder.toXContent(state, builder, ToXContent.EMPTY_PARAMS);
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexMetaData fromXContent(XContentParser parser) throws IOException {
|
||||
return IndexMetaData.Builder.fromXContent(parser);
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* tests custom data paths are upgraded
|
||||
*/
|
||||
public void testUpgradeCustomDataPath() throws IOException {
|
||||
Path customPath = createTempDir();
|
||||
final Settings nodeSettings = Settings.builder()
|
||||
.put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean())
|
||||
.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build();
|
||||
try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) {
|
||||
final Index index = new Index(randomAsciiOfLength(10), Strings.randomBase64UUID());
|
||||
Settings settings = Settings.builder()
|
||||
.put(nodeSettings)
|
||||
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
|
||||
.put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString())
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5))
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.build();
|
||||
IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build();
|
||||
int numIdxFiles = randomIntBetween(1, 5);
|
||||
int numTranslogFiles = randomIntBetween(1, 5);
|
||||
IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings);
|
||||
writeIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles);
|
||||
IndexFolderUpgrader helper = new IndexFolderUpgrader(settings, nodeEnv);
|
||||
helper.upgrade(indexSettings.getIndex().getName());
|
||||
checkIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* tests upgrade on partially upgraded index, when we crash while upgrading
|
||||
*/
|
||||
public void testPartialUpgradeCustomDataPath() throws IOException {
|
||||
Path customPath = createTempDir();
|
||||
final Settings nodeSettings = Settings.builder()
|
||||
.put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean())
|
||||
.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), customPath.toAbsolutePath().toString()).build();
|
||||
try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) {
|
||||
final Index index = new Index(randomAsciiOfLength(10), Strings.randomBase64UUID());
|
||||
Settings settings = Settings.builder()
|
||||
.put(nodeSettings)
|
||||
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
|
||||
.put(IndexMetaData.SETTING_DATA_PATH, customPath.toAbsolutePath().toString())
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5))
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.build();
|
||||
IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build();
|
||||
int numIdxFiles = randomIntBetween(1, 5);
|
||||
int numTranslogFiles = randomIntBetween(1, 5);
|
||||
IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings);
|
||||
writeIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles);
|
||||
IndexFolderUpgrader helper = new IndexFolderUpgrader(settings, nodeEnv) {
|
||||
@Override
|
||||
void upgrade(Index index, Path source, Path target) throws IOException {
|
||||
if(randomBoolean()) {
|
||||
throw new FileNotFoundException("simulated");
|
||||
}
|
||||
}
|
||||
};
|
||||
// only upgrade some paths
|
||||
try {
|
||||
helper.upgrade(index.getName());
|
||||
} catch (IOException e) {
|
||||
assertTrue(e instanceof FileNotFoundException);
|
||||
}
|
||||
helper = new IndexFolderUpgrader(settings, nodeEnv);
|
||||
// try to upgrade again
|
||||
helper.upgrade(indexSettings.getIndex().getName());
|
||||
checkIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles);
|
||||
}
|
||||
}
|
||||
|
||||
public void testUpgrade() throws IOException {
|
||||
final Settings nodeSettings = Settings.builder()
|
||||
.put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build();
|
||||
try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) {
|
||||
final Index index = new Index(randomAsciiOfLength(10), Strings.randomBase64UUID());
|
||||
Settings settings = Settings.builder()
|
||||
.put(nodeSettings)
|
||||
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5))
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.build();
|
||||
IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build();
|
||||
int numIdxFiles = randomIntBetween(1, 5);
|
||||
int numTranslogFiles = randomIntBetween(1, 5);
|
||||
IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings);
|
||||
writeIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles);
|
||||
IndexFolderUpgrader helper = new IndexFolderUpgrader(settings, nodeEnv);
|
||||
helper.upgrade(indexSettings.getIndex().getName());
|
||||
checkIndex(nodeEnv, indexSettings, numIdxFiles, numTranslogFiles);
|
||||
}
|
||||
}
|
||||
|
||||
public void testUpgradeIndices() throws IOException {
|
||||
final Settings nodeSettings = Settings.builder()
|
||||
.put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), randomBoolean()).build();
|
||||
try (NodeEnvironment nodeEnv = newNodeEnvironment(nodeSettings)) {
|
||||
Map<IndexSettings, Tuple<Integer, Integer>> indexSettingsMap = new HashMap<>();
|
||||
for (int i = 0; i < randomIntBetween(2, 5); i++) {
|
||||
final Index index = new Index(randomAsciiOfLength(10), Strings.randomBase64UUID());
|
||||
Settings settings = Settings.builder()
|
||||
.put(nodeSettings)
|
||||
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_0_0)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 5))
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.build();
|
||||
IndexMetaData indexState = IndexMetaData.builder(index.getName()).settings(settings).build();
|
||||
Tuple<Integer, Integer> fileCounts = new Tuple<>(randomIntBetween(1, 5), randomIntBetween(1, 5));
|
||||
IndexSettings indexSettings = new IndexSettings(indexState, nodeSettings);
|
||||
indexSettingsMap.put(indexSettings, fileCounts);
|
||||
writeIndex(nodeEnv, indexSettings, fileCounts.v1(), fileCounts.v2());
|
||||
}
|
||||
IndexFolderUpgrader.upgradeIndicesIfNeeded(nodeSettings, nodeEnv);
|
||||
for (Map.Entry<IndexSettings, Tuple<Integer, Integer>> entry : indexSettingsMap.entrySet()) {
|
||||
checkIndex(nodeEnv, entry.getKey(), entry.getValue().v1(), entry.getValue().v2());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Run upgrade on a real bwc index
|
||||
*/
|
||||
public void testUpgradeRealIndex() throws IOException, URISyntaxException {
|
||||
List<Path> indexes = new ArrayList<>();
|
||||
try (DirectoryStream<Path> stream = Files.newDirectoryStream(getBwcIndicesPath(), "index-*.zip")) {
|
||||
for (Path path : stream) {
|
||||
indexes.add(path);
|
||||
}
|
||||
}
|
||||
CollectionUtil.introSort(indexes, (o1, o2) -> o1.getFileName().compareTo(o2.getFileName()));
|
||||
final Path path = randomFrom(indexes);
|
||||
final String indexName = path.getFileName().toString().replace(".zip", "").toLowerCase(Locale.ROOT);
try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) {
Path unzipDir = createTempDir();
Path unzipDataDir = unzipDir.resolve("data");
// decompress the index
try (InputStream stream = Files.newInputStream(path)) {
TestUtil.unzip(stream, unzipDir);
}
// check it is unique
assertTrue(Files.exists(unzipDataDir));
Path[] list = FileSystemUtils.files(unzipDataDir);
if (list.length != 1) {
throw new IllegalStateException("Backwards index must contain exactly one cluster but was " + list.length);
}
// the bwc scripts packs the indices under this path
Path src = list[0].resolve("nodes/0/indices/" + indexName);
assertTrue("[" + path + "] missing index dir: " + src.toString(), Files.exists(src));
final Path indicesPath = randomFrom(nodeEnvironment.nodePaths()).indicesPath;
logger.info("--> injecting index [{}] into [{}]", indexName, indicesPath);
OldIndexBackwardsCompatibilityIT.copyIndex(logger, src, indexName, indicesPath);
IndexFolderUpgrader.upgradeIndicesIfNeeded(Settings.EMPTY, nodeEnvironment);

// ensure old index folder is deleted
Set<String> indexFolders = nodeEnvironment.availableIndexFolders();
assertEquals(indexFolders.size(), 1);

// ensure index metadata is moved
IndexMetaData indexMetaData = indexMetaDataStateFormat.loadLatestState(logger,
nodeEnvironment.resolveIndexFolder(indexFolders.iterator().next()));
assertNotNull(indexMetaData);
Index index = indexMetaData.getIndex();
assertEquals(index.getName(), indexName);

Set<ShardId> shardIds = nodeEnvironment.findAllShardIds(index);
// ensure all shards are moved
assertEquals(shardIds.size(), indexMetaData.getNumberOfShards());
for (ShardId shardId : shardIds) {
final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnvironment, shardId,
new IndexSettings(indexMetaData, Settings.EMPTY));
final Path translog = shardPath.resolveTranslog();
final Path idx = shardPath.resolveIndex();
final Path state = shardPath.getShardStatePath().resolve(MetaDataStateFormat.STATE_DIR_NAME);
assertTrue(shardPath.exists());
assertTrue(Files.exists(translog));
assertTrue(Files.exists(idx));
assertTrue(Files.exists(state));
}
}
}

public void testNeedsUpgrade() throws IOException {
final Index index = new Index("foo", Strings.randomBase64UUID());
IndexMetaData indexState = IndexMetaData.builder(index.getName())
.settings(Settings.builder()
.put(IndexMetaData.SETTING_INDEX_UUID, index.getUUID())
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
.numberOfShards(1)
.numberOfReplicas(0)
.build();
try (NodeEnvironment nodeEnvironment = newNodeEnvironment()) {
indexMetaDataStateFormat.write(indexState, 1, nodeEnvironment.indexPaths(index));
assertFalse(IndexFolderUpgrader.needsUpgrade(index, index.getUUID()));
}
}

private void checkIndex(NodeEnvironment nodeEnv, IndexSettings indexSettings,
int numIdxFiles, int numTranslogFiles) throws IOException {
final Index index = indexSettings.getIndex();
// ensure index state can be loaded
IndexMetaData loadLatestState = indexMetaDataStateFormat.loadLatestState(logger, nodeEnv.indexPaths(index));
assertNotNull(loadLatestState);
assertEquals(loadLatestState.getIndex(), index);
for (int shardId = 0; shardId < indexSettings.getNumberOfShards(); shardId++) {
// ensure shard path can be loaded
ShardPath targetShardPath = ShardPath.loadShardPath(logger, nodeEnv, new ShardId(index, shardId), indexSettings);
assertNotNull(targetShardPath);
// ensure shard contents are copied over
final Path translog = targetShardPath.resolveTranslog();
final Path idx = targetShardPath.resolveIndex();

// ensure index and translog files are copied over
assertEquals(numTranslogFiles, FileSystemUtils.files(translog).length);
assertEquals(numIdxFiles, FileSystemUtils.files(idx).length);
Path[] files = FileSystemUtils.files(translog);
final HashSet<Path> translogFiles = new HashSet<>(Arrays.asList(files));
for (int i = 0; i < numTranslogFiles; i++) {
final String name = Integer.toString(i);
translogFiles.contains(translog.resolve(name + ".translog"));
byte[] content = Files.readAllBytes(translog.resolve(name + ".translog"));
assertEquals(name , new String(content, StandardCharsets.UTF_8));
}
Path[] indexFileList = FileSystemUtils.files(idx);
final HashSet<Path> idxFiles = new HashSet<>(Arrays.asList(indexFileList));
for (int i = 0; i < numIdxFiles; i++) {
final String name = Integer.toString(i);
idxFiles.contains(idx.resolve(name + ".tst"));
byte[] content = Files.readAllBytes(idx.resolve(name + ".tst"));
assertEquals(name, new String(content, StandardCharsets.UTF_8));
}
}
}

private void writeIndex(NodeEnvironment nodeEnv, IndexSettings indexSettings,
int numIdxFiles, int numTranslogFiles) throws IOException {
NodeEnvironment.NodePath[] nodePaths = nodeEnv.nodePaths();
Path[] oldIndexPaths = new Path[nodePaths.length];
for (int i = 0; i < nodePaths.length; i++) {
oldIndexPaths[i] = nodePaths[i].indicesPath.resolve(indexSettings.getIndex().getName());
}
indexMetaDataStateFormat.write(indexSettings.getIndexMetaData(), 1, oldIndexPaths);
for (int id = 0; id < indexSettings.getNumberOfShards(); id++) {
Path oldIndexPath = randomFrom(oldIndexPaths);
ShardId shardId = new ShardId(indexSettings.getIndex(), id);
if (indexSettings.hasCustomDataPath()) {
Path customIndexPath = nodeEnv.resolveBaseCustomLocation(indexSettings).resolve(indexSettings.getIndex().getName());
writeShard(shardId, customIndexPath, numIdxFiles, numTranslogFiles);
} else {
writeShard(shardId, oldIndexPath, numIdxFiles, numTranslogFiles);
}
ShardStateMetaData state = new ShardStateMetaData(true, indexSettings.getUUID(), AllocationId.newInitializing());
ShardStateMetaData.FORMAT.write(state, 1, oldIndexPath.resolve(String.valueOf(shardId.getId())));
}
}

private void writeShard(ShardId shardId, Path indexLocation,
final int numIdxFiles, final int numTranslogFiles) throws IOException {
Path oldShardDataPath = indexLocation.resolve(String.valueOf(shardId.getId()));
final Path translogPath = oldShardDataPath.resolve(ShardPath.TRANSLOG_FOLDER_NAME);
final Path idxPath = oldShardDataPath.resolve(ShardPath.INDEX_FOLDER_NAME);
Files.createDirectories(translogPath);
Files.createDirectories(idxPath);
for (int i = 0; i < numIdxFiles; i++) {
String filename = Integer.toString(i);
try (BufferedWriter w = Files.newBufferedWriter(idxPath.resolve(filename + ".tst"),
StandardCharsets.UTF_8)) {
w.write(filename);
}
}
for (int i = 0; i < numTranslogFiles; i++) {
String filename = Integer.toString(i);
try (BufferedWriter w = Files.newBufferedWriter(translogPath.resolve(filename + ".translog"),
StandardCharsets.UTF_8)) {
w.write(filename);
}
}
}
}
|
@ -27,6 +27,7 @@ import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
@ -36,7 +37,11 @@ import org.elasticsearch.test.IndexSettingsModule;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
@ -129,21 +134,22 @@ public class NodeEnvironmentTests extends ESTestCase {
public void testShardLock() throws IOException {
final NodeEnvironment env = newNodeEnvironment();

ShardLock fooLock = env.shardLock(new ShardId("foo", "_na_", 0));
assertEquals(new ShardId("foo", "_na_", 0), fooLock.getShardId());
Index index = new Index("foo", "fooUUID");
ShardLock fooLock = env.shardLock(new ShardId(index, 0));
assertEquals(new ShardId(index, 0), fooLock.getShardId());

try {
env.shardLock(new ShardId("foo", "_na_", 0));
env.shardLock(new ShardId(index, 0));
fail("shard is locked");
} catch (LockObtainFailedException ex) {
// expected
}
for (Path path : env.indexPaths("foo")) {
for (Path path : env.indexPaths(index)) {
Files.createDirectories(path.resolve("0"));
Files.createDirectories(path.resolve("1"));
}
try {
env.lockAllForIndex(new Index("foo", "_na_"), idxSettings, randomIntBetween(0, 10));
env.lockAllForIndex(index, idxSettings, randomIntBetween(0, 10));
fail("shard 0 is locked");
} catch (LockObtainFailedException ex) {
// expected
@ -151,11 +157,11 @@ public class NodeEnvironmentTests extends ESTestCase {

fooLock.close();
// can lock again?
env.shardLock(new ShardId("foo", "_na_", 0)).close();
env.shardLock(new ShardId(index, 0)).close();

List<ShardLock> locks = env.lockAllForIndex(new Index("foo", "_na_"), idxSettings, randomIntBetween(0, 10));
List<ShardLock> locks = env.lockAllForIndex(index, idxSettings, randomIntBetween(0, 10));
try {
env.shardLock(new ShardId("foo", "_na_", 0));
env.shardLock(new ShardId(index, 0));
fail("shard is locked");
} catch (LockObtainFailedException ex) {
// expected
@ -165,18 +171,45 @@ public class NodeEnvironmentTests extends ESTestCase {
env.close();
}

public void testGetAllIndices() throws Exception {
public void testAvailableIndexFolders() throws Exception {
final NodeEnvironment env = newNodeEnvironment();
final int numIndices = randomIntBetween(1, 10);
Set<String> actualPaths = new HashSet<>();
for (int i = 0; i < numIndices; i++) {
for (Path path : env.indexPaths("foo" + i)) {
Files.createDirectories(path);
Index index = new Index("foo" + i, "fooUUID" + i);
for (Path path : env.indexPaths(index)) {
Files.createDirectories(path.resolve(MetaDataStateFormat.STATE_DIR_NAME));
actualPaths.add(path.getFileName().toString());
}
}
Set<String> indices = env.findAllIndices();
assertEquals(indices.size(), numIndices);

assertThat(actualPaths, equalTo(env.availableIndexFolders()));
assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
env.close();
}

public void testResolveIndexFolders() throws Exception {
final NodeEnvironment env = newNodeEnvironment();
final int numIndices = randomIntBetween(1, 10);
Map<String, List<Path>> actualIndexDataPaths = new HashMap<>();
for (int i = 0; i < numIndices; i++) {
assertTrue(indices.contains("foo" + i));
Index index = new Index("foo" + i, "fooUUID" + i);
Path[] indexPaths = env.indexPaths(index);
for (Path path : indexPaths) {
Files.createDirectories(path);
String fileName = path.getFileName().toString();
List<Path> paths = actualIndexDataPaths.get(fileName);
if (paths == null) {
paths = new ArrayList<>();
}
paths.add(path);
actualIndexDataPaths.put(fileName, paths);
}
}
for (Map.Entry<String, List<Path>> actualIndexDataPathEntry : actualIndexDataPaths.entrySet()) {
List<Path> actual = actualIndexDataPathEntry.getValue();
Path[] actualPaths = actual.toArray(new Path[actual.size()]);
assertThat(actualPaths, equalTo(env.resolveIndexFolder(actualIndexDataPathEntry.getKey())));
}
assertTrue("LockedShards: " + env.lockedShards(), env.lockedShards().isEmpty());
env.close();
@ -184,44 +217,45 @@ public class NodeEnvironmentTests extends ESTestCase {

public void testDeleteSafe() throws IOException, InterruptedException {
final NodeEnvironment env = newNodeEnvironment();
ShardLock fooLock = env.shardLock(new ShardId("foo", "_na_", 0));
assertEquals(new ShardId("foo", "_na_", 0), fooLock.getShardId());
final Index index = new Index("foo", "fooUUID");
ShardLock fooLock = env.shardLock(new ShardId(index, 0));
assertEquals(new ShardId(index, 0), fooLock.getShardId());


for (Path path : env.indexPaths("foo")) {
for (Path path : env.indexPaths(index)) {
Files.createDirectories(path.resolve("0"));
Files.createDirectories(path.resolve("1"));
}

try {
env.deleteShardDirectorySafe(new ShardId("foo", "_na_", 0), idxSettings);
env.deleteShardDirectorySafe(new ShardId(index, 0), idxSettings);
fail("shard is locked");
} catch (LockObtainFailedException ex) {
// expected
}

for (Path path : env.indexPaths("foo")) {
for (Path path : env.indexPaths(index)) {
assertTrue(Files.exists(path.resolve("0")));
assertTrue(Files.exists(path.resolve("1")));

}

env.deleteShardDirectorySafe(new ShardId("foo", "_na_", 1), idxSettings);
env.deleteShardDirectorySafe(new ShardId(index, 1), idxSettings);

for (Path path : env.indexPaths("foo")) {
for (Path path : env.indexPaths(index)) {
assertTrue(Files.exists(path.resolve("0")));
assertFalse(Files.exists(path.resolve("1")));
}

try {
env.deleteIndexDirectorySafe(new Index("foo", "_na_"), randomIntBetween(0, 10), idxSettings);
env.deleteIndexDirectorySafe(index, randomIntBetween(0, 10), idxSettings);
fail("shard is locked");
} catch (LockObtainFailedException ex) {
// expected
}
fooLock.close();

for (Path path : env.indexPaths("foo")) {
for (Path path : env.indexPaths(index)) {
assertTrue(Files.exists(path));
}

@ -242,7 +276,7 @@ public class NodeEnvironmentTests extends ESTestCase {
@Override
protected void doRun() throws Exception {
start.await();
try (ShardLock autoCloses = env.shardLock(new ShardId("foo", "_na_", 0))) {
try (ShardLock autoCloses = env.shardLock(new ShardId(index, 0))) {
blockLatch.countDown();
Thread.sleep(randomIntBetween(1, 10));
}
@ -257,11 +291,11 @@ public class NodeEnvironmentTests extends ESTestCase {
start.countDown();
blockLatch.await();

env.deleteIndexDirectorySafe(new Index("foo", "_na_"), 5000, idxSettings);
env.deleteIndexDirectorySafe(index, 5000, idxSettings);

assertNull(threadException.get());

for (Path path : env.indexPaths("foo")) {
for (Path path : env.indexPaths(index)) {
assertFalse(Files.exists(path));
}
latch.await();
@ -300,7 +334,7 @@ public class NodeEnvironmentTests extends ESTestCase {
for (int i = 0; i < iters; i++) {
int shard = randomIntBetween(0, counts.length - 1);
try {
try (ShardLock autoCloses = env.shardLock(new ShardId("foo", "_na_", shard), scaledRandomIntBetween(0, 10))) {
try (ShardLock autoCloses = env.shardLock(new ShardId("foo", "fooUUID", shard), scaledRandomIntBetween(0, 10))) {
counts[shard].value++;
countsAtomic[shard].incrementAndGet();
assertEquals(flipFlop[shard].incrementAndGet(), 1);
@ -334,37 +368,38 @@ public class NodeEnvironmentTests extends ESTestCase {
String[] dataPaths = tmpPaths();
NodeEnvironment env = newNodeEnvironment(dataPaths, "/tmp", Settings.EMPTY);

IndexSettings s1 = IndexSettingsModule.newIndexSettings("myindex", Settings.EMPTY);
IndexSettings s2 = IndexSettingsModule.newIndexSettings("myindex", Settings.builder().put(IndexMetaData.SETTING_DATA_PATH, "/tmp/foo").build());
Index index = new Index("myindex", "_na_");
final Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "myindexUUID").build();
IndexSettings s1 = IndexSettingsModule.newIndexSettings("myindex", indexSettings);
IndexSettings s2 = IndexSettingsModule.newIndexSettings("myindex", Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_DATA_PATH, "/tmp/foo").build());
Index index = new Index("myindex", "myindexUUID");
ShardId sid = new ShardId(index, 0);

assertFalse("no settings should mean no custom data path", s1.hasCustomDataPath());
assertTrue("settings with path_data should have a custom data path", s2.hasCustomDataPath());

assertThat(env.availableShardPaths(sid), equalTo(env.availableShardPaths(sid)));
assertThat(env.resolveCustomLocation(s2, sid), equalTo(PathUtils.get("/tmp/foo/0/myindex/0")));
assertThat(env.resolveCustomLocation(s2, sid), equalTo(PathUtils.get("/tmp/foo/0/" + index.getUUID() + "/0")));

assertThat("shard paths with a custom data_path should contain only regular paths",
env.availableShardPaths(sid),
equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/myindex/0")));
equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/" + index.getUUID() + "/0")));

assertThat("index paths uses the regular template",
env.indexPaths(index.getName()), equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/myindex")));
env.indexPaths(index), equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/" + index.getUUID())));

env.close();
NodeEnvironment env2 = newNodeEnvironment(dataPaths, "/tmp",
Settings.builder().put(NodeEnvironment.ADD_NODE_ID_TO_CUSTOM_PATH.getKey(), false).build());

assertThat(env2.availableShardPaths(sid), equalTo(env2.availableShardPaths(sid)));
assertThat(env2.resolveCustomLocation(s2, sid), equalTo(PathUtils.get("/tmp/foo/myindex/0")));
assertThat(env2.resolveCustomLocation(s2, sid), equalTo(PathUtils.get("/tmp/foo/" + index.getUUID() + "/0")));

assertThat("shard paths with a custom data_path should contain only regular paths",
env2.availableShardPaths(sid),
equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/myindex/0")));
equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/" + index.getUUID() + "/0")));

assertThat("index paths uses the regular template",
env2.indexPaths(index.getName()), equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/myindex")));
env2.indexPaths(index), equalTo(stringsToPaths(dataPaths, "elasticsearch/nodes/0/indices/" + index.getUUID())));

env2.close();
}
|
@ -29,6 +29,7 @@ import org.hamcrest.Matchers;

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.Map;

import static org.hamcrest.Matchers.equalTo;
@ -53,6 +54,47 @@ public class DanglingIndicesStateTests extends ESTestCase {
assertTrue(danglingState.getDanglingIndices().isEmpty());
}
}
public void testDanglingIndicesDiscovery() throws Exception {
try (NodeEnvironment env = newNodeEnvironment()) {
MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null);

assertTrue(danglingState.getDanglingIndices().isEmpty());
MetaData metaData = MetaData.builder().build();
final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, "test1UUID");
IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(settings).build();
metaStateService.writeIndex("test_write", dangledIndex);
Map<Index, IndexMetaData> newDanglingIndices = danglingState.findNewDanglingIndices(metaData);
assertTrue(newDanglingIndices.containsKey(dangledIndex.getIndex()));
metaData = MetaData.builder().put(dangledIndex, false).build();
newDanglingIndices = danglingState.findNewDanglingIndices(metaData);
assertFalse(newDanglingIndices.containsKey(dangledIndex.getIndex()));
}
}

public void testInvalidIndexFolder() throws Exception {
try (NodeEnvironment env = newNodeEnvironment()) {
MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null);

MetaData metaData = MetaData.builder().build();
final String uuid = "test1UUID";
final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, uuid);
IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(settings).build();
metaStateService.writeIndex("test_write", dangledIndex);
for (Path path : env.resolveIndexFolder(uuid)) {
if (Files.exists(path)) {
Files.move(path, path.resolveSibling("invalidUUID"), StandardCopyOption.ATOMIC_MOVE);
}
}
try {
danglingState.findNewDanglingIndices(metaData);
fail("no exception thrown for invalid folder name");
} catch (IllegalStateException e) {
assertThat(e.getMessage(), equalTo("[invalidUUID] invalid index folder name, rename to [test1UUID]"));
}
}
}

public void testDanglingProcessing() throws Exception {
try (NodeEnvironment env = newNodeEnvironment()) {
@ -61,15 +103,16 @@ public class DanglingIndicesStateTests extends ESTestCase {

MetaData metaData = MetaData.builder().build();

IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(indexSettings).build();
metaStateService.writeIndex("test_write", dangledIndex, null);
final Settings.Builder settings = Settings.builder().put(indexSettings).put(IndexMetaData.SETTING_INDEX_UUID, "test1UUID");
IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(settings).build();
metaStateService.writeIndex("test_write", dangledIndex);

// check that several runs when not in the metadata still keep the dangled index around
int numberOfChecks = randomIntBetween(1, 10);
for (int i = 0; i < numberOfChecks; i++) {
Map<String, IndexMetaData> newDanglingIndices = danglingState.findNewDanglingIndices(metaData);
Map<Index, IndexMetaData> newDanglingIndices = danglingState.findNewDanglingIndices(metaData);
assertThat(newDanglingIndices.size(), equalTo(1));
assertThat(newDanglingIndices.keySet(), Matchers.hasItems("test1"));
assertThat(newDanglingIndices.keySet(), Matchers.hasItems(dangledIndex.getIndex()));
assertTrue(danglingState.getDanglingIndices().isEmpty());
}

@ -77,7 +120,7 @@ public class DanglingIndicesStateTests extends ESTestCase {
danglingState.findNewAndAddDanglingIndices(metaData);

assertThat(danglingState.getDanglingIndices().size(), equalTo(1));
assertThat(danglingState.getDanglingIndices().keySet(), Matchers.hasItems("test1"));
assertThat(danglingState.getDanglingIndices().keySet(), Matchers.hasItems(dangledIndex.getIndex()));
}

// simulate allocation to the metadata
@ -85,35 +128,15 @@ public class DanglingIndicesStateTests extends ESTestCase {

// check that several runs when in the metadata, but not cleaned yet, still keeps dangled
for (int i = 0; i < numberOfChecks; i++) {
Map<String, IndexMetaData> newDanglingIndices = danglingState.findNewDanglingIndices(metaData);
Map<Index, IndexMetaData> newDanglingIndices = danglingState.findNewDanglingIndices(metaData);
assertTrue(newDanglingIndices.isEmpty());

assertThat(danglingState.getDanglingIndices().size(), equalTo(1));
assertThat(danglingState.getDanglingIndices().keySet(), Matchers.hasItems("test1"));
assertThat(danglingState.getDanglingIndices().keySet(), Matchers.hasItems(dangledIndex.getIndex()));
}

danglingState.cleanupAllocatedDangledIndices(metaData);
assertTrue(danglingState.getDanglingIndices().isEmpty());
}
}

public void testRenameOfIndexState() throws Exception {
try (NodeEnvironment env = newNodeEnvironment()) {
MetaStateService metaStateService = new MetaStateService(Settings.EMPTY, env);
DanglingIndicesState danglingState = new DanglingIndicesState(Settings.EMPTY, env, metaStateService, null);

MetaData metaData = MetaData.builder().build();

IndexMetaData dangledIndex = IndexMetaData.builder("test1").settings(indexSettings).build();
metaStateService.writeIndex("test_write", dangledIndex, null);

for (Path path : env.indexPaths("test1")) {
Files.move(path, path.getParent().resolve("test1_renamed"));
}

Map<String, IndexMetaData> newDanglingIndices = danglingState.findNewDanglingIndices(metaData);
assertThat(newDanglingIndices.size(), equalTo(1));
assertThat(newDanglingIndices.keySet(), Matchers.hasItems("test1_renamed"));
}
}
}
|
@ -21,6 +21,7 @@ package org.elasticsearch.gateway;

import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
@ -68,14 +69,15 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
index(index, "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
ensureGreen();
assertIndexInMetaState(node1, index);
assertIndexDirectoryDeleted(node2, index);
Index resolveIndex = resolveIndex(index);
assertIndexDirectoryDeleted(node2, resolveIndex);
assertIndexInMetaState(masterNode, index);

logger.debug("relocating index...");
client().admin().indices().prepareUpdateSettings(index).setSettings(Settings.builder().put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node2)).get();
client().admin().cluster().prepareHealth().setWaitForRelocatingShards(0).get();
ensureGreen();
assertIndexDirectoryDeleted(node1, index);
assertIndexDirectoryDeleted(node1, resolveIndex);
assertIndexInMetaState(node2, index);
assertIndexInMetaState(masterNode, index);
}
@ -146,10 +148,10 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
assertThat(indicesMetaData.get(index).getState(), equalTo(IndexMetaData.State.OPEN));
}

protected void assertIndexDirectoryDeleted(final String nodeName, final String indexName) throws Exception {
protected void assertIndexDirectoryDeleted(final String nodeName, final Index index) throws Exception {
assertBusy(() -> {
logger.info("checking if index directory exists...");
assertFalse("Expecting index directory of " + indexName + " to be deleted from node " + nodeName, indexDirectoryExists(nodeName, indexName));
assertFalse("Expecting index directory of " + index + " to be deleted from node " + nodeName, indexDirectoryExists(nodeName, index));
}
);
}
@ -168,9 +170,9 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
}


private boolean indexDirectoryExists(String nodeName, String indexName) {
private boolean indexDirectoryExists(String nodeName, Index index) {
NodeEnvironment nodeEnv = ((InternalTestCluster) cluster()).getInstance(NodeEnvironment.class, nodeName);
for (Path path : nodeEnv.indexPaths(indexName)) {
for (Path path : nodeEnv.indexPaths(index)) {
if (Files.exists(path)) {
return true;
}
|
@ -24,6 +24,7 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.Matchers.equalTo;
@ -43,15 +44,15 @@ public class MetaStateServiceTests extends ESTestCase {
MetaStateService metaStateService = new MetaStateService(randomSettings(), env);

IndexMetaData index = IndexMetaData.builder("test1").settings(indexSettings).build();
metaStateService.writeIndex("test_write", index, null);
assertThat(metaStateService.loadIndexState("test1"), equalTo(index));
metaStateService.writeIndex("test_write", index);
assertThat(metaStateService.loadIndexState(index.getIndex()), equalTo(index));
}
}

public void testLoadMissingIndex() throws Exception {
try (NodeEnvironment env = newNodeEnvironment()) {
MetaStateService metaStateService = new MetaStateService(randomSettings(), env);
assertThat(metaStateService.loadIndexState("test1"), nullValue());
assertThat(metaStateService.loadIndexState(new Index("test1", "test1UUID")), nullValue());
}
}

@ -94,7 +95,7 @@ public class MetaStateServiceTests extends ESTestCase {
.build();

metaStateService.writeGlobalState("test_write", metaData);
metaStateService.writeIndex("test_write", index, null);
metaStateService.writeIndex("test_write", index);

MetaData loadedState = metaStateService.loadFullState();
assertThat(loadedState.persistentSettings(), equalTo(metaData.persistentSettings()));
|
@ -70,6 +70,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.NodeServicesProvider;
@ -97,6 +98,7 @@ import org.elasticsearch.test.FieldMaskingReader;
import org.elasticsearch.test.IndexSettingsModule;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.junit.annotations.TestLogging;

import java.io.IOException;
import java.nio.file.Files;
@ -141,25 +143,25 @@ public class IndexShardTests extends ESSingleNodeTestCase {

public void testWriteShardState() throws Exception {
try (NodeEnvironment env = newNodeEnvironment()) {
ShardId id = new ShardId("foo", "_na_", 1);
ShardId id = new ShardId("foo", "fooUUID", 1);
long version = between(1, Integer.MAX_VALUE / 2);
boolean primary = randomBoolean();
AllocationId allocationId = randomBoolean() ? null : randomAllocationId();
ShardStateMetaData state1 = new ShardStateMetaData(version, primary, "foo", allocationId);
ShardStateMetaData state1 = new ShardStateMetaData(version, primary, "fooUUID", allocationId);
write(state1, env.availableShardPaths(id));
ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(id));
assertEquals(shardStateMetaData, state1);

ShardStateMetaData state2 = new ShardStateMetaData(version, primary, "foo", allocationId);
ShardStateMetaData state2 = new ShardStateMetaData(version, primary, "fooUUID", allocationId);
write(state2, env.availableShardPaths(id));
shardStateMetaData = load(logger, env.availableShardPaths(id));
assertEquals(shardStateMetaData, state1);

ShardStateMetaData state3 = new ShardStateMetaData(version + 1, primary, "foo", allocationId);
ShardStateMetaData state3 = new ShardStateMetaData(version + 1, primary, "fooUUID", allocationId);
write(state3, env.availableShardPaths(id));
shardStateMetaData = load(logger, env.availableShardPaths(id));
assertEquals(shardStateMetaData, state3);
assertEquals("foo", state3.indexUUID);
assertEquals("fooUUID", state3.indexUUID);
}
}

@ -167,7 +169,9 @@ public class IndexShardTests extends ESSingleNodeTestCase {
createIndex("test");
ensureGreen();
NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
Path[] shardPaths = env.availableShardPaths(new ShardId("test", "_na_", 0));
ClusterService cs = getInstanceFromNode(ClusterService.class);
final Index index = cs.state().metaData().index("test").getIndex();
Path[] shardPaths = env.availableShardPaths(new ShardId(index, 0));
logger.info("--> paths: [{}]", (Object)shardPaths);
// Should not be able to acquire the lock because it's already open
try {
@ -179,7 +183,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
// Test without the regular shard lock to assume we can acquire it
// (worst case, meaning that the shard lock could be acquired and
// we're green to delete the shard's directory)
ShardLock sLock = new DummyShardLock(new ShardId("test", "_na_", 0));
ShardLock sLock = new DummyShardLock(new ShardId(index, 0));
try {
env.deleteShardDirectoryUnderLock(sLock, IndexSettingsModule.newIndexSettings("test", Settings.EMPTY));
fail("should not have been able to delete the directory");
|
@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;

@ -42,13 +43,13 @@ public class ShardPathTests extends ESTestCase {
Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, "0xDEADBEEF")
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
Settings settings = builder.build();
ShardId shardId = new ShardId("foo", "_na_", 0);
ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0);
Path[] paths = env.availableShardPaths(shardId);
Path path = randomFrom(paths);
ShardStateMetaData.FORMAT.write(new ShardStateMetaData(2, true, "0xDEADBEEF", AllocationId.newInitializing()), 2, path);
ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings));
assertEquals(path, shardPath.getDataPath());
assertEquals("0xDEADBEEF", shardPath.getIndexUUID());
assertEquals("0xDEADBEEF", shardPath.getShardId().getIndex().getUUID());
assertEquals("foo", shardPath.getShardId().getIndexName());
assertEquals(path.resolve("translog"), shardPath.resolveTranslog());
assertEquals(path.resolve("index"), shardPath.resolveIndex());
@ -57,14 +58,15 @@ public class ShardPathTests extends ESTestCase {

public void testFailLoadShardPathOnMultiState() throws IOException {
try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) {
Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, "0xDEADBEEF")
final String indexUUID = "0xDEADBEEF";
Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, indexUUID)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
Settings settings = builder.build();
ShardId shardId = new ShardId("foo", "_na_", 0);
ShardId shardId = new ShardId("foo", indexUUID, 0);
Path[] paths = env.availableShardPaths(shardId);
assumeTrue("This test tests multi data.path but we only got one", paths.length > 1);
int id = randomIntBetween(1, 10);
ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, "0xDEADBEEF", AllocationId.newInitializing()), id, paths);
ShardStateMetaData.FORMAT.write(new ShardStateMetaData(id, true, indexUUID, AllocationId.newInitializing()), id, paths);
ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings));
fail("Expected IllegalStateException");
} catch (IllegalStateException e) {
@ -77,7 +79,7 @@ public class ShardPathTests extends ESTestCase {
Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, "foobar")
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
Settings settings = builder.build();
ShardId shardId = new ShardId("foo", "_na_", 0);
ShardId shardId = new ShardId("foo", "foobar", 0);
Path[] paths = env.availableShardPaths(shardId);
Path path = randomFrom(paths);
int id = randomIntBetween(1, 10);
@ -90,9 +92,10 @@ public class ShardPathTests extends ESTestCase {
}

public void testIllegalCustomDataPath() {
final Path path = createTempDir().resolve("foo").resolve("0");
Index index = new Index("foo", "foo");
final Path path = createTempDir().resolve(index.getUUID()).resolve("0");
try {
new ShardPath(true, path, path, "foo", new ShardId("foo", "_na_", 0));
new ShardPath(true, path, path, new ShardId(index, 0));
fail("Expected IllegalArgumentException");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), is("shard state path must be different to the data path when using custom data paths"));
@ -100,8 +103,9 @@ public class ShardPathTests extends ESTestCase {
}

public void testValidCtor() {
final Path path = createTempDir().resolve("foo").resolve("0");
ShardPath shardPath = new ShardPath(false, path, path, "foo", new ShardId("foo", "_na_", 0));
Index index = new Index("foo", "foo");
final Path path = createTempDir().resolve(index.getUUID()).resolve("0");
ShardPath shardPath = new ShardPath(false, path, path, new ShardId(index, 0));
assertFalse(shardPath.isCustomDataPath());
assertEquals(shardPath.getDataPath(), path);
assertEquals(shardPath.getShardStatePath(), path);
@ -111,8 +115,9 @@ public class ShardPathTests extends ESTestCase {
boolean useCustomDataPath = randomBoolean();
final Settings indexSettings;
final Settings nodeSettings;
final String indexUUID = "0xDEADBEEF";
Settings.Builder indexSettingsBuilder = settingsBuilder()
.put(IndexMetaData.SETTING_INDEX_UUID, "0xDEADBEEF")
.put(IndexMetaData.SETTING_INDEX_UUID, indexUUID)
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
final Path customPath;
if (useCustomDataPath) {
@ -132,10 +137,10 @@ public class ShardPathTests extends ESTestCase {
nodeSettings = Settings.EMPTY;
}
try (final NodeEnvironment env = newNodeEnvironment(nodeSettings)) {
ShardId shardId = new ShardId("foo", "_na_", 0);
ShardId shardId = new ShardId("foo", indexUUID, 0);
Path[] paths = env.availableShardPaths(shardId);
Path path = randomFrom(paths);
ShardStateMetaData.FORMAT.write(new ShardStateMetaData(2, true, "0xDEADBEEF", AllocationId.newInitializing()), 2, path);
ShardStateMetaData.FORMAT.write(new ShardStateMetaData(2, true, indexUUID, AllocationId.newInitializing()), 2, path);
ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), indexSettings));
boolean found = false;
for (Path p : env.nodeDataPaths()) {
|
@ -49,6 +49,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.shard.IndexEventListener;
@ -571,8 +572,9 @@ public class CorruptedFileIT extends ESIntegTestCase {
private Map<String, List<Path>> findFilesToCorruptForReplica() throws IOException {
Map<String, List<Path>> filesToNodes = new HashMap<>();
ClusterState state = client().admin().cluster().prepareState().get().getState();
Index test = state.metaData().index("test").getIndex();
for (ShardRouting shardRouting : state.getRoutingTable().allShards("test")) {
if (shardRouting.primary() == true) {
if (shardRouting.primary()) {
continue;
}
assertTrue(shardRouting.assignedToNode());
@ -582,8 +584,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
filesToNodes.put(nodeStats.getNode().getName(), files);
for (FsInfo.Path info : nodeStats.getFs()) {
String path = info.getPath();
final String relativeDataLocationPath = "indices/test/" + Integer.toString(shardRouting.getId()) + "/index";
Path file = PathUtils.get(path).resolve(relativeDataLocationPath);
Path file = PathUtils.get(path).resolve("indices").resolve(test.getUUID()).resolve(Integer.toString(shardRouting.getId())).resolve("index");
if (Files.exists(file)) { // multi data path might only have one path in use
try (DirectoryStream<Path> stream = Files.newDirectoryStream(file)) {
for (Path item : stream) {
@ -604,6 +605,7 @@ public class CorruptedFileIT extends ESIntegTestCase {

private ShardRouting corruptRandomPrimaryFile(final boolean includePerCommitFiles) throws IOException {
ClusterState state = client().admin().cluster().prepareState().get().getState();
Index test = state.metaData().index("test").getIndex();
GroupShardsIterator shardIterators = state.getRoutingNodes().getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
List<ShardIterator> iterators = iterableAsArrayList(shardIterators);
ShardIterator shardIterator = RandomPicks.randomFrom(getRandom(), iterators);
@ -616,8 +618,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
Set<Path> files = new TreeSet<>(); // treeset makes sure iteration order is deterministic
for (FsInfo.Path info : nodeStatses.getNodes()[0].getFs()) {
String path = info.getPath();
final String relativeDataLocationPath = "indices/test/" + Integer.toString(shardRouting.getId()) + "/index";
Path file = PathUtils.get(path).resolve(relativeDataLocationPath);
Path file = PathUtils.get(path).resolve("indices").resolve(test.getUUID()).resolve(Integer.toString(shardRouting.getId())).resolve("index");
if (Files.exists(file)) { // multi data path might only have one path in use
try (DirectoryStream<Path> stream = Files.newDirectoryStream(file)) {
for (Path item : stream) {
@ -676,12 +677,13 @@ public class CorruptedFileIT extends ESIntegTestCase {

public List<Path> listShardFiles(ShardRouting routing) throws IOException {
NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(routing.currentNodeId()).setFs(true).get();

ClusterState state = client().admin().cluster().prepareState().get().getState();
final Index test = state.metaData().index("test").getIndex();
assertThat(routing.toString(), nodeStatses.getNodes().length, equalTo(1));
List<Path> files = new ArrayList<>();
for (FsInfo.Path info : nodeStatses.getNodes()[0].getFs()) {
String path = info.getPath();
Path file = PathUtils.get(path).resolve("indices/test/" + Integer.toString(routing.getId()) + "/index");
Path file = PathUtils.get(path).resolve("indices/" + test.getUUID() + "/" + Integer.toString(routing.getId()) + "/index");
if (Files.exists(file)) { // multi data path might only have one path in use
try (DirectoryStream<Path> stream = Files.newDirectoryStream(file)) {
for (Path item : stream) {
|
@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MockEngineFactoryPlugin;
import org.elasticsearch.monitor.fs.FsInfo;
@ -110,6 +111,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
private void corruptRandomTranslogFiles() throws IOException {
ClusterState state = client().admin().cluster().prepareState().get().getState();
GroupShardsIterator shardIterators = state.getRoutingNodes().getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false);
final Index test = state.metaData().index("test").getIndex();
List<ShardIterator> iterators = iterableAsArrayList(shardIterators);
ShardIterator shardIterator = RandomPicks.randomFrom(getRandom(), iterators);
ShardRouting shardRouting = shardIterator.nextOrNull();
@ -121,7 +123,7 @@ public class CorruptedTranslogIT extends ESIntegTestCase {
Set<Path> files = new TreeSet<>(); // treeset makes sure iteration order is deterministic
for (FsInfo.Path fsPath : nodeStatses.getNodes()[0].getFs()) {
String path = fsPath.getPath();
final String relativeDataLocationPath = "indices/test/" + Integer.toString(shardRouting.getId()) + "/translog";
final String relativeDataLocationPath = "indices/"+ test.getUUID() +"/" + Integer.toString(shardRouting.getId()) + "/translog";
Path file = PathUtils.get(path).resolve(relativeDataLocationPath);
if (Files.exists(file)) {
logger.info("--> path: {}", file);
|
@ -46,9 +46,9 @@ public class FsDirectoryServiceTests extends ESTestCase {
IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build);
IndexStoreConfig config = new IndexStoreConfig(build);
IndexStore store = new IndexStore(settings, config);
Path tempDir = createTempDir().resolve("foo").resolve("0");
Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0");
Files.createDirectories(tempDir);
ShardPath path = new ShardPath(false, tempDir, tempDir, settings.getUUID(), new ShardId(settings.getIndex(), 0));
ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0));
FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path);
Directory directory = fsDirectoryService.newDirectory();
assertTrue(directory instanceof RateLimitedFSDirectory);
@ -62,9 +62,9 @@ public class FsDirectoryServiceTests extends ESTestCase {
IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build);
IndexStoreConfig config = new IndexStoreConfig(build);
IndexStore store = new IndexStore(settings, config);
Path tempDir = createTempDir().resolve("foo").resolve("0");
Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0");
Files.createDirectories(tempDir);
ShardPath path = new ShardPath(false, tempDir, tempDir, settings.getUUID(), new ShardId(settings.getIndex(), 0));
ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0));
FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path);
Directory directory = fsDirectoryService.newDirectory();
assertTrue(directory instanceof RateLimitedFSDirectory);
|
@ -47,13 +47,14 @@ import java.util.Locale;
public class IndexStoreTests extends ESTestCase {

public void testStoreDirectory() throws IOException {
final Path tempDir = createTempDir().resolve("foo").resolve("0");
Index index = new Index("foo", "fooUUID");
final Path tempDir = createTempDir().resolve(index.getUUID()).resolve("0");
final IndexModule.Type[] values = IndexModule.Type.values();
final IndexModule.Type type = RandomPicks.randomFrom(random(), values);
Settings settings = Settings.settingsBuilder().put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), type.name().toLowerCase(Locale.ROOT))
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build();
IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings);
FsDirectoryService service = new FsDirectoryService(indexSettings, null, new ShardPath(false, tempDir, tempDir, "foo", new ShardId("foo", "_na_", 0)));
FsDirectoryService service = new FsDirectoryService(indexSettings, null, new ShardPath(false, tempDir, tempDir, new ShardId(index, 0)));
try (final Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) {
switch (type) {
case NIOFS:
@ -84,8 +85,9 @@ public class IndexStoreTests extends ESTestCase {
}

public void testStoreDirectoryDefault() throws IOException {
final Path tempDir = createTempDir().resolve("foo").resolve("0");
FsDirectoryService service = new FsDirectoryService(IndexSettingsModule.newIndexSettings("foo", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()), null, new ShardPath(false, tempDir, tempDir, "foo", new ShardId("foo", "_na_", 0)));
Index index = new Index("bar", "foo");
final Path tempDir = createTempDir().resolve(index.getUUID()).resolve("0");
FsDirectoryService service = new FsDirectoryService(IndexSettingsModule.newIndexSettings("bar", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build()), null, new ShardPath(false, tempDir, tempDir, new ShardId(index, 0)));
try (final Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) {
if (Constants.WINDOWS) {
assertTrue(directory.toString(), directory instanceof MMapDirectory || directory instanceof SimpleFSDirectory);
|
@ -112,12 +112,14 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1))
);
ensureGreen("test");
ClusterState state = client().admin().cluster().prepareState().get().getState();
Index index = state.metaData().index("test").getIndex();

logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2");
assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true));
assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true));
assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true));
assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(true));

logger.info("--> starting node server3");
final String node_3 = internalCluster().startNode(Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false));
@ -128,12 +130,12 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
.get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));

assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true));
assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true));
assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(false));
assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(false));
assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true));
assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(true));
assertThat(Files.exists(shardDirectory(node_3, index, 0)), equalTo(false));
assertThat(Files.exists(indexDirectory(node_3, index)), equalTo(false));

logger.info("--> move shard from node_1 to node_3, and wait for relocation to finish");

@ -161,12 +163,12 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
.get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));

assertThat(waitForShardDeletion(node_1, "test", 0), equalTo(false));
assertThat(waitForIndexDeletion(node_1, "test"), equalTo(false));
assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true));
assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(true));
assertThat(waitForShardDeletion(node_1, index, 0), equalTo(false));
assertThat(waitForIndexDeletion(node_1, index), equalTo(false));
assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(true));
assertThat(Files.exists(shardDirectory(node_3, index, 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_3, index)), equalTo(true));

}

@ -180,16 +182,18 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
);
ensureGreen("test");
assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true));
ClusterState state = client().admin().cluster().prepareState().get().getState();
Index index = state.metaData().index("test").getIndex();
assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true));

final String node_2 = internalCluster().startDataOnlyNode(Settings.builder().build());
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());

assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true));
assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(false));
assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(false));
assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(true));
assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(false));
assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(false));

// add a transport delegate that will prevent the shard active request to succeed the first time after relocation has finished.
// node_1 will then wait for the next cluster state change before it tries a next attempt to delete the shard.
@ -220,14 +224,14 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
// it must still delete the shard, even if it cannot find it anymore in indicesservice
client().admin().indices().prepareDelete("test").get();

assertThat(waitForShardDeletion(node_1, "test", 0), equalTo(false));
assertThat(waitForIndexDeletion(node_1, "test"), equalTo(false));
assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(false));
assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(false));
assertThat(waitForShardDeletion(node_2, "test", 0), equalTo(false));
assertThat(waitForIndexDeletion(node_2, "test"), equalTo(false));
assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(false));
assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(false));
assertThat(waitForShardDeletion(node_1, index, 0), equalTo(false));
assertThat(waitForIndexDeletion(node_1, index), equalTo(false));
assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(false));
assertThat(Files.exists(indexDirectory(node_1, index)), equalTo(false));
assertThat(waitForShardDeletion(node_2, index, 0), equalTo(false));
assertThat(waitForIndexDeletion(node_2, index), equalTo(false));
assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(false));
assertThat(Files.exists(indexDirectory(node_2, index)), equalTo(false));
}

public void testShardsCleanup() throws Exception {
@ -241,9 +245,11 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
);
ensureGreen("test");

ClusterState state = client().admin().cluster().prepareState().get().getState();
Index index = state.metaData().index("test").getIndex();
logger.info("--> making sure that shard and its replica are allocated on node_1 and node_2");
assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(true));
assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
assertThat(Files.exists(shardDirectory(node_2, index, 0)), equalTo(true));

logger.info("--> starting node server3");
String node_3 = internalCluster().startNode();
@ -255,10 +261,10 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
assertThat(clusterHealth.isTimedOut(), equalTo(false));

logger.info("--> making sure that shard is not allocated on server3");
assertThat(waitForShardDeletion(node_3, "test", 0), equalTo(false));
assertThat(waitForShardDeletion(node_3, index, 0), equalTo(false));

Path server2Shard = shardDirectory(node_2, "test", 0);
logger.info("--> stopping node {}", node_2);
Path server2Shard = shardDirectory(node_2, index, 0);
logger.info("--> stopping node " + node_2);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node_2));

logger.info("--> running cluster_health");
@ -273,9 +279,9 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
assertThat(Files.exists(server2Shard), equalTo(true));

logger.info("--> making sure that shard and its replica exist on server1, server2 and server3");
assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
assertThat(Files.exists(server2Shard), equalTo(true));
assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(true));
assertThat(Files.exists(shardDirectory(node_3, index, 0)), equalTo(true));

logger.info("--> starting node node_4");
final String node_4 = internalCluster().startNode();
@ -284,9 +290,9 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
ensureGreen();

logger.info("--> making sure that shard and its replica are allocated on server1 and server3 but not on server2");
assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(true));
assertThat(waitForShardDeletion(node_4, "test", 0), equalTo(false));
assertThat(Files.exists(shardDirectory(node_1, index, 0)), equalTo(true));
assertThat(Files.exists(shardDirectory(node_3, index, 0)), equalTo(true));
assertThat(waitForShardDeletion(node_4, index, 0), equalTo(false));
}

public void testShardActiveElsewhereDoesNotDeleteAnother() throws Exception {
@ -426,30 +432,30 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
waitNoPendingTasksOnAll();
logger.info("Checking if shards aren't removed");
for (int shard : node2Shards) {
assertTrue(waitForShardDeletion(nonMasterNode, "test", shard));
assertTrue(waitForShardDeletion(nonMasterNode, index, shard));
}
}

private Path indexDirectory(String server, String index) {
private Path indexDirectory(String server, Index index) {
NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server);
final Path[] paths = env.indexPaths(index);
assert paths.length == 1;
return paths[0];
}

private Path shardDirectory(String server, String index, int shard) {
private Path shardDirectory(String server, Index index, int shard) {
NodeEnvironment env = internalCluster().getInstance(NodeEnvironment.class, server);
final Path[] paths = env.availableShardPaths(new ShardId(index, "_na_", shard));
final Path[] paths = env.availableShardPaths(new ShardId(index, shard));
assert paths.length == 1;
return paths[0];
}

private boolean waitForShardDeletion(final String server, final String index, final int shard) throws InterruptedException {
private boolean waitForShardDeletion(final String server, final Index index, final int shard) throws InterruptedException {
awaitBusy(() -> !Files.exists(shardDirectory(server, index, shard)));
return Files.exists(shardDirectory(server, index, shard));
}

private boolean waitForIndexDeletion(final String server, final String index) throws InterruptedException {
|
||||
private boolean waitForIndexDeletion(final String server, final Index index) throws InterruptedException {
|
||||
awaitBusy(() -> !Files.exists(indexDirectory(server, index)));
|
||||
return Files.exists(indexDirectory(server, index));
|
||||
}
|
||||
|
@ -0,0 +1,69 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.ingest;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;

public class IngestStatsTests extends ESTestCase {

public void testSerialization() throws IOException {
IngestStats.Stats total = new IngestStats.Stats(5, 10, 20, 30);
IngestStats.Stats foo = new IngestStats.Stats(50, 100, 200, 300);
IngestStats ingestStats = new IngestStats(total, Collections.singletonMap("foo", foo));
IngestStats serialize = serialize(ingestStats);
assertNotSame(serialize, ingestStats);
assertNotSame(serialize.getTotalStats(), total);
assertEquals(total.getIngestCount(), serialize.getTotalStats().getIngestCount());
assertEquals(total.getIngestFailedCount(), serialize.getTotalStats().getIngestFailedCount());
assertEquals(total.getIngestTimeInMillis(), serialize.getTotalStats().getIngestTimeInMillis());
assertEquals(total.getIngestCurrent(), serialize.getTotalStats().getIngestCurrent());

assertEquals(ingestStats.getStatsPerPipeline().size(), 1);
assertTrue(ingestStats.getStatsPerPipeline().containsKey("foo"));

Map<String, IngestStats.Stats> left = ingestStats.getStatsPerPipeline();
Map<String, IngestStats.Stats> right = serialize.getStatsPerPipeline();

assertEquals(right.size(), 1);
assertTrue(right.containsKey("foo"));
assertEquals(left.size(), 1);
assertTrue(left.containsKey("foo"));
IngestStats.Stats leftStats = left.get("foo");
IngestStats.Stats rightStats = right.get("foo");
assertEquals(leftStats.getIngestCount(), rightStats.getIngestCount());
assertEquals(leftStats.getIngestFailedCount(), rightStats.getIngestFailedCount());
assertEquals(leftStats.getIngestTimeInMillis(), rightStats.getIngestTimeInMillis());
assertEquals(leftStats.getIngestCurrent(), rightStats.getIngestCurrent());
}

private <T> T serialize(Writeable<T> writeable) throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
writeable.writeTo(out);
StreamInput in = StreamInput.wrap(out.bytes());
return writeable.readFrom(in);
}
}
@ -47,6 +47,7 @@ import org.elasticsearch.transport.TransportInfo;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -137,7 +138,7 @@ public class NodeInfoStreamingTests extends ESTestCase {
PluginsAndModules plugins = new PluginsAndModules();
plugins.addModule(DummyPluginInfo.INSTANCE);
plugins.addPlugin(DummyPluginInfo.INSTANCE);
IngestInfo ingestInfo = new IngestInfo();
IngestInfo ingestInfo = new IngestInfo(Collections.emptyList());
return new NodeInfo(VersionUtils.randomVersion(random()), build, node, serviceAttributes, settings, osInfo, process, jvm, threadPoolInfo, transport, htttpInfo, plugins, ingestInfo);
}
}
@ -19,8 +19,8 @@

package org.elasticsearch.search.sort;

import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
@ -43,7 +43,7 @@ import java.io.IOException;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;

public abstract class AbstractSortTestCase<T extends SortBuilder & NamedWriteable<T> & SortElementParserTemp<T>> extends ESTestCase {
public abstract class AbstractSortTestCase<T extends SortBuilder & SortBuilderParser<T>> extends ESTestCase {

protected static NamedWriteableRegistry namedWriteableRegistry;

@ -55,6 +55,7 @@ public abstract class AbstractSortTestCase<T extends SortBuilder & NamedWriteabl
namedWriteableRegistry = new NamedWriteableRegistry();
namedWriteableRegistry.registerPrototype(SortBuilder.class, GeoDistanceSortBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(SortBuilder.class, ScoreSortBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(SortBuilder.class, FieldSortBuilder.PROTOTYPE);
indicesQueriesRegistry = new SearchModule(Settings.EMPTY, namedWriteableRegistry).buildQueryParserRegistry();
}

@ -154,7 +155,8 @@ public abstract class AbstractSortTestCase<T extends SortBuilder & NamedWriteabl
try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) {
T prototype = (T) namedWriteableRegistry.getPrototype(SortBuilder.class,
original.getWriteableName());
return prototype.readFrom(in);
T copy = (T) prototype.readFrom(in);
return copy;
}
}
}
@ -0,0 +1,85 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.search.sort;

import java.io.IOException;

public class FieldSortBuilderTests extends AbstractSortTestCase<FieldSortBuilder> {

@Override
protected FieldSortBuilder createTestItem() {
String fieldName = randomAsciiOfLengthBetween(1, 10);
FieldSortBuilder builder = new FieldSortBuilder(fieldName);
if (randomBoolean()) {
builder.order(RandomSortDataGenerator.order(builder.order()));
}

if (randomBoolean()) {
builder.missing(RandomSortDataGenerator.missing(builder.missing()));
}

if (randomBoolean()) {
builder.unmappedType(RandomSortDataGenerator.randomAscii(builder.unmappedType()));
}

if (randomBoolean()) {
builder.sortMode(RandomSortDataGenerator.mode(builder.sortMode()));
}

if (randomBoolean()) {
builder.setNestedFilter(RandomSortDataGenerator.nestedFilter(builder.getNestedFilter()));
}

if (randomBoolean()) {
builder.setNestedPath(RandomSortDataGenerator.randomAscii(builder.getNestedPath()));
}

return builder;
}

@Override
protected FieldSortBuilder mutate(FieldSortBuilder original) throws IOException {
FieldSortBuilder mutated = new FieldSortBuilder(original);
int parameter = randomIntBetween(0, 5);
switch (parameter) {
case 0:
mutated.setNestedPath(RandomSortDataGenerator.randomAscii(mutated.getNestedPath()));
break;
case 1:
mutated.setNestedFilter(RandomSortDataGenerator.nestedFilter(mutated.getNestedFilter()));
break;
case 2:
mutated.sortMode(RandomSortDataGenerator.mode(mutated.sortMode()));
break;
case 3:
mutated.unmappedType(RandomSortDataGenerator.randomAscii(mutated.unmappedType()));
break;
case 4:
mutated.missing(RandomSortDataGenerator.missing(mutated.missing()));
break;
case 5:
mutated.order(RandomSortDataGenerator.order(mutated.order()));
break;
default:
throw new IllegalStateException("Unsupported mutation.");
}
return mutated;
}
}
@ -4,6 +4,17 @@
This section discusses the changes that you need to be aware of when migrating
your application to Elasticsearch 5.0.

[float]
=== Indices created before 5.0

Elasticsearch 5.0 can read indices created in version 2.0 and above. If any
of your indices were created before 2.0 you will need to upgrade to the
latest 2.x version of Elasticsearch first, in order to upgrade your indices or
to delete the old indices. Elasticsearch will not start in the presence of old
indices. To upgrade 2.x indices, first start a node which has access to all
the data folders and let it upgrade all the indices before starting up the
rest of the cluster.

[IMPORTANT]
.Reindex indices from Elasticsearch 1.x or before
=========================================
@ -9,3 +9,18 @@
nodes.info: {}

- match: { nodes.$master.modules.0.name: ingest-grok }
- match: { nodes.$master.ingest.processors.0.type: append }
- match: { nodes.$master.ingest.processors.1.type: convert }
- match: { nodes.$master.ingest.processors.2.type: date }
- match: { nodes.$master.ingest.processors.3.type: fail }
- match: { nodes.$master.ingest.processors.4.type: foreach }
- match: { nodes.$master.ingest.processors.5.type: grok }
- match: { nodes.$master.ingest.processors.6.type: gsub }
- match: { nodes.$master.ingest.processors.7.type: join }
- match: { nodes.$master.ingest.processors.8.type: lowercase }
- match: { nodes.$master.ingest.processors.9.type: remove }
- match: { nodes.$master.ingest.processors.10.type: rename }
- match: { nodes.$master.ingest.processors.11.type: set }
- match: { nodes.$master.ingest.processors.12.type: split }
- match: { nodes.$master.ingest.processors.13.type: trim }
- match: { nodes.$master.ingest.processors.14.type: uppercase }
@ -23,7 +23,7 @@ esplugin {
}

versions << [
'azure': '0.9.0',
'azure': '0.9.3',
'jersey': '1.13'
]
@ -1 +0,0 @@
050719f91deceed1be1aaf87e85099a861295fa2
@ -0,0 +1 @@
7fe32241b738aad0f700f4277fa998230c144ae7
@ -1 +0,0 @@
887ca8ee5564e8ba2351e6b5db2a1293a8d04674
@ -0,0 +1 @@
602d3e6f5a9f058c2439e8fdf1270cddc811b440
@ -8,4 +8,19 @@
nodes.info: {}

- match: { nodes.$master.plugins.0.name: ingest-attachment }
- match: { nodes.$master.ingest.processors.11.type: attachment }
- match: { nodes.$master.ingest.processors.0.type: append }
- match: { nodes.$master.ingest.processors.1.type: attachment }
- match: { nodes.$master.ingest.processors.2.type: convert }
- match: { nodes.$master.ingest.processors.3.type: date }
- match: { nodes.$master.ingest.processors.4.type: fail }
- match: { nodes.$master.ingest.processors.5.type: foreach }
- match: { nodes.$master.ingest.processors.6.type: gsub }
- match: { nodes.$master.ingest.processors.7.type: join }
- match: { nodes.$master.ingest.processors.8.type: lowercase }
- match: { nodes.$master.ingest.processors.9.type: remove }
- match: { nodes.$master.ingest.processors.10.type: rename }
- match: { nodes.$master.ingest.processors.11.type: set }
- match: { nodes.$master.ingest.processors.12.type: split }
- match: { nodes.$master.ingest.processors.13.type: trim }
- match: { nodes.$master.ingest.processors.14.type: uppercase }
@ -8,4 +8,18 @@
nodes.info: {}

- match: { nodes.$master.plugins.0.name: ingest-geoip }
- match: { nodes.$master.ingest.processors.3.type: geoip }
- match: { nodes.$master.ingest.processors.0.type: append }
- match: { nodes.$master.ingest.processors.1.type: convert }
- match: { nodes.$master.ingest.processors.2.type: date }
- match: { nodes.$master.ingest.processors.3.type: fail }
- match: { nodes.$master.ingest.processors.4.type: foreach }
- match: { nodes.$master.ingest.processors.5.type: geoip }
- match: { nodes.$master.ingest.processors.6.type: gsub }
- match: { nodes.$master.ingest.processors.7.type: join }
- match: { nodes.$master.ingest.processors.8.type: lowercase }
- match: { nodes.$master.ingest.processors.9.type: remove }
- match: { nodes.$master.ingest.processors.10.type: rename }
- match: { nodes.$master.ingest.processors.11.type: set }
- match: { nodes.$master.ingest.processors.12.type: split }
- match: { nodes.$master.ingest.processors.13.type: trim }
- match: { nodes.$master.ingest.processors.14.type: uppercase }
@ -52,6 +52,7 @@ public class Murmur3FieldMapperUpgradeTests extends ESIntegTestCase {

public void testUpgradeOldMapping() throws IOException, ExecutionException, InterruptedException {
final String indexName = "index-mapper-murmur3-2.0.0";
final String indexUUID = "1VzJds59TTK7lRu17W0mcg";
InternalTestCluster.Async<String> master = internalCluster().startNodeAsync();
Path unzipDir = createTempDir();
Path unzipDataDir = unzipDir.resolve("data");
@ -72,6 +73,7 @@ public class Murmur3FieldMapperUpgradeTests extends ESIntegTestCase {
assertFalse(Files.exists(dataPath));
Path src = unzipDataDir.resolve(indexName + "/nodes/0/indices");
Files.move(src, dataPath);
Files.move(dataPath.resolve(indexName), dataPath.resolve(indexUUID));

master.get();
// force reloading dangling indices with a cluster state republish
@ -53,6 +53,7 @@ public class SizeFieldMapperUpgradeTests extends ESIntegTestCase {

public void testUpgradeOldMapping() throws IOException, ExecutionException, InterruptedException {
final String indexName = "index-mapper-size-2.0.0";
final String indexUUID = "ENCw7sG0SWuTPcH60bHheg";
InternalTestCluster.Async<String> master = internalCluster().startNodeAsync();
Path unzipDir = createTempDir();
Path unzipDataDir = unzipDir.resolve("data");
@ -73,6 +74,7 @@ public class SizeFieldMapperUpgradeTests extends ESIntegTestCase {
assertFalse(Files.exists(dataPath));
Path src = unzipDataDir.resolve(indexName + "/nodes/0/indices");
Files.move(src, dataPath);
Files.move(dataPath.resolve(indexName), dataPath.resolve(indexUUID));
master.get();
// force reloading dangling indices with a cluster state republish
client().admin().cluster().prepareReroute().get();
qa/backwards-5.0/build.gradle (new file)
@ -0,0 +1,23 @@
apply plugin: 'elasticsearch.rest-test'

/* This project runs the core REST tests against a 2 node cluster where one of the nodes runs a different minor version.
* Since we don't have a released version to test against yet, we currently use the hardcoded snapshot to basically run
* against ourselves. To test that using a different version works, go into distribution/zip and execute:
* gradle clean install -Dbuild.snapshot=false
*
* This installs the release build into the local .m2 repository; then change the version here to:
* bwcVersion = "5.0.0"
*
* Now you can run the bwc tests with:
* gradle check -Drepos.mavenlocal=true
*
* (-Drepos.mavenlocal=true will force gradle to look for the zip distribution in the local .m2 repository)
*/
integTest {
includePackaged = true
cluster {
numNodes = 2
numBwcNodes = 1
bwcVersion = "5.0.0-SNAPSHOT" // this is the same as the current version until we release the first RC
}
}
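A minimal sketch, assuming the workflow described in the comment above has been followed (a 5.0.0 release zip installed into the local .m2 repository via gradle clean install -Dbuild.snapshot=false from distribution/zip), of how the cluster block would then be pinned to the released version; the exact version string is illustrative:

integTest {
    includePackaged = true
    cluster {
        numNodes = 2
        numBwcNodes = 1
        // assumes the released zip is resolved from the local .m2 repository,
        // i.e. the build is invoked with: gradle check -Drepos.mavenlocal=true
        bwcVersion = "5.0.0"
    }
}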
@ -0,0 +1,41 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.smoketest;

import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.RestTestCandidate;
import org.elasticsearch.test.rest.parser.RestTestParseException;

import java.io.IOException;

public class MultiNodeBackwardsIT extends ESRestTestCase {

public MultiNodeBackwardsIT(RestTestCandidate testCandidate) {
super(testCandidate);
}

@ParametersFactory
public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
return createParameters(0, 1);
}
}
@ -1,28 +1,3 @@
---
"Check availability of default processors":
- do:
cluster.state: {}

- set: {master_node: master}

- do:
nodes.info: {}

- match: { nodes.$master.ingest.processors.0.type: date }
- match: { nodes.$master.ingest.processors.1.type: uppercase }
- match: { nodes.$master.ingest.processors.2.type: set }
- match: { nodes.$master.ingest.processors.3.type: lowercase }
- match: { nodes.$master.ingest.processors.4.type: gsub }
- match: { nodes.$master.ingest.processors.5.type: convert }
- match: { nodes.$master.ingest.processors.6.type: remove }
- match: { nodes.$master.ingest.processors.7.type: fail }
- match: { nodes.$master.ingest.processors.8.type: foreach }
- match: { nodes.$master.ingest.processors.9.type: split }
- match: { nodes.$master.ingest.processors.10.type: trim }
- match: { nodes.$master.ingest.processors.11.type: rename }
- match: { nodes.$master.ingest.processors.12.type: join }
- match: { nodes.$master.ingest.processors.13.type: append }

---
"Test basic pipeline crud":
- do:
@ -76,11 +76,12 @@ setup:
- do:
nodes.stats:
metric: [ ingest ]
- gte: {nodes.$master.ingest.total.count: 1}
# we can't assert exact counts here since we might have more than one node in the cluster
- gte: {nodes.$master.ingest.total.count: 0}
- gte: {nodes.$master.ingest.total.failed: 0}
- gte: {nodes.$master.ingest.total.time_in_millis: 0}
- match: {nodes.$master.ingest.total.current: 0}
- gte: {nodes.$master.ingest.pipelines.pipeline1.count: 1}
- gte: {nodes.$master.ingest.pipelines.pipeline1.count: 0}
- match: {nodes.$master.ingest.pipelines.pipeline1.failed: 0}
- gte: {nodes.$master.ingest.pipelines.pipeline1.time_in_millis: 0}
- match: {nodes.$master.ingest.pipelines.pipeline1.current: 0}
@ -113,11 +114,12 @@ setup:
- do:
nodes.stats:
metric: [ ingest ]
- gte: {nodes.$master.ingest.total.count: 1}
# we can't assert exact counts here since we might have more than one node in the cluster
- gte: {nodes.$master.ingest.total.count: 0}
- gte: {nodes.$master.ingest.total.failed: 0}
- gte: {nodes.$master.ingest.total.time_in_millis: 0}
- match: {nodes.$master.ingest.total.current: 0}
- match: {nodes.$master.ingest.pipelines.pipeline2.count: 1}
- gte: {nodes.$master.ingest.pipelines.pipeline2.count: 0}
- match: {nodes.$master.ingest.pipelines.pipeline2.failed: 0}
- gte: {nodes.$master.ingest.pipelines.pipeline2.time_in_millis: 0}
- match: {nodes.$master.ingest.pipelines.pipeline2.current: 0}
@ -39,6 +39,7 @@ List projects = [
'plugins:repository-s3',
'plugins:jvm-example',
'plugins:store-smb',
'qa:backwards-5.0',
'qa:evil-tests',
'qa:smoke-test-client',
'qa:smoke-test-multinode',