Merge branch 'master' into tell_me_your_plugins
commit 297f0f22d7
@@ -75,16 +75,18 @@ class JNANatives {
         }

         // mlockall failed for some reason
-        logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg + ". This can result in part of the JVM being swapped out.");
+        logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg);
+        logger.warn("This can result in part of the JVM being swapped out.");
         if (errno == JNACLibrary.ENOMEM) {
             if (rlimitSuccess) {
                 logger.warn("Increase RLIMIT_MEMLOCK, soft limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit));
                 if (Constants.LINUX) {
                     // give specific instructions for the linux case to make it easy
+                    String user = System.getProperty("user.name");
                     logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" +
-                                "\t# allow user 'esuser' mlockall\n" +
-                                "\tesuser soft memlock unlimited\n" +
-                                "\tesuser hard memlock unlimited"
+                                "\t# allow user '" + user + "' mlockall\n" +
+                                "\t" + user + " soft memlock unlimited\n" +
+                                "\t" + user + " hard memlock unlimited"
                                 );
                     logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect.");
                 }
@@ -54,10 +54,7 @@ class JavaVersion implements Comparable<JavaVersion> {
     }

     public static boolean isValid(String value) {
-        if (!value.matches("^0*[0-9]+(\\.[0-9]+)*$")) {
-            return false;
-        }
-        return true;
+        return value.matches("^0*[0-9]+(\\.[0-9]+)*$");
     }

     private final static JavaVersion CURRENT = parse(System.getProperty("java.specification.version"));
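The JavaVersion hunk collapses an if/else that returned the result of a boolean expression into a single return. A standalone sketch of the simplified check (the harness class and sample inputs are illustrative; only the regex comes from the diff above):

    // Hypothetical standalone harness; the regex is the one from the hunk above.
    public class VersionCheck {
        public static boolean isValid(String value) {
            // one expression replaces the if/return-false/return-true ladder
            return value.matches("^0*[0-9]+(\\.[0-9]+)*$");
        }

        public static void main(String[] args) {
            System.out.println(isValid("1.8"));      // true
            System.out.println(isValid("1.8.0_45")); // false: '_' is not matched
        }
    }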
@@ -35,6 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Module;
 import org.elasticsearch.common.inject.ModulesBuilder;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;

@@ -143,7 +144,11 @@ public class TransportClient extends AbstractClient {
         try {
             ModulesBuilder modules = new ModulesBuilder();
             modules.add(new Version.Module(version));
-            modules.add(new PluginsModule(this.settings, pluginsService));
+            // plugin modules must be added here, before others or we can get crazy injection errors...
+            for (Module pluginModule : pluginsService.nodeModules()) {
+                modules.add(pluginModule);
+            }
+            modules.add(new PluginsModule(pluginsService));
             modules.add(new EnvironmentModule(environment));
             modules.add(new SettingsModule(this.settings));
             modules.add(new NetworkModule());

@@ -160,6 +165,8 @@ public class TransportClient extends AbstractClient {
             modules.add(new ClientTransportModule());
             modules.add(new CircuitBreakerModule(this.settings));

+            pluginsService.processModules(modules);
+
             Injector injector = modules.createInjector();
             injector.getInstance(TransportService.class).start();
             TransportClient transportClient = new TransportClient(injector);
@@ -1,65 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.inject;
-
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.settings.Settings;
-
-import java.lang.reflect.Constructor;
-
-/**
- *
- */
-public class Modules {
-
-    public static Module createModule(Class<? extends Module> moduleClass, @Nullable Settings settings) {
-        Constructor<? extends Module> constructor;
-        try {
-            constructor = moduleClass.getConstructor(Settings.class);
-            try {
-                return constructor.newInstance(settings);
-            } catch (Exception e) {
-                throw new ElasticsearchException("Failed to create module [" + moduleClass + "]", e);
-            }
-        } catch (NoSuchMethodException e) {
-            try {
-                constructor = moduleClass.getConstructor();
-                try {
-                    return constructor.newInstance();
-                } catch (Exception e1) {
-                    throw new ElasticsearchException("Failed to create module [" + moduleClass + "]", e);
-                }
-            } catch (NoSuchMethodException e1) {
-                throw new ElasticsearchException("No constructor for [" + moduleClass + "]");
-            }
-        }
-    }
-
-    public static void processModules(Iterable<Module> modules) {
-        for (Module module : modules) {
-            if (module instanceof PreProcessModule) {
-                for (Module module1 : modules) {
-                    ((PreProcessModule) module).processModule(module1);
-                }
-            }
-        }
-    }
-}
@@ -31,20 +31,9 @@ public class ModulesBuilder implements Iterable<Module> {

     private final List<Module> modules = Lists.newArrayList();

-    public ModulesBuilder add(Module... modules) {
-        for (Module module : modules) {
-            add(module);
-        }
-        return this;
-    }
-
-    public ModulesBuilder add(Module module) {
+    public ModulesBuilder add(Module... newModules) {
+        for (Module module : newModules) {
             modules.add(module);
-            if (module instanceof SpawnModules) {
-                Iterable<? extends Module> spawned = ((SpawnModules) module).spawnModules();
-                for (Module spawn : spawned) {
-                    add(spawn);
-                }
-            }
+        }
         return this;
     }

@@ -55,7 +44,6 @@ public class ModulesBuilder implements Iterable<Module> {
     }

     public Injector createInjector() {
-        Modules.processModules(modules);
         Injector injector = Guice.createInjector(modules);
         Injectors.cleanCaches(injector);
         // in ES, we always create all instances as if they are eager singletons

@@ -65,7 +53,6 @@ public class ModulesBuilder implements Iterable<Module> {
     }

     public Injector createChildInjector(Injector injector) {
-        Modules.processModules(modules);
         Injector childInjector = injector.createChildInjector(modules);
         Injectors.cleanCaches(childInjector);
         // in ES, we always create all instances as if they are eager singletons
@@ -1,36 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.inject;
-
-/**
- * This interface can be added to a Module to spawn sub modules. DO NOT USE.
- *
- * This is fundamentally broken.
- * <ul>
- *   <li>If you have a plugin with multiple modules, return all the modules at once.</li>
- *   <li>If you are trying to make the implementation of a module "pluggable", don't do it.
- *       This is not extendable because custom implementations (using onModule) cannot be
- *       registered before spawnModules() is called.</li>
- * </ul>
- */
-public interface SpawnModules {

-    Iterable<? extends Module> spawnModules();
-}
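The deleted javadoc already names the replacement pattern: a plugin should return all of its modules at once. A minimal sketch of that pattern, modeled on the nodeModules() override that appears later in this diff (the plugin and module names here are illustrative, and the plugin base class name varies across 2.x):

    // Illustrative plugin: hand every module to the node up front instead of
    // spawning them lazily via SpawnModules.
    public class MyPlugin extends AbstractPlugin {
        @Override
        public Collection<Module> nodeModules() {
            // both modules are registered before injector creation, so
            // onModule(...) customizations can still reach them
            return Arrays.<Module>asList(new FirstModule(), new SecondModule());
        }
    }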
@@ -64,6 +64,7 @@ import static org.elasticsearch.discovery.zen.ping.ZenPing.PingResponse.readPing
 public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implements ZenPing {

     public static final String ACTION_NAME = "internal:discovery/zen/unicast";
+    public static final String DISCOVERY_ZEN_PING_UNICAST_HOSTS = "discovery.zen.ping.unicast.hosts";

     // these limits are per-address
     public static final int LIMIT_FOREIGN_PORTS_COUNT = 1;

@@ -116,7 +117,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
         }

         this.concurrentConnects = this.settings.getAsInt("discovery.zen.ping.unicast.concurrent_connects", 10);
-        String[] hostArr = this.settings.getAsArray("discovery.zen.ping.unicast.hosts");
+        String[] hostArr = this.settings.getAsArray(DISCOVERY_ZEN_PING_UNICAST_HOSTS);
         // trim the hosts
         for (int i = 0; i < hostArr.length; i++) {
             hostArr[i] = hostArr[i].trim();
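With the hosts list behind a constant, callers elsewhere in this diff (see TribeIT below) build the setting against UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS instead of a bare string. A small sketch of such a caller (the addresses are illustrative):

    // Illustrative: unicast discovery settings keyed by the new constant.
    Settings settings = Settings.builder()
            .putArray(UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS,
                      "10.0.0.1:9300", "10.0.0.2:9300")
            .build();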
@@ -43,12 +43,10 @@ import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;

+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.nio.file.DirectoryStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
+import java.nio.file.*;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.regex.Matcher;

@@ -253,10 +251,9 @@ public abstract class MetaDataStateFormat<T> {
         if (dataLocations != null) { // select all eligible files first
             for (Path dataLocation : dataLocations) {
                 final Path stateDir = dataLocation.resolve(STATE_DIR_NAME);
-                if (!Files.isDirectory(stateDir)) {
-                    continue;
-                }
                 // now, iterate over the current versions, and find latest one
+                // we don't check if the stateDir is present since it could be deleted
+                // after the check. Also if there is a _state file and it's not a dir something is really wrong
                 try (DirectoryStream<Path> paths = Files.newDirectoryStream(stateDir)) { // we don't pass a glob since we need the group part for parsing
                     for (Path stateFile : paths) {
                         final Matcher matcher = stateFilePattern.matcher(stateFile.getFileName().toString());

@@ -270,6 +267,8 @@ public abstract class MetaDataStateFormat<T> {
                         files.add(pav);
                     }
                 }
+            } catch (NoSuchFileException | FileNotFoundException ex) {
+                // no _state directory -- move on
             }
         }
     }
@@ -20,6 +20,7 @@
 package org.elasticsearch.http;

 import com.google.common.collect.ImmutableMap;
+import com.google.common.io.ByteStreams;

 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;

@@ -30,6 +31,7 @@ import org.elasticsearch.node.service.NodeService;
 import org.elasticsearch.rest.*;

 import java.io.IOException;
+import java.io.InputStream;
 import java.nio.file.*;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.util.HashMap;

@@ -114,10 +116,14 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
     }

     public void internalDispatchRequest(final HttpRequest request, final HttpChannel channel) {
-        if (request.rawPath().startsWith("/_plugin/")) {
+        String rawPath = request.rawPath();
+        if (rawPath.startsWith("/_plugin/")) {
             RestFilterChain filterChain = restController.filterChain(pluginSiteFilter);
             filterChain.continueProcessing(request, channel);
             return;
+        } else if (rawPath.equals("/favicon.ico")) {
+            handleFavicon(request, channel);
+            return;
         }
         restController.dispatchRequest(request, channel);
     }

@@ -131,6 +137,22 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
         }
     }

+    void handleFavicon(HttpRequest request, HttpChannel channel) {
+        if (request.method() == RestRequest.Method.GET) {
+            try {
+                try (InputStream stream = getClass().getResourceAsStream("/config/favicon.ico")) {
+                    byte[] content = ByteStreams.toByteArray(stream);
+                    BytesRestResponse restResponse = new BytesRestResponse(RestStatus.OK, "image/x-icon", content);
+                    channel.sendResponse(restResponse);
+                }
+            } catch (IOException e) {
+                channel.sendResponse(new BytesRestResponse(INTERNAL_SERVER_ERROR));
+            }
+        } else {
+            channel.sendResponse(new BytesRestResponse(FORBIDDEN));
+        }
+    }
+
     void handlePluginSite(HttpRequest request, HttpChannel channel) throws IOException {
         if (disableSites) {
             channel.sendResponse(new BytesRestResponse(FORBIDDEN));
@@ -56,7 +56,6 @@ import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.InternalIndicesLifecycle;
 import org.elasticsearch.indices.cache.query.IndicesQueryCache;
 import org.elasticsearch.plugins.PluginsService;
-import org.elasticsearch.plugins.ShardsPluginsModule;

 import java.io.Closeable;
 import java.io.IOException;

@@ -317,7 +316,10 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
         final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
                 (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
         ModulesBuilder modules = new ModulesBuilder();
-        modules.add(new ShardsPluginsModule(indexSettings, pluginsService));
+        // plugin modules must be added here, before others or we can get crazy injection errors...
+        for (Module pluginModule : pluginsService.shardModules(indexSettings)) {
+            modules.add(pluginModule);
+        }
         modules.add(new IndexShardModule(shardId, primary, indexSettings));
         modules.add(new StoreModule(injector.getInstance(IndexStore.class).shardDirectory(), lock,
                 new StoreCloseListener(shardId, canDeleteShardContent, new Closeable() {

@@ -327,6 +329,9 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
             }
         }), path));
         modules.add(new DeletionPolicyModule());
+
+        pluginsService.processModules(modules);
+
         try {
             shardInjector = modules.createChildInjector(injector);
         } catch (CreationException e) {
@@ -78,8 +78,6 @@ public class MatchQueryBuilder extends QueryBuilder implements BoostableQueryBui

     private String minimumShouldMatch;

-    private String rewrite = null;
-
     private String fuzzyRewrite = null;

     private Boolean lenient;

@@ -179,11 +177,6 @@ public class MatchQueryBuilder extends QueryBuilder implements BoostableQueryBui
         return this;
     }

-    public MatchQueryBuilder rewrite(String rewrite) {
-        this.rewrite = rewrite;
-        return this;
-    }
-
     public MatchQueryBuilder fuzzyRewrite(String fuzzyRewrite) {
         this.fuzzyRewrite = fuzzyRewrite;
         return this;

@@ -249,9 +242,6 @@ public class MatchQueryBuilder extends QueryBuilder implements BoostableQueryBui
         if (minimumShouldMatch != null) {
            builder.field("minimum_should_match", minimumShouldMatch);
         }
-        if (rewrite != null) {
-            builder.field("rewrite", rewrite);
-        }
         if (fuzzyRewrite != null) {
             builder.field("fuzzy_rewrite", fuzzyRewrite);
         }
@@ -61,8 +61,6 @@ public class MultiMatchQueryBuilder extends QueryBuilder implements BoostableQue

     private String minimumShouldMatch;

-    private String rewrite = null;
-
     private String fuzzyRewrite = null;

     private Boolean useDisMax;

@@ -255,11 +253,6 @@ public class MultiMatchQueryBuilder extends QueryBuilder implements BoostableQue
         return this;
     }

-    public MultiMatchQueryBuilder rewrite(String rewrite) {
-        this.rewrite = rewrite;
-        return this;
-    }
-
     public MultiMatchQueryBuilder fuzzyRewrite(String fuzzyRewrite) {
         this.fuzzyRewrite = fuzzyRewrite;
         return this;

@@ -367,9 +360,6 @@ public class MultiMatchQueryBuilder extends QueryBuilder implements BoostableQue
         if (minimumShouldMatch != null) {
             builder.field("minimum_should_match", minimumShouldMatch);
         }
-        if (rewrite != null) {
-            builder.field("rewrite", rewrite);
-        }
         if (fuzzyRewrite != null) {
             builder.field("fuzzy_rewrite", fuzzyRewrite);
         }
@@ -20,7 +20,12 @@
 package org.elasticsearch.indices;

 import com.google.common.base.Function;
-import com.google.common.collect.*;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.CollectionUtil;
 import org.apache.lucene.util.IOUtils;

@@ -35,7 +40,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.inject.CreationException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Injectors;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.ModulesBuilder;
 import org.elasticsearch.common.io.FileSystemUtils;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;

@@ -43,7 +53,12 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.env.ShardLock;
 import org.elasticsearch.gateway.MetaDataStateFormat;
-import org.elasticsearch.index.*;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexModule;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.LocalNodeIdModule;
 import org.elasticsearch.index.aliases.IndexAliasesServiceModule;
 import org.elasticsearch.index.analysis.AnalysisModule;
 import org.elasticsearch.index.analysis.AnalysisService;

@@ -71,13 +86,16 @@ import org.elasticsearch.index.store.IndexStore;
 import org.elasticsearch.index.store.IndexStoreModule;
 import org.elasticsearch.indices.analysis.IndicesAnalysisService;
 import org.elasticsearch.indices.recovery.RecoverySettings;
-import org.elasticsearch.plugins.IndexPluginsModule;
 import org.elasticsearch.plugins.PluginsService;

 import java.io.Closeable;
 import java.io.IOException;
 import java.nio.file.Files;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;

@@ -306,7 +324,10 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
         modules.add(new IndexNameModule(index));
         modules.add(new LocalNodeIdModule(localNodeId));
         modules.add(new IndexSettingsModule(index, indexSettings));
-        modules.add(new IndexPluginsModule(indexSettings, pluginsService));
+        // plugin modules must be added here, before others or we can get crazy injection errors...
+        for (Module pluginModule : pluginsService.indexModules(indexSettings)) {
+            modules.add(pluginModule);
+        }
         modules.add(new IndexStoreModule(indexSettings));
         modules.add(new AnalysisModule(indexSettings, indicesAnalysisService));
         modules.add(new SimilarityModule(indexSettings));

@@ -316,6 +337,8 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
         modules.add(new IndexAliasesServiceModule());
         modules.add(new IndexModule(indexSettings));

+        pluginsService.processModules(modules);
+
         Injector indexInjector;
         try {
             indexInjector = modules.createChildInjector(injector);
@@ -35,6 +35,7 @@ import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.Lifecycle;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Module;
 import org.elasticsearch.common.inject.ModulesBuilder;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.lease.Releasables;

@@ -165,7 +166,11 @@ public class Node implements Releasable {
             ModulesBuilder modules = new ModulesBuilder();
             modules.add(new Version.Module(version));
             modules.add(new CircuitBreakerModule(settings));
-            modules.add(new PluginsModule(settings, pluginsService));
+            // plugin modules must be added here, before others or we can get crazy injection errors...
+            for (Module pluginModule : pluginsService.nodeModules()) {
+                modules.add(pluginModule);
+            }
+            modules.add(new PluginsModule(pluginsService));
             modules.add(new SettingsModule(settings));
             modules.add(new NodeModule(this));
             modules.add(new NetworkModule());

@@ -193,6 +198,9 @@ public class Node implements Releasable {
             modules.add(new RepositoriesModule());
             modules.add(new TribeModule());

+            pluginsService.processModules(modules);
+
             injector = modules.createInjector();

             client = injector.getInstance(Client.class);
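TransportClient, IndexService, IndicesService and Node all get the same rewiring: plugin modules are fetched from PluginsService and added first, and processModules runs once everything is registered, replacing the implicit SpawnModules/PreProcessModule passes. Condensed from the hunks above (the elided core modules are marked with a comment):

    // Condensed from the Node/TransportClient hunks; "..." marks elided modules.
    ModulesBuilder modules = new ModulesBuilder();
    modules.add(new Version.Module(version));
    // plugin modules must be added here, before others or we can get crazy injection errors...
    for (Module pluginModule : pluginsService.nodeModules()) {
        modules.add(pluginModule);
    }
    modules.add(new PluginsModule(pluginsService));
    // ... remaining core modules ...

    // one explicit pass replaces the old PreProcessModule machinery
    pluginsService.processModules(modules);

    Injector injector = modules.createInjector();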
@@ -1,55 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.plugins;
-
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.inject.PreProcessModule;
-import org.elasticsearch.common.inject.SpawnModules;
-import org.elasticsearch.common.settings.Settings;
-
-/**
- *
- */
-public class IndexPluginsModule extends AbstractModule implements SpawnModules, PreProcessModule {
-
-    private final Settings settings;
-
-    private final PluginsService pluginsService;
-
-    public IndexPluginsModule(Settings settings, PluginsService pluginsService) {
-        this.settings = settings;
-        this.pluginsService = pluginsService;
-    }
-
-    @Override
-    public Iterable<? extends Module> spawnModules() {
-        return pluginsService.indexModules(settings);
-    }
-
-    @Override
-    public void processModule(Module module) {
-        pluginsService.processModule(module);
-    }
-
-    @Override
-    protected void configure() {
-    }
-}
@@ -20,35 +20,15 @@
 package org.elasticsearch.plugins;

 import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.inject.PreProcessModule;
-import org.elasticsearch.common.inject.SpawnModules;
-import org.elasticsearch.common.settings.Settings;

 /**
  *
  */
-public class PluginsModule extends AbstractModule implements SpawnModules, PreProcessModule {
-
-    private final Settings settings;
+public class PluginsModule extends AbstractModule {

     private final PluginsService pluginsService;

-    public PluginsModule(Settings settings, PluginsService pluginsService) {
-        this.settings = settings;
+    public PluginsModule(PluginsService pluginsService) {
         this.pluginsService = pluginsService;
     }

-    @Override
-    public Iterable<? extends Module> spawnModules() {
-        return pluginsService.nodeModules();
-    }
-
-    @Override
-    public void processModule(Module module) {
-        pluginsService.processModule(module);
-    }
-
     @Override
     protected void configure() {
         bind(PluginsService.class).toInstance(pluginsService);
@@ -1,55 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.plugins;
-
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.inject.PreProcessModule;
-import org.elasticsearch.common.inject.SpawnModules;
-import org.elasticsearch.common.settings.Settings;
-
-/**
- *
- */
-public class ShardsPluginsModule extends AbstractModule implements SpawnModules, PreProcessModule {
-
-    private final Settings settings;
-
-    private final PluginsService pluginsService;
-
-    public ShardsPluginsModule(Settings settings, PluginsService pluginsService) {
-        this.settings = settings;
-        this.pluginsService = pluginsService;
-    }
-
-    @Override
-    public Iterable<? extends Module> spawnModules() {
-        return pluginsService.shardModules(settings);
-    }
-
-    @Override
-    public void processModule(Module module) {
-        pluginsService.processModule(module);
-    }
-
-    @Override
-    protected void configure() {
-    }
-}
@@ -20,14 +20,8 @@
 package org.elasticsearch.repositories;

 import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.inject.Modules;
-import org.elasticsearch.common.inject.SpawnModules;
 import org.elasticsearch.common.settings.Settings;

-import java.util.Arrays;
-import java.util.Collections;
-
 /**
  * Binds repository classes for the specific repository type.
  */
@@ -727,7 +727,7 @@ public class ThreadPool extends AbstractComponent {
         if (queueSize == null) {
             builder.field(Fields.QUEUE_SIZE, -1);
         } else {
-            builder.field(Fields.QUEUE_SIZE, queueSize.toString());
+            builder.field(Fields.QUEUE_SIZE, queueSize.singles());
         }
         builder.endObject();
         return builder;
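This is the behavior change that the migration note and the new testThatToXContentWritesInteger test further down in this diff verify: queue sizes serialize as plain numbers. A two-line sketch of the difference (SizeValue is Elasticsearch's own size unit class):

    SizeValue queueSize = SizeValue.parseSizeValue("1k");
    String oldForm = queueSize.toString(); // "1k"  -- human-readable, old output
    long newForm = queueSize.singles();    // 1000  -- plain number, new output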
Binary file not shown. (new image file, 1.1 KiB)
@@ -1006,38 +1006,6 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         return list.get(0);
     }

-    private void ensureStableCluster(int nodeCount) {
-        ensureStableCluster(nodeCount, TimeValue.timeValueSeconds(30));
-    }
-
-    private void ensureStableCluster(int nodeCount, TimeValue timeValue) {
-        ensureStableCluster(nodeCount, timeValue, false, null);
-    }
-
-    private void ensureStableCluster(int nodeCount, @Nullable String viaNode) {
-        ensureStableCluster(nodeCount, TimeValue.timeValueSeconds(30), false, viaNode);
-    }
-
-    private void ensureStableCluster(int nodeCount, TimeValue timeValue, boolean local, @Nullable String viaNode) {
-        if (viaNode == null) {
-            viaNode = randomFrom(internalCluster().getNodeNames());
-        }
-        logger.debug("ensuring cluster is stable with [{}] nodes. access node: [{}]. timeout: [{}]", nodeCount, viaNode, timeValue);
-        ClusterHealthResponse clusterHealthResponse = client(viaNode).admin().cluster().prepareHealth()
-                .setWaitForEvents(Priority.LANGUID)
-                .setWaitForNodes(Integer.toString(nodeCount))
-                .setTimeout(timeValue)
-                .setLocal(local)
-                .setWaitForRelocatingShards(0)
-                .get();
-        if (clusterHealthResponse.isTimedOut()) {
-            ClusterStateResponse stateResponse = client(viaNode).admin().cluster().prepareState().get();
-            fail("failed to reach a stable cluster of [" + nodeCount + "] nodes. Tried via [" + viaNode + "]. last cluster state:\n"
-                    + stateResponse.getState().prettyPrint());
-        }
-        assertThat(clusterHealthResponse.isTimedOut(), is(false));
-    }
-
     private ClusterState getNodeClusterState(String node) {
         return client(node).admin().cluster().prepareState().setLocal(true).get().getState();
     }
@@ -22,6 +22,7 @@ package org.elasticsearch.discovery;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.elasticsearch.test.ESIntegTestCase.Scope;

@@ -81,12 +82,15 @@ public class ZenUnicastDiscoveryIT extends ESIntegTestCase {
         int currentNumNodes = randomIntBetween(3, 5);
         final int min_master_nodes = currentNumNodes / 2 + 1;
         int currentNumOfUnicastHosts = randomIntBetween(min_master_nodes, currentNumNodes);
-        final Settings settings = Settings.settingsBuilder().put("discovery.zen.minimum_master_nodes", min_master_nodes).build();
+        final Settings settings = Settings.settingsBuilder()
+                .put("discovery.zen.join_timeout", TimeValue.timeValueSeconds(10))
+                .put("discovery.zen.minimum_master_nodes", min_master_nodes)
+                .build();
         discoveryConfig = new ClusterDiscoveryConfiguration.UnicastZen(currentNumNodes, currentNumOfUnicastHosts, settings);

         List<String> nodes = internalCluster().startNodesAsync(currentNumNodes).get();

         ensureGreen();
         ensureStableCluster(currentNumNodes);

         DiscoveryNode masterDiscoNode = null;
         for (String node : nodes) {
@@ -30,6 +30,7 @@ import com.google.common.collect.Lists;

 import org.apache.http.impl.client.HttpClients;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;

@@ -1105,6 +1106,38 @@ public abstract class ESIntegTestCase extends ESTestCase {
         return ensureGreen(indices);
     }

+    protected void ensureStableCluster(int nodeCount) {
+        ensureStableCluster(nodeCount, TimeValue.timeValueSeconds(30));
+    }
+
+    protected void ensureStableCluster(int nodeCount, TimeValue timeValue) {
+        ensureStableCluster(nodeCount, timeValue, false, null);
+    }
+
+    protected void ensureStableCluster(int nodeCount, @Nullable String viaNode) {
+        ensureStableCluster(nodeCount, TimeValue.timeValueSeconds(30), false, viaNode);
+    }
+
+    protected void ensureStableCluster(int nodeCount, TimeValue timeValue, boolean local, @Nullable String viaNode) {
+        if (viaNode == null) {
+            viaNode = randomFrom(internalCluster().getNodeNames());
+        }
+        logger.debug("ensuring cluster is stable with [{}] nodes. access node: [{}]. timeout: [{}]", nodeCount, viaNode, timeValue);
+        ClusterHealthResponse clusterHealthResponse = client(viaNode).admin().cluster().prepareHealth()
+                .setWaitForEvents(Priority.LANGUID)
+                .setWaitForNodes(Integer.toString(nodeCount))
+                .setTimeout(timeValue)
+                .setLocal(local)
+                .setWaitForRelocatingShards(0)
+                .get();
+        if (clusterHealthResponse.isTimedOut()) {
+            ClusterStateResponse stateResponse = client(viaNode).admin().cluster().prepareState().get();
+            fail("failed to reach a stable cluster of [" + nodeCount + "] nodes. Tried via [" + viaNode + "]. last cluster state:\n"
+                    + stateResponse.getState().prettyPrint());
+        }
+        assertThat(clusterHealthResponse.isTimedOut(), is(false));
+    }
+
     /**
      * Syntactic sugar for:
      * <pre>
@@ -226,11 +226,6 @@ public final class InternalTestCluster extends TestCluster {
     private ServiceDisruptionScheme activeDisruptionScheme;
     private String nodeMode;

-    public InternalTestCluster(String nodeMode, long clusterSeed, Path baseDir, int minNumDataNodes, int maxNumDataNodes, String clusterName, int numClientNodes,
-                               boolean enableHttpPipelining, String nodePrefix) {
-        this(nodeMode, clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, DEFAULT_SETTINGS_SOURCE, numClientNodes, enableHttpPipelining, nodePrefix);
-    }
-
     public InternalTestCluster(String nodeMode, long clusterSeed, Path baseDir,
                                int minNumDataNodes, int maxNumDataNodes, String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes,
                                boolean enableHttpPipelining, String nodePrefix) {
@@ -99,4 +99,23 @@ public class ThreadPoolSerializationTests extends ESTestCase {
         assertThat(threadPool.info("index").getQueueSize(), is(nullValue()));
         terminate(threadPool);
     }
+
+    @Test
+    public void testThatToXContentWritesInteger() throws Exception {
+        ThreadPool.Info info = new ThreadPool.Info("foo", "search", 1, 10, TimeValue.timeValueMillis(3000), SizeValue.parseSizeValue("1k"));
+        XContentBuilder builder = jsonBuilder();
+        builder.startObject();
+        info.toXContent(builder, ToXContent.EMPTY_PARAMS);
+        builder.endObject();
+
+        BytesReference bytesReference = builder.bytes();
+        Map<String, Object> map;
+        try (XContentParser parser = XContentFactory.xContent(bytesReference).createParser(bytesReference)) {
+            map = parser.map();
+        }
+        assertThat(map, hasKey("foo"));
+        map = (Map<String, Object>) map.get("foo");
+        assertThat(map, hasKey("queue_size"));
+        assertThat(map.get("queue_size").toString(), is("1000"));
+    }
 }
@@ -42,7 +42,6 @@ import org.elasticsearch.client.FilterClient;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.inject.PreProcessModule;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.http.HttpServerTransport;

@@ -405,22 +404,18 @@ public class ContextAndHeaderTransportIT extends ESIntegTestCase {
         public Collection<Module> nodeModules() {
             return Collections.<Module>singletonList(new ActionLoggingModule());
         }
+
+        public void onModule(ActionModule module) {
+            module.registerFilter(LoggingFilter.class);
+        }
     }

-    public static class ActionLoggingModule extends AbstractModule implements PreProcessModule {
-
-
+    public static class ActionLoggingModule extends AbstractModule {
         @Override
         protected void configure() {
             bind(LoggingFilter.class).asEagerSingleton();
         }
-
-        @Override
-        public void processModule(Module module) {
-            if (module instanceof ActionModule) {
-                ((ActionModule)module).registerFilter(LoggingFilter.class);
-            }
-        }
     }

     public static class LoggingFilter extends ActionFilter.Simple {
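This test shows the replacement for PreProcessModule: the plugin declares a public onModule(SomeModule) callback that PluginsService discovers and invokes as matching modules are registered. A minimal sketch of the convention, mirroring the test above (the plugin class name is illustrative, and the plugin base class name varies across 2.x versions):

    // Illustrative plugin using the onModule(...) convention instead of
    // implementing PreProcessModule.
    public class LoggingPlugin extends AbstractPlugin {
        // invoked by PluginsService when the ActionModule is registered
        public void onModule(ActionModule module) {
            module.registerFilter(LoggingFilter.class);
        }
    }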
@@ -23,6 +23,8 @@ import com.google.common.collect.ImmutableMap;
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.cluster.ClusterState;

@@ -32,11 +34,14 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.discovery.MasterNotDiscoveredException;
+import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.node.NodeBuilder;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.InternalTestCluster;
+import org.elasticsearch.test.SettingsSource;
 import org.elasticsearch.test.TestCluster;
 import org.junit.After;
 import org.junit.AfterClass;

@@ -44,6 +49,8 @@ import org.junit.BeforeClass;
 import org.junit.Test;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;

 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

@@ -69,8 +76,21 @@ public class TribeIT extends ESIntegTestCase {
     @BeforeClass
     public static void setupSecondCluster() throws Exception {
         ESIntegTestCase.beforeClass();
+        SettingsSource source = new SettingsSource() {
+            @Override
+            public Settings node(int nodeOrdinal) {
+                final int base = InternalTestCluster.BASE_PORT + 1000;
+                return Settings.builder().put("transport.tcp.port", base + "-" + (base + 100)).build();
+            }
+
+            @Override
+            public Settings transportClient() {
+                return node(0);
+            }
+        };
         // create another cluster
-        cluster2 = new InternalTestCluster(InternalTestCluster.configuredNodeMode(), randomLong(), createTempDir(), 2, 2, Strings.randomBase64UUID(getRandom()), 0, false, SECOND_CLUSTER_NODE_PREFIX);
+        cluster2 = new InternalTestCluster(InternalTestCluster.configuredNodeMode(), randomLong(), createTempDir(), 2, 2, Strings.randomBase64UUID(getRandom()), source, 0, false, SECOND_CLUSTER_NODE_PREFIX);

         cluster2.beforeTest(getRandom(), 0.1);
         cluster2.ensureAtLeastNumDataNodes(2);
     }

@@ -109,6 +129,10 @@ public class TribeIT extends ESIntegTestCase {
             tribe1Defaults.put("tribe.t1." + entry.getKey(), entry.getValue());
             tribe2Defaults.put("tribe.t2." + entry.getKey(), entry.getValue());
         }
+        // give each tribe its unicast hosts to connect to
+        tribe1Defaults.putArray("tribe.t1." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS, getUnicastHosts(internalCluster().client()));
+        tribe1Defaults.putArray("tribe.t2." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS, getUnicastHosts(cluster2.client()));
+
         Settings merged = Settings.builder()
                 .put("tribe.t1.cluster.name", internalCluster().getClusterName())
                 .put("tribe.t2.cluster.name", cluster2.getClusterName())

@@ -421,4 +445,14 @@ public class TribeIT extends ESIntegTestCase {
         }
         return count;
     }
+
+    public String[] getUnicastHosts(Client client) {
+        ArrayList<String> unicastHosts = new ArrayList<>();
+        NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().clear().setTransport(true).get();
+        for (NodeInfo info : nodeInfos.getNodes()) {
+            TransportAddress address = info.getTransport().getAddress().publishAddress();
+            unicastHosts.add(address.getAddress() + ":" + address.getPort());
+        }
+        return unicastHosts.toArray(new String[unicastHosts.size()]);
+    }
 }
@@ -86,7 +86,7 @@ enabled=1

 def run(command, env_vars=None):
     if env_vars:
-        for key, value in env_vars.iter_items():
+        for key, value in env_vars.items():
             os.putenv(key, value)
     if os.system('%s' % (command)):
         raise RuntimeError(' FAILED: %s' % (command))

@@ -223,10 +223,10 @@ if __name__ == "__main__":
         shutil.rmtree(localRepoElasticsearch)

     if install_only:
-        mvn_targets = 'install'
+        mvn_target = 'install'
     else:
-        mvn_targets = 'install deploy'
-    install_command = 'mvn clean %s -Prelease -Dskip.integ.tests=true -Dgpg.keyname="%s" -Dpackaging.rpm.rpmbuild=/usr/bin/rpmbuild -Drpm.sign=true -Dmaven.repo.local=%s -Dno.commit.pattern="\\bno(n|)commit\\b" -Dforbidden.test.signatures=""' % (mvn_targets, gpg_key, localRepo)
+        mvn_target = 'deploy'
+    install_command = 'mvn clean %s -Prelease -Dskip.integ.tests=true -Dgpg.keyname="%s" -Dpackaging.rpm.rpmbuild=/usr/bin/rpmbuild -Drpm.sign=true -Dmaven.repo.local=%s -Dno.commit.pattern="\\bno(n|)commit\\b" -Dforbidden.test.signatures=""' % (mvn_target, gpg_key, localRepo)
     clean_repo_command = 'find %s -name _remote.repositories -exec rm {} \;' % (localRepoElasticsearch)
     rename_metadata_files_command = 'for i in $(find %s -name "maven-metadata-local.xml*") ; do mv "$i" "${i/-local/}" ; done' % (localRepoElasticsearch)
     s3_sync_command = 's3cmd sync %s s3://download.elasticsearch.org/elasticsearch/staging/%s-%s/org/' % (localRepoElasticsearch, release_version, shortHash)
@@ -152,8 +152,8 @@ be "two hop" operations).
 // on startup

 Client client = TransportClient.builder().build()
-        .addTransportAddress(new InetSocketTransportAddress("host1", 9300))
-        .addTransportAddress(new InetSocketTransportAddress("host2", 9300));
+        .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("host1"), 9300))
+        .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("host2"), 9300));

 // on shutdown
@@ -52,13 +52,13 @@ The following snippet calculates the average of the total monthly `sales`:
             },
             "avg_monthly_sales": {
                 "avg_bucket": {
-                    "buckets_paths": "sales_per_month>sales" <1>
+                    "buckets_path": "sales_per_month>sales" <1>
                 }
             }
         }
     }
 --------------------------------------------------
-<1> `bucket_paths` instructs this avg_bucket aggregation that we want the (mean) average value of the `sales` aggregation in the
+<1> `buckets_path` instructs this avg_bucket aggregation that we want the (mean) average value of the `sales` aggregation in the
 `sales_per_month` date histogram.

 And the following may be the response:
@@ -54,7 +54,7 @@ The following snippet calculates the derivative of the total monthly `sales`:
             },
             "sales_deriv": {
                 "derivative": {
-                    "buckets_paths": "sales" <1>
+                    "buckets_path": "sales" <1>
                 }
             }
         }

@@ -63,7 +63,7 @@ The following snippet calculates the derivative of the total monthly `sales`:
     }
 --------------------------------------------------

-<1> `bucket_paths` instructs this derivative aggregation to use the output of the `sales` aggregation for the derivative
+<1> `buckets_path` instructs this derivative aggregation to use the output of the `sales` aggregation for the derivative

 And the following may be the response:

@@ -137,12 +137,12 @@ monthly sales:
             },
             "sales_deriv": {
                 "derivative": {
-                    "buckets_paths": "sales"
+                    "buckets_path": "sales"
                 }
             },
             "sales_2nd_deriv": {
                 "derivative": {
-                    "buckets_paths": "sales_deriv" <1>
+                    "buckets_path": "sales_deriv" <1>
                 }
             }
         }

@@ -151,7 +151,7 @@ monthly sales:
     }
 --------------------------------------------------

-<1> `bucket_paths` for the second derivative points to the name of the first derivative
+<1> `buckets_path` for the second derivative points to the name of the first derivative

 And the following may be the response:

@@ -225,7 +225,7 @@ of the total sales per month but ask for the derivative of the sales as in the u
             },
             "sales_deriv": {
                 "derivative": {
-                    "buckets_paths": "sales",
+                    "buckets_path": "sales",
                     "unit": "day" <1>
                 }
             }
@@ -1,7 +1,7 @@
 [[indices-templates]]
 == Index Templates

-Index templates allow to define templates that will automatically be
+Index templates allow you to define templates that will automatically be
 applied to new indices created. The templates include both settings and
 mappings, and a simple pattern template that controls if the template
 will be applied to the index created. For example:
@@ -55,3 +55,7 @@ headers by default. Verbosity can be turned off with the `v` parameter:
 GET _cat/shards?v=0
 -----------------
+
+==== Nodes Stats API
+
+Queue lengths are now reported as a basic numeric so they can easily be processed by code. Before we used a human
+readable format. For example, a queue with 1,000 items is now reported as `1000` instead of `1k`.
@@ -3,7 +3,7 @@

 Allows to add one or more sort on specific fields. Each sort can be
 reversed as well. The sort is defined on a per field level, with special
-field name for `_score` to sort by score.
+field name for `_score` to sort by score, and `_doc` to sort by index order.

 [source,js]
 --------------------------------------------------

@@ -21,6 +21,10 @@ field name for `_score` to sort by score.
 }
 --------------------------------------------------

+NOTE: `_doc` has no real use-case besides being the most efficient sort order.
+So if you don't care about the order in which documents are returned, then you
+should sort by `_doc`. This especially helps when <<search-request-scroll,scrolling>>.
+
 ==== Sort Values

 The sort values for each document returned are also returned as part of
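From the Java API, the new recommendation translates to an explicit `_doc` sort on the request. A sketch assuming the 2.x SearchRequestBuilder (the index name and sizes are illustrative):

    // Scroll in index order: the cheapest sort when ordering doesn't matter.
    SearchResponse response = client.prepareSearch("my_index")
            .addSort("_doc", SortOrder.ASC)
            .setScroll("1m")   // keep the scroll context alive for one minute
            .setSize(100)
            .get();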
@@ -165,7 +165,7 @@ curl -X POST 'localhost:9200/music/_suggest?pretty' -d '{
   "song-suggest" : [ {
     "text" : "n",
     "offset" : 0,
-    "length" : 4,
+    "length" : 1,
     "options" : [ {
       "text" : "Nirvana - Nevermind",
       "score" : 34.0, "payload" : {"artistId":2321}
pom.xml (8 changed lines)
@@ -875,10 +875,16 @@
                 <version>2.4.1</version>
             </plugin>
             <plugin>
                 <!-- We just declare which plugin version to use. Each project can have then its own settings -->
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-resources-plugin</artifactId>
                 <version>2.7</version>
+                <!-- add some additional binary types to prevent maven from
+                     screwing them up with resource filtering -->
+                <configuration>
+                    <nonFilteredFileExtensions>
+                        <nonFilteredFileExtension>ico</nonFilteredFileExtension>
+                    </nonFilteredFileExtensions>
+                </configuration>
             </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
@@ -31,37 +31,31 @@
 # Load test utilities
 load packaging_test_utils

-# Cleans everything for the 1st execution
 setup() {
-    if [ "$BATS_TEST_NUMBER" -eq 1 ]; then
-        clean_before_test
-    fi
+    skip_not_tar_gz
 }

 ##################################
 # Install TAR GZ package
 ##################################
 @test "[TAR] tar command is available" {
-    skip_not_tar_gz
+    # Cleans everything for the 1st execution
+    clean_before_test
     run tar --version
     [ "$status" -eq 0 ]
 }

 @test "[TAR] archive is available" {
-    skip_not_tar_gz
     count=$(find . -type f -name 'elasticsearch*.tar.gz' | wc -l)
     [ "$count" -eq 1 ]
 }

 @test "[TAR] archive is not installed" {
-    skip_not_tar_gz
     count=$(find /tmp -type d -name 'elasticsearch*' | wc -l)
     [ "$count" -eq 0 ]
 }

 @test "[TAR] install archive" {
-    skip_not_tar_gz
-
     # Install the archive
     install_archive

@@ -73,8 +67,6 @@ setup() {
 # Check that the archive is correctly installed
 ##################################
 @test "[TAR] verify archive installation" {
-    skip_not_tar_gz
-
     verify_archive_installation "/tmp/elasticsearch"
 }

@@ -82,14 +74,11 @@ setup() {
 # Check that Elasticsearch is working
 ##################################
 @test "[TAR] test elasticsearch" {
-    skip_not_tar_gz
-
     start_elasticsearch_service

     run_elasticsearch_tests

     stop_elasticsearch_service

-    run rm -rf "/tmp/elasticsearch"
-    [ "$status" -eq 0 ]
+    rm -rf "/tmp/elasticsearch"
 }
@@ -242,34 +242,27 @@ install_archive() {
         eshome="$1"
     fi

-    run tar -xzvf elasticsearch*.tar.gz -C "$eshome" >&2
-    [ "$status" -eq 0 ]
+    tar -xzvf elasticsearch*.tar.gz -C "$eshome"

-    run find "$eshome" -depth -type d -name 'elasticsearch*' -exec mv {} "$eshome/elasticsearch" \;
-    [ "$status" -eq 0 ]
+    find "$eshome" -depth -type d -name 'elasticsearch*' -exec mv {} "$eshome/elasticsearch" \;

     # ES cannot run as root so create elasticsearch user & group if needed
     if ! getent group "elasticsearch" > /dev/null 2>&1 ; then
         if is_dpkg; then
-            run addgroup --system "elasticsearch"
-            [ "$status" -eq 0 ]
+            addgroup --system "elasticsearch"
         else
-            run groupadd -r "elasticsearch"
-            [ "$status" -eq 0 ]
+            groupadd -r "elasticsearch"
         fi
     fi
     if ! id "elasticsearch" > /dev/null 2>&1 ; then
         if is_dpkg; then
-            run adduser --quiet --system --no-create-home --ingroup "elasticsearch" --disabled-password --shell /bin/false "elasticsearch"
-            [ "$status" -eq 0 ]
+            adduser --quiet --system --no-create-home --ingroup "elasticsearch" --disabled-password --shell /bin/false "elasticsearch"
         else
-            run useradd --system -M --gid "elasticsearch" --shell /sbin/nologin --comment "elasticsearch user" "elasticsearch"
-            [ "$status" -eq 0 ]
+            useradd --system -M --gid "elasticsearch" --shell /sbin/nologin --comment "elasticsearch user" "elasticsearch"
         fi
     fi

-    run chown -R elasticsearch:elasticsearch "$eshome/elasticsearch"
-    [ "$status" -eq 0 ]
+    chown -R elasticsearch:elasticsearch "$eshome/elasticsearch"
 }

@@ -354,11 +347,12 @@ clean_before_test() {
 }

 start_elasticsearch_service() {
-
     if [ -f "/tmp/elasticsearch/bin/elasticsearch" ]; then
-        run /bin/su -s /bin/sh -c '/tmp/elasticsearch/bin/elasticsearch -d -p /tmp/elasticsearch/elasticsearch.pid' elasticsearch
-        [ "$status" -eq 0 ]
+        # su and the Elasticsearch init script work together to break bats.
+        # sudo isolates bats enough from the init script so everything continues
+        # to tick along
+        sudo -u elasticsearch /tmp/elasticsearch/bin/elasticsearch -d \
+            -p /tmp/elasticsearch/elasticsearch.pid
     elif is_systemd; then
         run systemctl daemon-reload
         [ "$status" -eq 0 ]

@@ -383,9 +377,8 @@ start_elasticsearch_service() {
         pid=$(cat /tmp/elasticsearch/elasticsearch.pid)
         [ "x$pid" != "x" ] && [ "$pid" -gt 0 ]

-        run ps $pid
-        [ "$status" -eq 0 ]
+        echo "Looking for elasticsearch pid...."
+        ps $pid
     elif is_systemd; then
         run systemctl is-active elasticsearch.service
         [ "$status" -eq 0 ]

@@ -400,14 +393,11 @@ start_elasticsearch_service() {
 }

 stop_elasticsearch_service() {
-
     if [ -r "/tmp/elasticsearch/elasticsearch.pid" ]; then
         pid=$(cat /tmp/elasticsearch/elasticsearch.pid)
         [ "x$pid" != "x" ] && [ "$pid" -gt 0 ]

-        run kill -SIGTERM $pid
-        [ "$status" -eq 0 ]
+        kill -SIGTERM $pid
     elif is_systemd; then
         run systemctl stop elasticsearch.service
         [ "$status" -eq 0 ]

@@ -428,36 +418,63 @@ stop_elasticsearch_service() {

 # Waits for Elasticsearch to reach a given status (defaults to "green")
 wait_for_elasticsearch_status() {
-    local status="green"
+    local desired_status="green"
     if [ "x$1" != "x" ]; then
         status="$1"
     fi

-    # Try to connect to elasticsearch and wait for expected status
-    wget --quiet --retry-connrefused --waitretry=1 --timeout=60 \
-        --output-document=/dev/null "http://localhost:9200/_cluster/health?wait_for_status=$status&timeout=60s" || true
-
-    # Checks the cluster health
-    curl -XGET 'http://localhost:9200/_cat/health?h=status&v=false'
-    if [ $? -ne 0 ]; then
-        echo "error when checking cluster health" >&2
-        exit 1
+    echo "Making sure elasticsearch is up..."
+    wget -O - --retry-connrefused --waitretry=1 --timeout=60 http://localhost:9200 || {
+        echo "Looks like elasticsearch never started. Here is its log:"
+        if [ -r "/tmp/elasticsearch/elasticsearch.pid" ]; then
+            cat /tmp/elasticsearch/log/elasticsearch.log
+        else
+            if [ -e '/var/log/elasticsearch/elasticsearch.log' ]; then
+                cat /var/log/elasticsearch/elasticsearch.log
+            else
+                echo "The elasticsearch log doesn't exist. Maybe /var/log/messages has something:"
+                tail -n20 /var/log/messages
+            fi
+        fi
+        false
+    }
+
+    echo "Trying to connect to elasticsearch and wait for expected status..."
+    curl -sS "http://localhost:9200/_cluster/health?wait_for_status=$desired_status&timeout=60s&pretty"
+    if [ $? -eq 0 ]; then
+        echo "Connected"
+    else
+        echo "Unable to connect to Elasticsearch"
+        false
     fi
+
+    echo "Checking that the cluster health matches the waited for status..."
+    run curl -sS -XGET 'http://localhost:9200/_cat/health?h=status&v=false'
+    if [ "$status" -ne 0 ]; then
+        echo "error when checking cluster health. code=$status output="
+        echo $output
+        false
+    fi
+    echo $output | grep $desired_status || {
+        echo "unexpected status: '$output' wanted '$desired_status'"
+        false
+    }
 }

 # Executes some very basic Elasticsearch tests
 run_elasticsearch_tests() {
+    # TODO this assertion is the same the one made when waiting for
+    # elasticsearch to start
     run curl -XGET 'http://localhost:9200/_cat/health?h=status&v=false'
     [ "$status" -eq 0 ]
     echo "$output" | grep -w "green"

-    run curl -XPOST 'http://localhost:9200/library/book/1?refresh=true' -d '{"title": "Elasticsearch - The Definitive Guide"}' 2>&1
-    [ "$status" -eq 0 ]
+    curl -s -XPOST 'http://localhost:9200/library/book/1?refresh=true&pretty' -d '{
+      "title": "Elasticsearch - The Definitive Guide"
+    }'

-    run curl -XGET 'http://localhost:9200/_cat/count?h=count&v=false'
-    [ "$status" -eq 0 ]
-    echo "$output" | grep -w "1"
+    curl -s -XGET 'http://localhost:9200/_cat/count?h=count&v=false&pretty' |
+        grep -w "1"

-    run curl -XDELETE 'http://localhost:9200/_all'
-    [ "$status" -eq 0 ]
+    curl -s -XDELETE 'http://localhost:9200/_all'
 }