Merge branch 'master' into feature/query-refactoring

Christoph Büscher 2015-09-15 11:47:42 +02:00
commit dbaa90a3f8
18 changed files with 382 additions and 163 deletions

Vagrantfile vendored

@@ -34,25 +34,23 @@ Vagrant.configure(2) do |config|
config.vm.box = "ubuntu/vivid64"
ubuntu_common config
end
config.vm.define "wheezy" do |config|
config.vm.box = "debian/wheezy64"
deb_common(config)
end
# Wheezy's backports don't contain OpenJDK 8 and the backflips required to
# get the Sun JDK on there just aren't worth it. We have jessie for testing
# Debian and it works fine.
config.vm.define "jessie" do |config|
config.vm.box = "debian/jessie64"
deb_common(config)
deb_common config,
'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
end
config.vm.define "centos-6" do |config|
# TODO switch from chef to boxcutter to provide?
config.vm.box = "chef/centos-6.6"
rpm_common(config)
config.vm.box = "boxcutter/centos67"
rpm_common config
end
config.vm.define "centos-7" do |config|
# There is a centos/7 box but it doesn't have rsync or virtualbox guest
# stuff on there so its slow to use. So chef it is....
# TODO switch from chef to boxcutter to provide?
config.vm.box = "chef/centos-7.0"
rpm_common(config)
config.vm.box = "boxcutter/centos71"
rpm_common config
end
# This box hangs _forever_ on ```yum check-update```. I have no idea why.
# config.vm.define "oel-6", autostart: false do |config|
@@ -61,14 +59,14 @@ Vagrant.configure(2) do |config|
# end
config.vm.define "oel-7" do |config|
config.vm.box = "boxcutter/oel70"
rpm_common(config)
rpm_common config
end
config.vm.define "fedora-22" do |config|
# Fedora hosts their own 'cloud' images that aren't in Vagrant's Atlas and
# are missing required stuff like rsync. It'd be nice if we could use them,
# but they're much slower to get up and running than the boxcutter image.
config.vm.box = "boxcutter/fedora22"
dnf_common(config)
dnf_common config
end
# Switch the default share for the project root from /vagrant to
# /elasticsearch because /vagrant is confusing when there is a project inside
@@ -105,28 +103,46 @@ SOURCE_PROMPT
end
def ubuntu_common(config)
# Ubuntu is noisy
deb_common config, 'apt-add-repository -y ppa:openjdk-r/ppa > /dev/null 2>&1', 'openjdk-r-*'
end
def deb_common(config, add_openjdk_repository_command, openjdk_list)
# http://foo-o-rama.com/vagrant--stdin-is-not-a-tty--fix.html
config.vm.provision "fix-no-tty", type: "shell" do |s|
s.privileged = false
s.inline = "sudo sed -i '/tty/!s/mesg n/tty -s \\&\\& mesg n/' /root/.profile"
end
deb_common(config)
end
def deb_common(config)
provision(config, "apt-get update", "/var/cache/apt/archives/last_update",
"apt-get install -y", "openjdk-7-jdk")
provision(config,
update_command: "apt-get update",
update_tracking_file: "/var/cache/apt/archives/last_update",
install_command: "apt-get install -y",
java_package: "openjdk-8-jdk",
extra: <<-SHELL
export DEBIAN_FRONTEND=noninteractive
ls /etc/apt/sources.list.d/#{openjdk_list}.list > /dev/null 2>&1 ||
(echo "Importing java-8 ppa" &&
#{add_openjdk_repository_command} &&
apt-get update -o \
Dir::Etc::sourcelist="$(ls /etc/apt/sources.list.d/#{openjdk_list}.list)" \
-o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0")
SHELL
)
end
def rpm_common(config)
provision(config, "yum check-update", "/var/cache/yum/last_update",
"yum install -y", "java-1.7.0-openjdk-devel")
provision(config,
update_command: "yum check-update",
update_tracking_file: "/var/cache/yum/last_update",
install_command: "yum install -y",
java_package: "java-1.8.0-openjdk-devel")
end
def dnf_common(config)
provision(config, "dnf check-update", "/var/cache/dnf/last_update",
"dnf install -y", "java-1.8.0-openjdk-devel")
provision(config,
update_command: "dnf check-update",
update_tracking_file: "/var/cache/dnf/last_update",
install_command: "dnf install -y",
java_package: "java-1.8.0-openjdk-devel")
if Vagrant.has_plugin?("vagrant-cachier")
# Autodetect doesn't work....
config.cache.auto_detect = false
@@ -134,24 +150,49 @@ def dnf_common(config)
end
end
def provision(config, update_command, update_tracking_file, install_command, java_package)
# Register the main box provisioning script.
# @param config Vagrant's config object. Required.
# @param update_command [String] The command used to update the package
# manager. Required. Think `apt-get update`.
# @param update_tracking_file [String] The location of the file tracking the
# last time the update command was run. Required. Should be in a place that
# is cached by vagrant-cachier.
# @param install_command [String] The command used to install a package.
# Required. Think `apt-get install #{package}`.
# @param java_package [String] The name of the java package. Required.
# @param extra [String] Extra provisioning commands run before anything else.
# Optional. Used for things like setting up the ppa for Java 8.
def provision(config,
update_command: 'required',
update_tracking_file: 'required',
install_command: 'required',
java_package: 'required',
extra: '')
# Vagrant runs Ruby 2.0.0, which doesn't have required named parameters....
raise ArgumentError.new('update_command is required') if update_command == 'required'
raise ArgumentError.new('update_tracking_file is required') if update_tracking_file == 'required'
raise ArgumentError.new('install_command is required') if install_command == 'required'
raise ArgumentError.new('java_package is required') if java_package == 'required'
config.vm.provision "bats dependencies", type: "shell", inline: <<-SHELL
set -e
set -o pipefail
installed() {
command -v $1 2>&1 >/dev/null
}
install() {
# Only apt-get update if we haven't in the last day
if [ ! -f /tmp/update ] || [ "x$(find /tmp/update -mtime +0)" == "x/tmp/update" ]; then
sudo #{update_command} || true
if [ ! -f #{update_tracking_file} ] || [ "x$(find #{update_tracking_file} -mtime +0)" == "x#{update_tracking_file}" ]; then
#{update_command} || true
touch #{update_tracking_file}
fi
sudo #{install_command} $1
#{install_command} $1
}
ensure() {
installed $1 || install $1
}
#{extra}
installed java || install #{java_package}
ensure curl
ensure unzip
@@ -161,8 +202,8 @@ def provision(config, update_command, update_tracking_file, install_command, jav
ensure git
git clone https://github.com/sstephenson/bats /tmp/bats
# Centos doesn't add /usr/local/bin to the path....
sudo /tmp/bats/install.sh /usr
sudo rm -rf /tmp/bats
/tmp/bats/install.sh /usr
rm -rf /tmp/bats
}
cat \<\<VARS > /etc/profile.d/elasticsearch_vars.sh
export ZIP=/elasticsearch/distribution/zip/target/releases


@@ -251,7 +251,9 @@ public class Version {
public static final int V_1_7_1_ID = 1070199;
public static final Version V_1_7_1 = new Version(V_1_7_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_2_ID = 1070299;
public static final Version V_1_7_2 = new Version(V_1_7_2_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final Version V_1_7_2 = new Version(V_1_7_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_1_7_3_ID = 1070399;
public static final Version V_1_7_3 = new Version(V_1_7_3_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4);
public static final int V_2_0_0_beta1_ID = 2000001;
public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
@@ -281,6 +283,8 @@ public class Version {
return V_2_0_0;
case V_2_0_0_beta1_ID:
return V_2_0_0_beta1;
case V_1_7_3_ID:
return V_1_7_3;
case V_1_7_2_ID:
return V_1_7_2;
case V_1_7_1_ID:

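For context, these `*_ID` constants pack the version into a single integer: major × 1,000,000 + minor × 10,000 + revision × 100 + build, where build 99 marks a GA release and low build numbers mark pre-releases. That is why `V_1_7_3_ID` is 1070399 (1.7.3 GA) and `V_2_0_0_beta1_ID` is 2000001 (2.0.0 beta1). A minimal decoding sketch, with a hypothetical helper name that is not part of this commit:

```java
// Hypothetical helper illustrating the id scheme behind the constants above:
// id = major * 1_000_000 + minor * 10_000 + revision * 100 + build.
static String describeVersionId(int id) {
    int major = id / 1_000_000;      // 1070399 -> 1
    int minor = (id / 10_000) % 100; // 1070399 -> 7
    int revision = (id / 100) % 100; // 1070399 -> 3
    int build = id % 100;            // 99 = GA, 01 = beta1
    return major + "." + minor + "." + revision + " (build " + build + ")";
}
```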

@@ -16,12 +16,7 @@
package org.elasticsearch.common.inject.internal;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.elasticsearch.common.SuppressForbidden;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ConcurrentHashMap;
/**
* Lazily creates (and caches) values for keys. If creating the value fails (with errors), an
@@ -29,29 +24,21 @@ import java.util.concurrent.ExecutionException;
*
* @author jessewilson@google.com (Jesse Wilson)
*/
// TODO remove this suppression once we get rid of the CacheBuilder and friends
@SuppressForbidden(reason = "this uses Function in it's method declaration somewhere")
public abstract class FailableCache<K, V> {
private final LoadingCache<K, Object> delegate = CacheBuilder.newBuilder().build(new CacheLoader<K, Object>() {
@Override
public Object load(K key) throws Exception {
Errors errors = new Errors();
V result = null;
try {
result = FailableCache.this.create(key, errors);
} catch (ErrorsException e) {
errors.merge(e.getErrors());
}
return errors.hasErrors() ? errors : result;
}
});
private final ConcurrentHashMap<K, Object> cache = new ConcurrentHashMap<>();
protected abstract V create(K key, Errors errors) throws ErrorsException;
public V get(K key, Errors errors) throws ErrorsException {
try {
Object resultOrError = delegate.get(key);
Object resultOrError = cache.get(key);
if (resultOrError == null) {
synchronized (this) {
resultOrError = load(key);
// we can't use cache.computeIfAbsent since this might recursively call this API
cache.putIfAbsent(key, resultOrError);
}
}
if (resultOrError instanceof Errors) {
errors.merge((Errors) resultOrError);
throw errors.toException();
@@ -60,8 +47,17 @@ public abstract class FailableCache<K, V> {
V result = (V) resultOrError;
return result;
}
} catch (ExecutionException e) {
throw new RuntimeException(e);
}
private Object load(K key) {
Errors errors = new Errors();
V result = null;
try {
result = create(key, errors);
} catch (ErrorsException e) {
errors.merge(e.getErrors());
}
return errors.hasErrors() ? errors : result;
}
}
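The rewrite above deliberately avoids `ConcurrentHashMap.computeIfAbsent` (see the comment in `get`): creating one binding may recursively ask the cache for another key, and a mapping function that re-enters the map is not allowed. A hedged sketch of the hazard, with hypothetical names; the exact failure mode (stall or exception) depends on the JDK:

```java
import java.util.concurrent.ConcurrentHashMap;

// Hypothetical demo of why FailableCache uses get + putIfAbsent instead of
// computeIfAbsent: the mapping function below re-enters the same map, which
// ConcurrentHashMap.computeIfAbsent forbids.
public class RecursiveComputeDemo {
    private static final ConcurrentHashMap<Integer, Long> CACHE = new ConcurrentHashMap<>();

    static long fib(int n) {
        if (n <= 1) {
            return n;
        }
        // Unsafe: the lambda recursively calls computeIfAbsent on CACHE.
        return CACHE.computeIfAbsent(n, k -> fib(k - 1) + fib(k - 2));
    }

    public static void main(String[] args) {
        System.out.println(fib(10)); // may stall or throw instead of printing 55
    }
}
```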


@@ -18,16 +18,13 @@
*/
package org.elasticsearch.indices.analysis;
import com.google.common.util.concurrent.UncheckedExecutionException;
import org.apache.lucene.analysis.hunspell.Dictionary;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.collect.CopyOnWriteHashMap;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.KeyedLock;
import org.elasticsearch.env.Environment;
import java.io.IOException;
@@ -36,6 +33,8 @@ import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
/**
* Serves as a node level registry for hunspell dictionaries. This service expects all dictionaries to be located under
@@ -71,24 +70,29 @@ public class HunspellService extends AbstractComponent {
public final static String HUNSPELL_LAZY_LOAD = "indices.analysis.hunspell.dictionary.lazy";
public final static String HUNSPELL_IGNORE_CASE = "indices.analysis.hunspell.dictionary.ignore_case";
private final static String OLD_HUNSPELL_LOCATION = "indices.analysis.hunspell.dictionary.location";
private final Environment env;
private volatile CopyOnWriteHashMap<String, Dictionary> dictionaries = new CopyOnWriteHashMap<>();
private final ConcurrentHashMap<String, Dictionary> dictionaries = new ConcurrentHashMap<>();
private final Map<String, Dictionary> knownDictionaries;
private KeyedLock<String> keyedLock = new KeyedLock<>();
private final boolean defaultIgnoreCase;
private final Path hunspellDir;
private final Function<String, Dictionary> loadingFunction;
@Inject
public HunspellService(final Settings settings, final Environment env, final Map<String, Dictionary> knownDictionaries) throws IOException {
super(settings);
this.knownDictionaries = knownDictionaries;
this.knownDictionaries = Collections.unmodifiableMap(knownDictionaries);
this.hunspellDir = resolveHunspellDirectory(settings, env);
this.defaultIgnoreCase = settings.getAsBoolean(HUNSPELL_IGNORE_CASE, false);
this.env = env;
this.loadingFunction = (locale) -> {
try {
return loadDictionary(locale, settings, env);
} catch (Throwable e) {
throw new IllegalStateException("failed to load hunspell dictionary for locale: " + locale, e);
}
};
if (!settings.getAsBoolean(HUNSPELL_LAZY_LOAD, false)) {
scanAndLoadDictionaries();
}
}
/**
@@ -97,22 +101,9 @@
* @param locale The name of the locale
*/
public Dictionary getDictionary(String locale) {
Dictionary dictionary = dictionaries.get(locale);
Dictionary dictionary = knownDictionaries.get(locale);
if (dictionary == null) {
dictionary = knownDictionaries.get(locale);
if (dictionary == null) {
keyedLock.acquire(locale);
dictionary = dictionaries.get(locale);
if (dictionary == null) {
try {
dictionary = loadDictionary(locale, settings, env);
} catch (Exception e) {
throw new IllegalStateException("failed to load hunspell dictionary for local: " + locale, e);
}
dictionaries = dictionaries.copyAndPut(locale, dictionary);
}
keyedLock.release(locale);
}
dictionary = dictionaries.computeIfAbsent(locale, loadingFunction);
}
return dictionary;
}
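Here, by contrast, `computeIfAbsent` is safe: `loadingFunction` never re-enters the map, and the call guarantees each dictionary is loaded at most once, with concurrent callers for the same locale blocking until the load finishes. A minimal sketch of that load-once pattern, under a hypothetical class name:

```java
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

// Hypothetical sketch of the load-once registry pattern used above: the
// loader runs at most once per key; other threads asking for the same key
// block until it returns.
class LazyRegistry<K, V> {
    private final ConcurrentHashMap<K, V> cache = new ConcurrentHashMap<>();
    private final Function<K, V> loader; // must not re-enter the map

    LazyRegistry(Function<K, V> loader) {
        this.loader = loader;
    }

    V get(K key) {
        return cache.computeIfAbsent(key, loader);
    }
}
```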
@@ -137,10 +128,10 @@
if (inner.iterator().hasNext()) { // just making sure it's indeed a dictionary dir
try {
getDictionary(file.getFileName().toString());
} catch (UncheckedExecutionException e) {
} catch (Throwable e) {
// The cache loader throws unchecked exception (see #loadDictionary()),
// here we simply report the exception and continue loading the dictionaries
logger.error("exception while loading dictionary {}", file.getFileName(), e);
logger.error("exception while loading dictionary {}", e, file.getFileName());
}
}
}
@@ -198,22 +189,8 @@
logger.error("Could not load hunspell dictionary [{}]", e, locale);
throw e;
} finally {
if (affixStream != null) {
try {
affixStream.close();
} catch (IOException e) {
// nothing much we can do here
}
}
for (InputStream in : dicStreams) {
if (in != null) {
try {
in.close();
} catch (IOException e) {
// nothing much we can do here
}
}
}
IOUtils.close(affixStream);
IOUtils.close(dicStreams);
}
}
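The two hand-rolled close loops collapse into Lucene's `IOUtils.close`, which attempts to close every argument and rethrows the first `IOException` only after all of them have been tried. A hedged sketch of that close-all semantics, with a hypothetical helper name:

```java
import java.io.Closeable;
import java.io.IOException;

// Hypothetical sketch of IOUtils.close-style semantics: close everything,
// remember the first failure, rethrow it at the end.
final class CloseAll {
    static void closeAll(Closeable... resources) throws IOException {
        IOException first = null;
        for (Closeable c : resources) {
            if (c == null) {
                continue; // tolerate nulls, as affixStream above may be null
            }
            try {
                c.close();
            } catch (IOException e) {
                if (first == null) {
                    first = e;
                } else {
                    first.addSuppressed(e);
                }
            }
        }
        if (first != null) {
            throw first;
        }
    }
}
```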


@@ -268,17 +268,19 @@ public class PluginInfo implements Streamable, ToXContent {
@Override
public String toString() {
final StringBuffer sb = new StringBuffer("PluginInfo{");
sb.append("name='").append(name).append('\'');
sb.append(", description='").append(description).append('\'');
sb.append(", site=").append(site);
sb.append(", jvm=").append(jvm);
final StringBuilder information = new StringBuilder()
.append("- Plugin information:\n")
.append("Name: ").append(name).append("\n")
.append("Description: ").append(description).append("\n")
.append("Site: ").append(site).append("\n")
.append("Version: ").append(version).append("\n")
.append("JVM: ").append(jvm).append("\n");
if (jvm) {
sb.append(", classname=").append(classname);
sb.append(", isolated=").append(isolated);
information.append(" * Classname: ").append(classname).append("\n");
information.append(" * Isolated: ").append(isolated);
}
sb.append(", version='").append(version).append('\'');
sb.append('}');
return sb.toString();
return information.toString();
}
}


@@ -222,7 +222,7 @@ public class PluginManager {
// read and validate the plugin descriptor
PluginInfo info = PluginInfo.readFromProperties(root);
terminal.println("%s", info);
terminal.println(VERBOSE, "%s", info);
// check for jar hell before any copying
if (info.isJvm()) {


@@ -206,7 +206,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
for (int i = 0; i < numberOfNodes; i++) {
final DiscoveryNode node = newNode(i);
discoBuilder = discoBuilder.put(node);
int numberOfShards = randomIntBetween(0, 10);
int numberOfShards = randomIntBetween(1, 10);
for (int j = 0; j < numberOfShards; j++) {
final ShardId shardId = new ShardId(index, ++shardIndex);
ShardRouting shard = TestShardRouting.newShardRouting(index, shardId.getId(), node.id(), true, ShardRoutingState.STARTED, 1);


@@ -161,7 +161,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
}
for (DiskUsage usage : mostUsages.values()) {
logger.info("--> usage: {}", usage);
assertThat("usage has be retrieved", usage.getFreeBytes(), greaterThan(0L));
assertThat("usage has be retrieved", usage.getFreeBytes(), greaterThanOrEqualTo(0L));
}
for (Long size : shardSizes.values()) {
logger.info("--> shard size: {}", size);


@@ -27,13 +27,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
@@ -41,6 +35,7 @@ import org.elasticsearch.common.Priority;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
@@ -54,8 +49,7 @@ import org.elasticsearch.test.disruption.BlockClusterStateProcessing;
import org.elasticsearch.test.disruption.SingleNodeDisruption;
import org.elasticsearch.test.junit.annotations.TestLogging;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.*;
import org.junit.Test;
import java.io.IOException;
@@ -65,7 +59,6 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import static java.lang.Thread.sleep;
@@ -171,6 +164,68 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
}
@Test
/* Tests that the shard is deleted when the ShardActiveRequest fails after relocation and the next incoming cluster state is an index delete. */
public void shardCleanupIfShardDeletionAfterRelocationFailedAndIndexDeleted() throws Exception {
final String node_1 = internalCluster().startNode();
logger.info("--> creating index [test] with one shard and on replica");
assertAcked(prepareCreate("test").setSettings(
Settings.builder().put(indexSettings())
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
);
ensureGreen("test");
assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true));
final String node_2 = internalCluster().startDataOnlyNode(Settings.builder().build());
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut());
assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(true));
assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(true));
assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(false));
assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(false));
// add a transport delegate that will prevent the shard active request from succeeding the first time after relocation has finished.
// node_1 will then wait for the next cluster state change before it makes another attempt to delete the shard.
MockTransportService transportServiceNode_1 = (MockTransportService) internalCluster().getInstance(TransportService.class, node_1);
String node_2_id = internalCluster().getInstance(DiscoveryService.class, node_2).localNode().id();
DiscoveryNode node_2_disco = internalCluster().clusterService().state().getNodes().dataNodes().get(node_2_id);
final CountDownLatch shardActiveRequestSent = new CountDownLatch(1);
transportServiceNode_1.addDelegate(node_2_disco, new MockTransportService.DelegateTransport(transportServiceNode_1.original()) {
@Override
public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
if (action.equals("internal:index/shard/exists") && shardActiveRequestSent.getCount() > 0) {
shardActiveRequestSent.countDown();
logger.info("prevent shard active request from being sent");
throw new ConnectTransportException(node, "DISCONNECT: simulated");
}
super.sendRequest(node, requestId, action, request, options);
}
});
logger.info("--> move shard from {} to {}, and wait for relocation to finish", node_1, node_2);
internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2)).get();
shardActiveRequestSent.await();
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth()
.setWaitForRelocatingShards(0)
.get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
logClusterState();
// delete the index. node_1, which is still waiting for the next cluster state update, will then receive the index delete.
// it must still delete the shard, even if it can no longer find it in IndicesService
client().admin().indices().prepareDelete("test").get();
assertThat(waitForShardDeletion(node_1, "test", 0), equalTo(false));
assertThat(waitForIndexDeletion(node_1, "test"), equalTo(false));
assertThat(Files.exists(shardDirectory(node_1, "test", 0)), equalTo(false));
assertThat(Files.exists(indexDirectory(node_1, "test")), equalTo(false));
assertThat(waitForShardDeletion(node_2, "test", 0), equalTo(false));
assertThat(waitForIndexDeletion(node_2, "test"), equalTo(false));
assertThat(Files.exists(shardDirectory(node_2, "test", 0)), equalTo(false));
assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(false));
}
@Test
public void shardsCleanup() throws Exception {
final String node_1 = internalCluster().startNode();


@@ -171,7 +171,7 @@ public class PluginManagerIT extends ESIntegTestCase {
}
return zip.toUri().toURL().toString();
}
@Test
public void testThatPluginNameMustBeSupplied() throws IOException {
Path pluginDir = createTempDir().resolve("fake-plugin");
String pluginUrl = createPlugin(pluginDir,
@@ -185,7 +185,6 @@
assertStatus("install", USAGE);
}
@Test
public void testLocalPluginInstallWithBinAndConfig() throws Exception {
String pluginName = "fake-plugin";
Path pluginDir = createTempDir().resolve(pluginName);
@@ -232,7 +231,6 @@
/**
* Test for #7890
*/
@Test
public void testLocalPluginInstallWithBinAndConfigInAlreadyExistingConfigDir_7890() throws Exception {
String pluginName = "fake-plugin";
Path pluginDir = createTempDir().resolve(pluginName);
@@ -335,7 +333,6 @@
}
// For #7152
@Test
public void testLocalPluginInstallWithBinOnly_7152() throws Exception {
String pluginName = "fake-plugin";
Path pluginDir = createTempDir().resolve(pluginName);
@@ -359,20 +356,39 @@
assertDirectoryExists(pluginBinDir);
}
@Test
public void testListInstalledEmpty() throws IOException {
assertStatusOk("list");
assertThat(terminal.getTerminalOutput(), hasItem(containsString("No plugin detected")));
}
@Test
public void testListInstalledEmptyWithExistingPluginDirectory() throws IOException {
Files.createDirectory(environment.pluginsFile());
assertStatusOk("list");
assertThat(terminal.getTerminalOutput(), hasItem(containsString("No plugin detected")));
}
@Test
public void testInstallPluginVerbose() throws IOException {
String pluginName = "fake-plugin";
Path pluginDir = createTempDir().resolve(pluginName);
String pluginUrl = createPlugin(pluginDir,
"description", "fake desc",
"name", pluginName,
"version", "1.0",
"elasticsearch.version", Version.CURRENT.toString(),
"java.version", System.getProperty("java.specification.version"),
"jvm", "true",
"classname", "FakePlugin");
System.err.println("install " + pluginUrl + " --verbose");
ExitStatus status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl + " --verbose"));
assertThat("Terminal output was: " + terminal.getTerminalOutput(), status, is(ExitStatus.OK));
assertThat(terminal.getTerminalOutput(), hasItem(containsString("Name: fake-plugin")));
assertThat(terminal.getTerminalOutput(), hasItem(containsString("Description: fake desc")));
assertThat(terminal.getTerminalOutput(), hasItem(containsString("Site: false")));
assertThat(terminal.getTerminalOutput(), hasItem(containsString("Version: 1.0")));
assertThat(terminal.getTerminalOutput(), hasItem(containsString("JVM: true")));
assertThatPluginIsListed(pluginName);
}
public void testInstallPlugin() throws IOException {
String pluginName = "fake-plugin";
Path pluginDir = createTempDir().resolve(pluginName);
@@ -384,11 +400,38 @@ public class PluginManagerIT extends ESIntegTestCase {
"java.version", System.getProperty("java.specification.version"),
"jvm", "true",
"classname", "FakePlugin");
assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginUrl));
ExitStatus status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl));
assertThat("Terminal output was: " + terminal.getTerminalOutput(), status, is(ExitStatus.OK));
assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Name: fake-plugin"))));
assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Description:"))));
assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Site:"))));
assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Version:"))));
assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("JVM:"))));
assertThatPluginIsListed(pluginName);
}
@Test
public void testInstallSitePluginVerbose() throws IOException {
String pluginName = "fake-plugin";
Path pluginDir = createTempDir().resolve(pluginName);
Files.createDirectories(pluginDir.resolve("_site"));
Files.createFile(pluginDir.resolve("_site").resolve("somefile"));
String pluginUrl = createPlugin(pluginDir,
"description", "fake desc",
"name", pluginName,
"version", "1.0",
"site", "true");
ExitStatus status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl + " --verbose"));
assertThat("Terminal output was: " + terminal.getTerminalOutput(), status, is(ExitStatus.OK));
assertThat(terminal.getTerminalOutput(), hasItem(containsString("Name: fake-plugin")));
assertThat(terminal.getTerminalOutput(), hasItem(containsString("Description: fake desc")));
assertThat(terminal.getTerminalOutput(), hasItem(containsString("Site: true")));
assertThat(terminal.getTerminalOutput(), hasItem(containsString("Version: 1.0")));
assertThat(terminal.getTerminalOutput(), hasItem(containsString("JVM: false")));
assertThatPluginIsListed(pluginName);
// We want to check that Plugin Manager moves content to _site
assertFileExists(environment.pluginsFile().resolve(pluginName).resolve("_site"));
}
public void testInstallSitePlugin() throws IOException {
String pluginName = "fake-plugin";
Path pluginDir = createTempDir().resolve(pluginName);
@@ -399,13 +442,18 @@ public class PluginManagerIT extends ESIntegTestCase {
"name", pluginName,
"version", "1.0",
"site", "true");
assertStatusOk(String.format(Locale.ROOT, "install %s --verbose", pluginUrl));
ExitStatus status = new PluginManagerCliParser(terminal).execute(args("install " + pluginUrl));
assertThat("Terminal output was: " + terminal.getTerminalOutput(), status, is(ExitStatus.OK));
assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Name: fake-plugin"))));
assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Description:"))));
assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Site:"))));
assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("Version:"))));
assertThat(terminal.getTerminalOutput(), not(hasItem(containsString("JVM:"))));
assertThatPluginIsListed(pluginName);
// We want to check that Plugin Manager moves content to _site
assertFileExists(environment.pluginsFile().resolve(pluginName).resolve("_site"));
}
@Test
public void testInstallPluginWithBadChecksum() throws IOException {
String pluginName = "fake-plugin";
Path pluginDir = createTempDir().resolve(pluginName);
@@ -446,7 +494,6 @@ public class PluginManagerIT extends ESIntegTestCase {
* We test regular form: username/reponame/version
* It should find it in download.elasticsearch.org service
*/
@Test
@Network
@AwaitsFix(bugUrl = "fails with jar hell failures - http://build-us-00.elastic.co/job/es_core_master_oracle_6/519/testReport/")
public void testInstallPluginWithElasticsearchDownloadService() throws IOException {
@@ -460,7 +507,6 @@ public class PluginManagerIT extends ESIntegTestCase {
* We test regular form: groupId/artifactId/version
* It should find it in maven central service
*/
@Test
@Network
@AwaitsFix(bugUrl = "fails with jar hell failures - http://build-us-00.elastic.co/job/es_core_master_oracle_6/519/testReport/")
public void testInstallPluginWithMavenCentral() throws IOException {
@@ -475,7 +521,6 @@ public class PluginManagerIT extends ESIntegTestCase {
* We test site plugins from github: userName/repoName
* It should find it on github
*/
@Test
@Network @AwaitsFix(bugUrl = "needs to be adapted to 2.0")
public void testInstallPluginWithGithub() throws IOException {
assumeTrue("github.com is accessible", isDownloadServiceWorking("github.com", 443, "/"));
@@ -497,7 +542,6 @@ public class PluginManagerIT extends ESIntegTestCase {
return false;
}
@Test
public void testRemovePlugin() throws Exception {
String pluginName = "plugintest";
Path pluginDir = createTempDir().resolve(pluginName);
@@ -520,18 +564,15 @@ public class PluginManagerIT extends ESIntegTestCase {
singlePluginInstallAndRemove("groupid/plugintest", "plugintest", pluginUrl);
}
@Test
public void testRemovePlugin_NullName_ThrowsException() throws IOException {
assertStatus("remove ", USAGE);
}
@Test
public void testRemovePluginWithURLForm() throws Exception {
assertStatus("remove file://whatever", USAGE);
assertThat(terminal.getTerminalOutput(), hasItem(containsString("Illegal plugin name")));
}
@Test
public void testForbiddenPluginNames() throws IOException {
assertStatus("remove elasticsearch", USAGE);
assertStatus("remove elasticsearch.bat", USAGE);
@@ -543,7 +584,6 @@ public class PluginManagerIT extends ESIntegTestCase {
assertStatus("remove ELASTICSEARCH.IN.SH", USAGE);
}
@Test
public void testOfficialPluginName_ThrowsException() throws IOException {
PluginManager.checkForOfficialPlugins("analysis-icu");
PluginManager.checkForOfficialPlugins("analysis-kuromoji");
@@ -569,13 +609,11 @@ public class PluginManagerIT extends ESIntegTestCase {
}
}
@Test
public void testThatBasicAuthIsRejectedOnHttp() throws Exception {
assertStatus(String.format(Locale.ROOT, "install http://user:pass@localhost:12345/foo.zip --verbose"), CliTool.ExitStatus.IO_ERROR);
assertThat(terminal.getTerminalOutput(), hasItem(containsString("Basic auth is only supported for HTTPS!")));
}
@Test
public void testThatBasicAuthIsSupportedWithHttps() throws Exception {
assumeTrue("test requires security manager to be disabled", System.getSecurityManager() == null);


@@ -24,6 +24,7 @@ import com.carrotsearch.hppc.ObjectObjectHashMap
import com.carrotsearch.hppc.ObjectObjectMap;
import com.carrotsearch.hppc.cursors.ObjectIntCursor;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.XGeoHashUtils;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
@@ -148,6 +149,7 @@ public class GeoHashGridIT extends ESIntegTestCase {
@Test
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/13558")
public void simple() throws Exception {
for (int precision = 1; precision <= XGeoHashUtils.PRECISION; precision++) {
SearchResponse response = client().prepareSearch("idx")


@@ -20,6 +20,7 @@ package org.elasticsearch.test;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.client.Client;
@@ -66,6 +67,10 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
private static void startNode() {
assert NODE == null;
NODE = newNode();
// we must wait for the node to actually be up and running. Otherwise the node might have started, elected itself master, but might not yet have removed the
// SERVICE_UNAVAILABLE/1/state not recovered / initialized block
ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForGreenStatus().get();
assertFalse(clusterHealthResponse.isTimedOut());
}
private static void stopNode() {

Binary file not shown.

Binary file not shown.


@@ -154,7 +154,7 @@ def create_client(http_port=DEFAULT_HTTP_TCP_PORT, timeout=30):
for _ in range(0, timeout):
# TODO: ask Honza if there is a better way to do this?
try:
client = Elasticsearch([{'host': '127.0.0.1', 'port': http_port}])
client = Elasticsearch([{'host': 'localhost', 'port': http_port}])
client.cluster.health(wait_for_nodes=1)
client.count() # can we actually search or do we get a 503? -- anyway retry
return client


@@ -34,6 +34,8 @@ import tempfile
import re
import os
import shutil
from functools import partial
import sys
VERSION_FILE = 'core/src/main/java/org/elasticsearch/Version.java'
POM_FILE = 'pom.xml'
@@ -61,7 +63,7 @@ To install the deb from an APT repo:
APT line sources.list line:
deb http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/elasticsearch/%(major_minor_version)s/debian/ stable main
deb http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/%(major_minor_version)s/debian/ stable main
To install the RPM, create a YUM file like:
@@ -71,7 +73,7 @@ containing:
[elasticsearch-2.0]
name=Elasticsearch repository for packages
baseurl=http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/elasticsearch/%(major_minor_version)s/centos
baseurl=http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/%(major_minor_version)s/centos
gpgcheck=1
gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
@@ -175,6 +177,64 @@ def find_release_version():
return match.group(1)
raise RuntimeError('Could not find release version in branch')
# Checks if the produced RPM is signed with the supplied GPG key
def ensure_rpm_is_signed(rpm, gpg_key):
rpm_check_signature_cmd = 'rpm -v -K %s | grep -qi %s' % (rpm, gpg_key)
try:
subprocess.check_output(rpm_check_signature_cmd, shell=True)
except subprocess.CalledProcessError:
raise RuntimeError('Aborting. RPM does not seem to be signed, check with: rpm -v -K %s' % rpm)
# Checks if a command exists, needed for external binaries
def check_command_exists(name, cmd):
try:
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
raise RuntimeError('Could not run command %s - please make sure it is installed and in $PATH' % (name))
# console colors
COLOR_OK = '\033[92m'
COLOR_END = '\033[0m'
COLOR_FAIL = '\033[91m'
def run_and_print(text, run_function):
try:
print(text, end='')
run_function()
print(COLOR_OK + 'OK' + COLOR_END)
return True
except RuntimeError:
print(COLOR_FAIL + 'NOT OK' + COLOR_END)
return False
def check_env_var(text, env_var):
try:
print(text, end='')
os.environ[env_var]
print(COLOR_OK + 'OK' + COLOR_END)
return True
except KeyError:
print(COLOR_FAIL + 'NOT OK' + COLOR_END)
return False
def check_environment_and_commandline_tools(check_only):
checks = list()
checks.append(check_env_var('Checking for AWS env configuration AWS_SECRET_ACCESS_KEY... ', 'AWS_SECRET_ACCESS_KEY'))
checks.append(check_env_var('Checking for AWS env configuration AWS_ACCESS_KEY_ID... ', 'AWS_ACCESS_KEY_ID'))
checks.append(run_and_print('Checking command: rpm... ', partial(check_command_exists, 'rpm', 'rpm --version')))
checks.append(run_and_print('Checking command: dpkg... ', partial(check_command_exists, 'dpkg', 'dpkg --version')))
checks.append(run_and_print('Checking command: gpg... ', partial(check_command_exists, 'gpg', 'gpg --version')))
checks.append(run_and_print('Checking command: expect... ', partial(check_command_exists, 'expect', 'expect -v')))
checks.append(run_and_print('Checking command: createrepo... ', partial(check_command_exists, 'createrepo', 'createrepo --version')))
checks.append(run_and_print('Checking command: s3cmd... ', partial(check_command_exists, 's3cmd', 's3cmd --version')))
checks.append(run_and_print('Checking command: deb-s3... ', partial(check_command_exists, 'deb-s3', 'deb-s3 -h')))
checks.append(run_and_print('Checking command: rpm-s3... ', partial(check_command_exists, 'rpm-s3', 'rpm-s3 -h')))
if check_only:
sys.exit(0)
if False in checks:
print("Exiting due to failing checks")
sys.exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Builds and publishes an Elasticsearch release')
@@ -190,6 +250,8 @@ if __name__ == "__main__":
help='Allows you to specify a different gpg_key to be used instead of the default release key')
parser.add_argument('--verbose', '-b', dest='verbose',
help='Runs the script in verbose mode')
parser.add_argument('--check-only', dest='check_only', action='store_true',
help='Checks and reports for all requirements and then exits')
parser.set_defaults(deploy=False)
parser.set_defaults(skip_doc_check=False)
parser.set_defaults(push=False)
@@ -203,6 +265,8 @@ if __name__ == "__main__":
install_only = args.install_only
VERBOSE = args.verbose
check_environment_and_commandline_tools(args.check_only)
ensure_checkout_is_clean()
release_version = find_release_version()
if not re.match('(\d+\.\d+)\.*',release_version):
@@ -227,7 +291,9 @@ if __name__ == "__main__":
print('*** Done removing snapshot version. DO NOT COMMIT THIS, WHEN CREATING A RELEASE CANDIDATE.')
shortHash = subprocess.check_output('git log --pretty=format:"%h" -n 1', shell=True).decode('utf-8')
localRepo = '/tmp/elasticsearch-%s-%s' % (release_version, shortHash)
releaseDirectory = os.getenv('HOME') + '/elastic-releases'
os.makedirs(releaseDirectory, exist_ok=True)
localRepo = '%s/elasticsearch-%s-%s' % (releaseDirectory, release_version, shortHash)
localRepoElasticsearch = localRepo + '/org/elasticsearch'
if os.path.exists(localRepoElasticsearch):
print('clean local repository %s' % localRepoElasticsearch)
@@ -240,8 +306,6 @@ if __name__ == "__main__":
install_command = 'mvn clean %s -Prelease -Dskip.integ.tests=true -Dgpg.key="%s" -Dpackaging.rpm.rpmbuild=/usr/bin/rpmbuild -Drpm.sign=true -Dmaven.repo.local=%s -Dno.commit.pattern="\\bno(n|)commit\\b" -Dforbidden.test.signatures=""' % (mvn_target, gpg_key, localRepo)
clean_repo_command = 'find %s -name _remote.repositories -exec rm {} \;' % (localRepoElasticsearch)
rename_metadata_files_command = 'for i in $(find %s -name "maven-metadata-local.xml*") ; do mv "$i" "${i/-local/}" ; done' % (localRepoElasticsearch)
s3_sync_command = 's3cmd sync %s s3://download.elasticsearch.org/elasticsearch/staging/%s-%s/org/' % (localRepoElasticsearch, release_version, shortHash)
s3_bucket_sync_to = 'download.elasticsearch.org/elasticsearch/staging/%s-%s/repos' % (release_version, shortHash)
if install_and_deploy:
for cmd in [install_command, clean_repo_command]:
run(cmd)
@@ -252,16 +316,44 @@ if __name__ == "__main__":
print(' %s' % (install_command))
print(' 1. Remove all _remote.repositories: %s' % (clean_repo_command))
print(' 2. Rename all maven metadata files: %s' % (rename_metadata_files_command))
print('Ensuring that RPM has been signed')
rpm = '%s/distribution/rpm/elasticsearch/%s/elasticsearch-%s.rpm' % (localRepoElasticsearch, release_version, release_version)
ensure_rpm_is_signed(rpm, gpg_key)
# repository push commands
s3cmd_sync_to_staging_bucket_cmd = 's3cmd sync -P %s s3://download.elasticsearch.org/elasticsearch/staging/%s-%s/org/' % (localRepoElasticsearch, release_version, shortHash)
s3_bucket_sync_to = 'download.elasticsearch.org/elasticsearch/staging/%s-%s/repos' % (release_version, shortHash)
s3cmd_sync_official_repo_cmd = 's3cmd sync s3://packages.elasticsearch.org/elasticsearch/%s s3://%s' % (major_minor_version, s3_bucket_sync_to)
debs3_prefix = 'elasticsearch/staging/%s-%s/repos/debian' % (release_version, shortHash)
debs3_upload_cmd = 'deb-s3 upload --preserve-versions %s/distribution/deb/elasticsearch/%s/elasticsearch-%s.deb -b download.elasticsearch.org --prefix %s --sign %s --arch amd64' % (localRepoElasticsearch, release_version, release_version, debs3_prefix, gpg_key)
debs3_list_cmd = 'deb-s3 list -b download.elasticsearch.org --prefix %s' % (debs3_prefix)
debs3_verify_cmd = 'deb-s3 verify -b download.elasticsearch.org --prefix %s' % (debs3_prefix)
rpms3_prefix = 'elasticsearch/staging/%s-%s/repos/centos' % (release_version, shortHash)
rpms3_upload_cmd = 'rpm-s3 -v -b download.elasticsearch.org -p %s --sign --visibility public-read -k 0 %s' % (rpms3_prefix, rpm)
if push:
run(s3_sync_command)
print('Use rpm-s3/deb-s3 to push into repositories at %s' % s3_bucket_sync_to)
run(s3cmd_sync_to_staging_bucket_cmd)
print('Syncing official package repository into staging s3 bucket')
run(s3cmd_sync_official_repo_cmd)
print('Uploading debian package (you will be prompted for the passphrase!)')
run(debs3_upload_cmd)
run(debs3_list_cmd)
run(debs3_verify_cmd)
print('Uploading rpm package (you will be prompted for the passphrase!)')
run(rpms3_upload_cmd)
else:
print('')
print('*** To push a release candidate to s3 run: ')
print(' 1. Sync %s into S3 bucket' % (localRepoElasticsearch))
print (' %s' % (s3_sync_command))
print (' %s' % (s3cmd_sync_to_staging_bucket_cmd))
print(' 2. Create repositories: ')
print(' Use rpm-s3/deb-s3 to push into repositories at %s' % s3_bucket_sync_to)
print(' 1. Sync existing repo into staging: %s' % s3cmd_sync_official_repo_cmd)
print(' 2. Upload debian package (and sign it) %s' % debs3_upload_cmd)
print(' 3. List all debian packages: %s' % debs3_list_cmd)
print(' 4. Verify debian packages: %s' % debs3_verify_cmd)
print(' 5. Upload RPM: %s' % rpms3_upload_cmd)
print('')
print('NOTE: the above mvn command will prompt you several times for the GPG passphrase of the key you specified; you can alternatively pass it via -Dgpg.passphrase=yourPassPhrase')
print(' since RPM signing doesn\'t support gpg-agent, the recommended way to set the password is to add a release profile to your settings.xml:')
@@ -280,8 +372,13 @@ if __name__ == "__main__":
string_format_dict = {'version' : release_version, 'hash': shortHash, 'major_minor_version' : major_minor_version}
print(MAIL_TEMPLATE % string_format_dict)
print('')
print('You can verify that pushing to the staging repository pushed all the artifacts by running (log into sonatype to find out the correct id):')
print(' python3 -B dev-tools/validate-maven-repository.py %s https://oss.sonatype.org/service/local/repositories/orgelasticsearch-IDTOFINDOUT/content/org/elasticsearch ' % (localRepoElasticsearch))
print('')
print('To publish the release and the repo on S3 execute the following commands:')
print(' s3cmd cp --recursive s3://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/elasticsearch/%(major_minor_version)s/ s3://packages.elasticsearch.org/elasticsearch/%(major_minor_version)s' % string_format_dict)
print(' s3cmd cp --recursive s3://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/%(major_minor_version)s/ s3://packages.elasticsearch.org/elasticsearch/%(major_minor_version)s' % string_format_dict)
print(' s3cmd cp --recursive s3://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/ s3://download.elasticsearch.org/elasticsearch/release/org' % string_format_dict)
print('Now go ahead and tag the release:')
print(' git tag -a v%(version)s %(hash)s' % string_format_dict)


@@ -110,6 +110,8 @@ com.google.common.collect.ImmutableSortedMap
com.google.common.base.Charsets
com.google.common.base.Function
com.google.common.collect.Collections2
com.google.common.cache.LoadingCache
com.google.common.cache.CacheLoader
@defaultMessage Do not violate java's access system
java.lang.reflect.AccessibleObject#setAccessible(boolean)


@@ -20,7 +20,7 @@
<testScripts>*.bats</testScripts>
<testCommand>sudo bats $BATS/${testScripts}</testCommand>
<allDebBoxes>precise, trusty, vivid, wheezy, jessie</allDebBoxes>
<allDebBoxes>precise, trusty, vivid, jessie</allDebBoxes>
<allRpmBoxes>centos-6, centos-7, fedora-22, oel-7</allRpmBoxes>
<defaultDebBoxes>trusty</defaultDebBoxes>