Merge branch 'master' into module_culling4

Ryan Ernst committed 2015-08-22 09:40:17 -07:00
commit 07807f320a
60 changed files with 1150 additions and 1206 deletions

View File

@@ -45,8 +45,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip";
public static final String REST_EXCEPTION_SKIP_STACK_TRACE = "rest.exception.stacktrace.skip";
private static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = false;
private static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;
public static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
private static final String INDEX_HEADER_KEY = "es.index";
private static final String SHARD_HEADER_KEY = "es.shard";
private static final String RESOURCE_HEADER_TYPE_KEY = "es.resource.type";

View File

@@ -19,45 +19,7 @@
package org.elasticsearch.action;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.ValidationException;
import java.util.ArrayList;
import java.util.List;
/**
*
*/
public class ActionRequestValidationException extends IllegalArgumentException {
private final List<String> validationErrors = new ArrayList<>();
public ActionRequestValidationException() {
super("validation failed");
}
public void addValidationError(String error) {
validationErrors.add(error);
}
public void addValidationErrors(Iterable<String> errors) {
for (String error : errors) {
validationErrors.add(error);
}
}
public List<String> validationErrors() {
return validationErrors;
}
@Override
public String getMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Validation Failed: ");
int index = 0;
for (String error : validationErrors) {
sb.append(++index).append(": ").append(error).append(";");
}
return sb.toString();
}
public class ActionRequestValidationException extends ValidationException {
}

View File

@@ -21,15 +21,12 @@ package org.elasticsearch.bootstrap;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.StringHelper;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.inject.spi.Message;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
@@ -44,16 +41,14 @@ import org.elasticsearch.node.NodeBuilder;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import static com.google.common.collect.Sets.newHashSet;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
/**
* A main entry point when starting from the command line.
* Internal startup code.
*/
public class Bootstrap {
final class Bootstrap {
private static volatile Bootstrap INSTANCE;
@@ -137,10 +132,6 @@ public class Bootstrap {
OsProbe.getInstance();
}
public static boolean isMemoryLocked() {
return Natives.isMemoryLocked();
}
private void setup(boolean addShutdownHook, Settings settings, Environment environment) throws Exception {
initializeNatives(settings.getAsBoolean("bootstrap.mlockall", false),
settings.getAsBoolean("bootstrap.ctrlhandler", true));
@@ -222,7 +213,11 @@ public class Bootstrap {
}
}
public static void main(String[] args) throws Throwable {
/**
* This method is invoked by {@link Elasticsearch#main(String[])}
* to start up elasticsearch.
*/
static void init(String[] args) throws Throwable {
BootstrapCLIParser bootstrapCLIParser = new BootstrapCLIParser();
CliTool.ExitStatus status = bootstrapCLIParser.execute(args);
@@ -291,7 +286,7 @@ public class Bootstrap {
Loggers.enableConsoleLogging();
}
throw new StartupError(e);
throw e;
}
}

View File

@@ -38,7 +38,7 @@ import java.util.Properties;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder;
public class BootstrapCLIParser extends CliTool {
final class BootstrapCLIParser extends CliTool {
private static final CliToolConfig CONFIG = CliToolConfig.config("elasticsearch", BootstrapCLIParser.class)
.cmds(Start.CMD, Version.CMD)

View File

@@ -0,0 +1,46 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
/**
* Exposes system startup information
*/
public final class BootstrapInfo {
/** no instantiation */
private BootstrapInfo() {}
/**
* Returns true if we successfully loaded native libraries.
* <p>
* If this returns false, then native operations such as locking
* memory did not work.
*/
public static boolean isNativesAvailable() {
return Natives.JNA_AVAILABLE;
}
/**
* Returns true if we were able to lock the process's address space.
*/
public static boolean isMemoryLocked() {
return Natives.isMemoryLocked();
}
}
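For reference, a minimal usage sketch of the new accessors (the demo class and its output strings are hypothetical, not part of this commit):

import org.elasticsearch.bootstrap.BootstrapInfo;

public class BootstrapInfoDemo {
    public static void main(String[] args) {
        // if JNA failed to load, native operations such as locking memory were skipped
        if (BootstrapInfo.isNativesAvailable() == false) {
            System.err.println("native libraries unavailable; memory locking was not attempted");
        }
        System.out.println("memory locked: " + BootstrapInfo.isMemoryLocked());
    }
}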

View File

@@ -20,11 +20,23 @@
package org.elasticsearch.bootstrap;
/**
* A wrapper around {@link Bootstrap} just so the process will look nicely on things like jps.
* This class starts elasticsearch.
*/
public class Elasticsearch extends Bootstrap {
public final class Elasticsearch {
public static void main(String[] args) throws Throwable {
Bootstrap.main(args);
/** no instantiation */
private Elasticsearch() {}
/**
* Main entry point for starting elasticsearch
*/
public static void main(String[] args) throws StartupError {
try {
Bootstrap.init(args);
} catch (Throwable t) {
// format exceptions to the console in a special way
// to avoid 2MB stacktraces from guice, etc.
throw new StartupError(t);
}
}
}

View File

@@ -59,7 +59,7 @@ final class JNACLibrary {
public long rlim_max = 0;
@Override
protected List getFieldOrder() {
protected List<String> getFieldOrder() {
return Arrays.asList(new String[] { "rlim_cur", "rlim_max" });
}
}

View File

@@ -35,7 +35,7 @@ import java.util.List;
/**
* Library for Windows/Kernel32
*/
class JNAKernel32Library {
final class JNAKernel32Library {
private static final ESLogger logger = Loggers.getLogger(JNAKernel32Library.class);
@@ -148,7 +148,7 @@ class JNAKernel32Library {
public NativeLong Type;
@Override
protected List getFieldOrder() {
protected List<String> getFieldOrder() {
return Arrays.asList(new String[]{"BaseAddress", "AllocationBase", "AllocationProtect", "RegionSize", "State", "Protect", "Type"});
}
}

View File

@@ -34,10 +34,13 @@ import static org.elasticsearch.bootstrap.JNAKernel32Library.SizeT;
*/
class JNANatives {
/** no instantiation */
private JNANatives() {}
private static final ESLogger logger = Loggers.getLogger(JNANatives.class);
// Set to true, in case native mlockall call was successful
public static boolean LOCAL_MLOCKALL = false;
static boolean LOCAL_MLOCKALL = false;
static void tryMlockall() {
int errno = Integer.MIN_VALUE;

View File

@@ -29,6 +29,8 @@ import java.util.Map;
/** Checks that the JVM is ok and won't cause index corruption */
final class JVMCheck {
/** no instantiation */
private JVMCheck() {}
/**
* URL with latest JVM recommendations

View File

@@ -37,15 +37,30 @@ import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
/** Simple check for duplicate class files across the classpath */
/**
* Simple check for duplicate class files across the classpath.
* <p>
* This class checks for incompatibilities in the following ways:
* <ul>
* <li>Checks that class files are not duplicated across jars.</li>
* <li>Checks any {@code X-Compile-Target-JDK} value in the jar
* manifest is compatible with current JRE</li>
* <li>Checks any {@code X-Compile-Elasticsearch-Version} value in
* the jar manifest is compatible with the current ES</li>
* </ul>
*/
public class JarHell {
/** no instantiation */
private JarHell() {}
/** Simple driver class, can be used e.g. from builds. Returns non-zero on jar-hell */
@SuppressForbidden(reason = "command line tool")
public static void main(String args[]) throws Exception {
@@ -69,7 +84,7 @@ public class JarHell {
logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path"));
logger.debug("classloader urls: {}", Arrays.toString(((URLClassLoader)loader).getURLs()));
}
checkJarHell(((URLClassLoader)loader).getURLs());
checkJarHell(((URLClassLoader) loader).getURLs());
}
/**
@@ -141,6 +156,7 @@ public class JarHell {
// give a nice error if jar requires a newer java version
String targetVersion = manifest.getMainAttributes().getValue("X-Compile-Target-JDK");
if (targetVersion != null) {
checkVersionFormat(targetVersion);
checkJavaVersion(jar.toString(), targetVersion);
}
@@ -153,23 +169,34 @@ public class JarHell {
}
}
public static void checkVersionFormat(String targetVersion) {
if (!JavaVersion.isValid(targetVersion)) {
throw new IllegalStateException(
String.format(
Locale.ROOT,
"version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was %s",
targetVersion
)
);
}
}
/**
* Checks that the java specification version {@code targetVersion}
* required by {@code resource} is compatible with the current installation.
*/
public static void checkJavaVersion(String resource, String targetVersion) {
String systemVersion = System.getProperty("java.specification.version");
float current = Float.POSITIVE_INFINITY;
float target = Float.NEGATIVE_INFINITY;
try {
current = Float.parseFloat(systemVersion);
target = Float.parseFloat(targetVersion);
} catch (NumberFormatException e) {
// some spec changed, time for a more complex parser
}
if (current < target) {
throw new IllegalStateException(resource + " requires Java " + targetVersion
+ ", your system: " + systemVersion);
JavaVersion version = JavaVersion.parse(targetVersion);
if (JavaVersion.current().compareTo(version) < 0) {
throw new IllegalStateException(
String.format(
Locale.ROOT,
"%s requires Java %s, your system: %s",
resource,
targetVersion,
JavaVersion.current().toString()
)
);
}
}
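To make the manifest contract concrete, here is a hedged sketch (hypothetical demo class; the version values and jar name are examples) of the attribute these checks read and the validations applied per jar:

import java.util.jar.Attributes;
import java.util.jar.Manifest;

import org.elasticsearch.bootstrap.JarHell;

public class TargetJdkCheckDemo {
    public static void main(String[] args) {
        Manifest manifest = new Manifest();
        Attributes attributes = manifest.getMainAttributes();
        attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0");
        attributes.put(new Attributes.Name("X-Compile-Target-JDK"), "1.8");

        String target = manifest.getMainAttributes().getValue("X-Compile-Target-JDK");
        JarHell.checkVersionFormat(target);                 // rejects malformed strings such as "1.7.0_80"
        JarHell.checkJavaVersion("my-plugin.jar", target);  // throws if the running JRE is older
    }
}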

View File

@@ -0,0 +1,84 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import org.elasticsearch.common.Strings;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
class JavaVersion implements Comparable<JavaVersion> {
private final List<Integer> version;
public List<Integer> getVersion() {
return Collections.unmodifiableList(version);
}
private JavaVersion(List<Integer> version) {
this.version = version;
}
public static JavaVersion parse(String value) {
if (value == null) {
throw new NullPointerException("value");
}
if ("".equals(value)) {
throw new IllegalArgumentException("value");
}
List<Integer> version = new ArrayList<>();
String[] components = value.split("\\.");
for (String component : components) {
version.add(Integer.valueOf(component));
}
return new JavaVersion(version);
}
public static boolean isValid(String value) {
return value.matches("^0*[0-9]+(\\.[0-9]+)*$");
}
private final static JavaVersion CURRENT = parse(System.getProperty("java.specification.version"));
public static JavaVersion current() {
return CURRENT;
}
@Override
public int compareTo(JavaVersion o) {
int len = Math.max(version.size(), o.version.size());
for (int i = 0; i < len; i++) {
int d = (i < version.size() ? version.get(i) : 0);
int s = (i < o.version.size() ? o.version.get(i) : 0);
if (s < d)
return 1;
if (s > d)
return -1;
}
return 0;
}
@Override
public String toString() {
return Strings.collectionToDelimitedString(version, ".");
}
}
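A small usage sketch follows (JavaVersion is package-private, so the hypothetical demo sits in the same package; note that compareTo pads the shorter version with zeros, so "1.8" and "1.8.0" compare equal):

package org.elasticsearch.bootstrap;

public class JavaVersionDemo {
    public static void main(String[] args) {
        JavaVersion required = JavaVersion.parse("1.8");
        // current() is parsed once from the java.specification.version property
        if (JavaVersion.current().compareTo(required) < 0) {
            System.err.println("running JRE is older than " + required);
        }
        System.out.println("current: " + JavaVersion.current()); // e.g. "1.8"
    }
}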

View File

@@ -26,27 +26,32 @@ import org.elasticsearch.common.logging.Loggers;
* The Natives class is a wrapper class that checks if the classes necessary for calling native methods are available on
* startup. If they are not available, this class will avoid calling code that loads these classes.
*/
class Natives {
final class Natives {
/** no instantiation */
private Natives() {}
private static final ESLogger logger = Loggers.getLogger(Natives.class);
// marker to determine if the JNA class files are available to the JVM
private static boolean jnaAvailable = false;
static final boolean JNA_AVAILABLE;
static {
boolean v = false;
try {
// load one of the main JNA classes to see if the classes are available. this does not ensure that all native
// libraries are available, only the ones JNA itself needs in order to function
Class.forName("com.sun.jna.Native");
jnaAvailable = true;
v = true;
} catch (ClassNotFoundException e) {
logger.warn("JNA not found. native methods will be disabled.", e);
} catch (UnsatisfiedLinkError e) {
logger.warn("unable to load JNA native support library, native methods will be disabled.", e);
}
JNA_AVAILABLE = v;
}
static void tryMlockall() {
if (!jnaAvailable) {
if (!JNA_AVAILABLE) {
logger.warn("cannot mlockall because JNA is not available");
return;
}
@@ -54,7 +59,7 @@ class Natives {
}
static boolean definitelyRunningAsRoot() {
if (!jnaAvailable) {
if (!JNA_AVAILABLE) {
logger.warn("cannot check if running as root because JNA is not available");
return false;
}
@@ -62,7 +67,7 @@ class Natives {
}
static void tryVirtualLock() {
if (!jnaAvailable) {
if (!JNA_AVAILABLE) {
logger.warn("cannot mlockall because JNA is not available");
return;
}
@@ -70,7 +75,7 @@ class Natives {
}
static void addConsoleCtrlHandler(ConsoleCtrlHandler handler) {
if (!jnaAvailable) {
if (!JNA_AVAILABLE) {
logger.warn("cannot register console handler because JNA is not available");
return;
}
@@ -78,7 +83,7 @@ class Natives {
}
static boolean isMemoryLocked() {
if (!jnaAvailable) {
if (!JNA_AVAILABLE) {
return false;
}
return JNANatives.LOCAL_MLOCKALL;

View File

@@ -38,15 +38,59 @@ import java.util.Map;
import java.util.regex.Pattern;
/**
* Initializes securitymanager with necessary permissions.
* Initializes SecurityManager with necessary permissions.
* <p>
* We use a template file (the one we test with), and add additional
* permissions based on the environment (data paths, etc)
* <h1>Initialization</h1>
* The JVM is not initially started with the security manager enabled;
* instead we turn it on early in the startup process. This is a tradeoff
* between security and ease of use:
* <ul>
* <li>Assigns file permissions to user-configurable paths that can
* be specified from the command-line or {@code elasticsearch.yml}.</li>
* <li>Allows for some contained usage of native code that would not
* otherwise be permitted.</li>
* </ul>
* <p>
* <h1>Permissions</h1>
* Permissions use a policy file packaged as a resource; this file is
* also used in tests. File permissions are generated dynamically and
* combined with this policy file.
* <p>
* For each configured path, we ensure it exists and is accessible before
* granting permissions, otherwise directory creation would require
* permissions to parent directories.
* <p>
* In some exceptional cases, permissions are assigned only to specific jars:
* when a permission is so dangerous that general code should not be granted
* it, but there are extenuating circumstances.
* <p>
* Groovy scripts are assigned no permissions. This does not provide adequate
* sandboxing, as these scripts still have access to ES classes and could
* modify members, etc., causing bad things to happen later on their
* behalf (no package protections are yet in place; this would need some
* cleanups to the scripting APIs). Still, it provides some defense for users
* that enable dynamic scripting without being fully aware of the consequences.
* <p>
* <h1>Disabling Security</h1>
* SecurityManager can be disabled completely with this setting:
* <pre>
* es.security.manager.enabled = false
* </pre>
* <p>
* <h1>Debugging Security</h1>
* A good place to start when there is a problem is to turn on security debugging:
* <pre>
* JAVA_OPTS="-Djava.security.debug=access:failure" bin/elasticsearch
* </pre>
* See <a href="https://docs.oracle.com/javase/7/docs/technotes/guides/security/troubleshooting-security.html">
* Troubleshooting Security</a> for information.
*/
final class Security {
/** no instantiation */
private Security() {}
/**
* Initializes securitymanager for the environment
* Initializes SecurityManager for the environment
* Can only happen once!
*/
static void configure(Environment environment) throws Exception {
@@ -118,25 +162,25 @@ final class Security {
static Permissions createPermissions(Environment environment) throws IOException {
Permissions policy = new Permissions();
// read-only dirs
addPath(policy, environment.binFile(), "read,readlink");
addPath(policy, environment.libFile(), "read,readlink");
addPath(policy, environment.pluginsFile(), "read,readlink");
addPath(policy, environment.configFile(), "read,readlink");
addPath(policy, environment.scriptsFile(), "read,readlink");
addPath(policy, "path.home", environment.binFile(), "read,readlink");
addPath(policy, "path.home", environment.libFile(), "read,readlink");
addPath(policy, "path.plugins", environment.pluginsFile(), "read,readlink");
addPath(policy, "path.conf", environment.configFile(), "read,readlink");
addPath(policy, "path.scripts", environment.scriptsFile(), "read,readlink");
// read-write dirs
addPath(policy, environment.tmpFile(), "read,readlink,write,delete");
addPath(policy, environment.logsFile(), "read,readlink,write,delete");
addPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete");
addPath(policy, "path.logs", environment.logsFile(), "read,readlink,write,delete");
if (environment.sharedDataFile() != null) {
addPath(policy, environment.sharedDataFile(), "read,readlink,write,delete");
addPath(policy, "path.shared_data", environment.sharedDataFile(), "read,readlink,write,delete");
}
for (Path path : environment.dataFiles()) {
addPath(policy, path, "read,readlink,write,delete");
addPath(policy, "path.data", path, "read,readlink,write,delete");
}
for (Path path : environment.dataWithClusterFiles()) {
addPath(policy, path, "read,readlink,write,delete");
addPath(policy, "path.data", path, "read,readlink,write,delete");
}
for (Path path : environment.repoFiles()) {
addPath(policy, path, "read,readlink,write,delete");
addPath(policy, "path.repo", path, "read,readlink,write,delete");
}
if (environment.pidFile() != null) {
// we just need permission to remove the file if it's elsewhere.
@@ -145,10 +189,20 @@ final class Security {
return policy;
}
/** Add access to path (and all files underneath it */
static void addPath(Permissions policy, Path path, String permissions) throws IOException {
// paths may not exist yet
ensureDirectoryExists(path);
/**
* Add access to path (and all files underneath it)
* @param policy current policy to add permissions to
* @param configurationName the configuration name associated with the path (for error messages only)
* @param path the path itself
* @param permissions set of filepermissions to grant to the path
*/
static void addPath(Permissions policy, String configurationName, Path path, String permissions) {
// paths may not exist yet; this also checks accessibility
try {
ensureDirectoryExists(path);
} catch (IOException e) {
throw new IllegalStateException("Unable to access '" + configurationName + "' (" + path + ")", e);
}
// add each path twice: once for itself, again for files underneath it
policy.add(new FilePermission(path.toString(), permissions));
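The hunk is truncated after the first grant; the following is a sketch under assumptions of the intended pair of grants, where the recursive form uses FilePermission's "-" wildcard for everything beneath the directory (separator simplified for the sketch):

import java.io.FilePermission;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.Permissions;

public class AddPathSketch {
    static void addPath(Permissions policy, Path path, String permissions) {
        // once for the directory itself...
        policy.add(new FilePermission(path.toString(), permissions));
        // ...and again for all files underneath it (assumed recursive grant)
        policy.add(new FilePermission(path.toString() + "/-", permissions));
    }

    public static void main(String[] args) {
        Permissions policy = new Permissions();
        addPath(policy, Paths.get("/var/lib/elasticsearch"), "read,readlink,write,delete");
        System.out.println(policy.elements().nextElement());
    }
}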

View File

@@ -32,7 +32,7 @@ import java.io.PrintStream;
*/
//TODO: remove this when guice is removed, and exceptions are cleaned up
//this is horrible, but it's what we must do
class StartupError extends RuntimeException {
final class StartupError extends RuntimeException {
/** maximum length of a stacktrace, before we truncate it */
static final int STACKTRACE_LIMIT = 30;
@@ -59,10 +59,7 @@ class StartupError extends RuntimeException {
cause = getFirstGuiceCause((CreationException)cause);
}
String message = cause.getMessage();
if (message == null) {
message = "Unknown Error";
}
String message = cause.toString();
s.println(message);
if (cause != null) {

View File

@@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
@@ -46,6 +45,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
@@ -60,12 +60,15 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.indices.*;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndexCreationException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidIndexNameException;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.joda.time.DateTime;
@@ -514,6 +517,15 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}
public void validateIndexSettings(String indexName, Settings settings) throws IndexCreationException {
List<String> validationErrors = getIndexSettingsValidationErrors(settings);
if (validationErrors.isEmpty() == false) {
ValidationException validationException = new ValidationException();
validationException.addValidationErrors(validationErrors);
throw new IndexCreationException(new Index(indexName), validationException);
}
}
List<String> getIndexSettingsValidationErrors(Settings settings) {
String customPath = settings.get(IndexMetaData.SETTING_DATA_PATH, null);
List<String> validationErrors = Lists.newArrayList();
if (customPath != null && env.sharedDataFile() == null) {
@@ -530,22 +542,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
validationErrors.add("index must have 1 or more primary shards");
}
if (number_of_replicas != null && number_of_replicas < 0) {
validationErrors.add("index must have 0 or more replica shards");
}
if (validationErrors.isEmpty() == false) {
throw new IndexCreationException(new Index(indexName),
new IllegalArgumentException(getMessage(validationErrors)));
}
}
private String getMessage(List<String> validationErrors) {
StringBuilder sb = new StringBuilder();
sb.append("Validation Failed: ");
int index = 0;
for (String error : validationErrors) {
sb.append(++index).append(": ").append(error).append(";");
}
return sb.toString();
return validationErrors;
}
private static class DefaultIndexTemplateFilter implements IndexTemplateFilter {

View File

@@ -29,12 +29,12 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.TimeoutClusterStateUpdateTask;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.indices.IndexCreationException;
import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
import org.elasticsearch.indices.IndexTemplateMissingException;
import org.elasticsearch.indices.InvalidIndexTemplateException;
@@ -179,41 +179,44 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
}
private void validate(PutRequest request) {
List<String> validationErrors = Lists.newArrayList();
if (request.name.contains(" ")) {
throw new InvalidIndexTemplateException(request.name, "name must not contain a space");
validationErrors.add("name must not contain a space");
}
if (request.name.contains(",")) {
throw new InvalidIndexTemplateException(request.name, "name must not contain a ','");
validationErrors.add("name must not contain a ','");
}
if (request.name.contains("#")) {
throw new InvalidIndexTemplateException(request.name, "name must not contain a '#'");
validationErrors.add("name must not contain a '#'");
}
if (request.name.startsWith("_")) {
throw new InvalidIndexTemplateException(request.name, "name must not start with '_'");
validationErrors.add("name must not start with '_'");
}
if (!request.name.toLowerCase(Locale.ROOT).equals(request.name)) {
throw new InvalidIndexTemplateException(request.name, "name must be lower cased");
validationErrors.add("name must be lower cased");
}
if (request.template.contains(" ")) {
throw new InvalidIndexTemplateException(request.name, "template must not contain a space");
validationErrors.add("template must not contain a space");
}
if (request.template.contains(",")) {
throw new InvalidIndexTemplateException(request.name, "template must not contain a ','");
validationErrors.add("template must not contain a ','");
}
if (request.template.contains("#")) {
throw new InvalidIndexTemplateException(request.name, "template must not contain a '#'");
validationErrors.add("template must not contain a '#'");
}
if (request.template.startsWith("_")) {
throw new InvalidIndexTemplateException(request.name, "template must not start with '_'");
validationErrors.add("template must not start with '_'");
}
if (!Strings.validFileNameExcludingAstrix(request.template)) {
throw new InvalidIndexTemplateException(request.name, "template must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
validationErrors.add("template must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
}
try {
metaDataCreateIndexService.validateIndexSettings(request.name, request.settings);
} catch (IndexCreationException exception) {
throw new InvalidIndexTemplateException(request.name, exception.getDetailedMessage());
List<String> indexSettingsValidation = metaDataCreateIndexService.getIndexSettingsValidationErrors(request.settings);
validationErrors.addAll(indexSettingsValidation);
if (!validationErrors.isEmpty()) {
ValidationException validationException = new ValidationException();
validationException.addValidationErrors(validationErrors);
throw new InvalidIndexTemplateException(request.name, validationException.getMessage());
}
for (Alias alias : request.aliases) {
@@ -271,7 +274,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
this.mappings.putAll(mappings);
return this;
}
public PutRequest aliases(Set<Alias> aliases) {
this.aliases.addAll(aliases);
return this;

View File

@@ -0,0 +1,71 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
import java.util.ArrayList;
import java.util.List;
/**
* Encapsulates an accumulation of validation errors
*/
public class ValidationException extends IllegalArgumentException {
private final List<String> validationErrors = new ArrayList<>();
public ValidationException() {
super("validation failed");
}
/**
* Add a new validation error to the accumulating validation errors
* @param error the error to add
*/
public void addValidationError(String error) {
validationErrors.add(error);
}
/**
* Add a sequence of validation errors to the accumulating validation errors
* @param errors the errors to add
*/
public void addValidationErrors(Iterable<String> errors) {
for (String error : errors) {
validationErrors.add(error);
}
}
/**
* Returns the validation errors accumulated
* @return the list of accumulated validation errors
*/
public List<String> validationErrors() {
return validationErrors;
}
@Override
public String getMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Validation Failed: ");
int index = 0;
for (String error : validationErrors) {
sb.append(++index).append(": ").append(error).append(";");
}
return sb.toString();
}
}
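A brief usage sketch (hypothetical demo class; the error strings are taken from this diff) showing the accumulated message format:

import org.elasticsearch.common.ValidationException;

public class ValidationDemo {
    public static void main(String[] args) {
        ValidationException e = new ValidationException();
        e.addValidationError("index must have 1 or more primary shards");
        e.addValidationError("index must have 0 or more replica shards");
        // prints: Validation Failed: 1: index must have 1 or more primary shards;2: index must have 0 or more replica shards;
        System.out.println(e.getMessage());
    }
}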

View File

@@ -19,7 +19,6 @@ package org.elasticsearch.common.inject.spi;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.common.inject.*;
import org.elasticsearch.common.inject.binder.AnnotatedBindingBuilder;
import org.elasticsearch.common.inject.binder.AnnotatedConstantBindingBuilder;
@@ -343,7 +342,7 @@ public final class Elements {
return builder;
}
private static ESLogger logger = Loggers.getLogger(Bootstrap.class);
private static ESLogger logger = Loggers.getLogger(Elements.class);
protected Object getSource() {
Object ret;

View File

@@ -64,6 +64,7 @@ import static org.elasticsearch.discovery.zen.ping.ZenPing.PingResponse.readPing
public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implements ZenPing {
public static final String ACTION_NAME = "internal:discovery/zen/unicast";
public static final String DISCOVERY_ZEN_PING_UNICAST_HOSTS = "discovery.zen.ping.unicast.hosts";
// these limits are per-address
public static final int LIMIT_FOREIGN_PORTS_COUNT = 1;
@@ -116,7 +117,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
}
this.concurrentConnects = this.settings.getAsInt("discovery.zen.ping.unicast.concurrent_connects", 10);
String[] hostArr = this.settings.getAsArray("discovery.zen.ping.unicast.hosts");
String[] hostArr = this.settings.getAsArray(DISCOVERY_ZEN_PING_UNICAST_HOSTS);
// trim the hosts
for (int i = 0; i < hostArr.length; i++) {
hostArr[i] = hostArr[i].trim();

View File

@@ -261,6 +261,9 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
if (mapper.type().length() == 0) {
throw new InvalidTypeNameException("mapping type name is empty");
}
if (Version.indexCreated(indexSettings).onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) {
throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
}
if (mapper.type().charAt(0) == '_') {
throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
}

View File

@@ -68,7 +68,7 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
private Locale locale;
private float boost = -1;
private Fuzziness fuzziness;
@@ -99,6 +98,8 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
/** To limit effort spent determinizing regexp queries. */
private Integer maxDeterminizedStates;
private Boolean escape;
public QueryStringQueryBuilder(String queryString) {
this.queryString = queryString;
}
@@ -159,11 +160,11 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
/**
* Sets the boolean operator of the query parser used to parse the query string.
* <p/>
* <p>In default mode ({@link FieldQueryBuilder.Operator#OR}) terms without any modifiers
* <p>In default mode ({@link Operator#OR}) terms without any modifiers
* are considered optional: for example <code>capital of Hungary</code> is equal to
* <code>capital OR of OR Hungary</code>.
* <p/>
* <p>In {@link FieldQueryBuilder.Operator#AND} mode terms are considered to be in conjunction: the
* <p>In {@link Operator#AND} mode terms are considered to be in conjunction: the
* above mentioned query is parsed as <code>capital AND of AND Hungary</code>
*/
public QueryStringQueryBuilder defaultOperator(Operator defaultOperator) {
@@ -342,6 +343,14 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
return this;
}
/**
* Set to <tt>true</tt> to enable escaping of the query string
*/
public QueryStringQueryBuilder escape(boolean escape) {
this.escape = escape;
return this;
}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(QueryStringQueryParser.NAME);
@@ -431,6 +440,9 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
if (timeZone != null) {
builder.field("time_zone", timeZone);
}
if (escape != null) {
builder.field("escape", escape);
}
builder.endObject();
}
}
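A short usage sketch of the new option (hypothetical demo class):

import org.elasticsearch.index.query.QueryStringQueryBuilder;

public class EscapeDemo {
    public static void main(String[] args) {
        // escape is a Boolean internally, so the "escape" field is only
        // serialized when it has been set explicitly
        QueryStringQueryBuilder query =
                new QueryStringQueryBuilder("capital of Hungary").escape(true);
        // the serialized query_string object now carries "escape" : true
    }
}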

View File

@@ -19,7 +19,7 @@
package org.elasticsearch.monitor.process;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.BootstrapInfo;
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
@@ -136,7 +136,7 @@ public class ProcessProbe {
}
public ProcessInfo processInfo() {
return new ProcessInfo(jvmInfo().pid(), Bootstrap.isMemoryLocked());
return new ProcessInfo(jvmInfo().pid(), BootstrapInfo.isMemoryLocked());
}
public ProcessStats processStats() {

View File

@@ -21,6 +21,7 @@ package org.elasticsearch.node.internal;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Sets;
import com.google.common.collect.UnmodifiableIterator;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.Booleans;
@@ -40,6 +41,7 @@ import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import static org.elasticsearch.common.Strings.cleanPath;
@@ -113,13 +115,21 @@ public class InternalSettingsPreparer {
}
}
if (loadFromEnv) {
boolean settingsFileFound = false;
Set<String> foundSuffixes = Sets.newHashSet();
for (String allowedSuffix : ALLOWED_SUFFIXES) {
try {
settingsBuilder.loadFromPath(environment.configFile().resolve("elasticsearch" + allowedSuffix));
} catch (SettingsException e) {
// ignore
Path path = environment.configFile().resolve("elasticsearch" + allowedSuffix);
if (Files.exists(path)) {
if (!settingsFileFound) {
settingsBuilder.loadFromPath(path);
}
settingsFileFound = true;
foundSuffixes.add(allowedSuffix);
}
}
if (foundSuffixes.size() > 1) {
throw new SettingsException("multiple settings files found with suffixes: " + Strings.collectionToDelimitedString(foundSuffixes, ","));
}
}
}
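A standalone sketch of the new check (the suffix list is an assumption for illustration; the real values live in ALLOWED_SUFFIXES): exactly one elasticsearch.&lt;suffix&gt; file may exist, otherwise startup fails loudly instead of one file silently shadowing another.

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashSet;
import java.util.Set;

public class SettingsSuffixCheck {
    static final String[] SUFFIXES = {".yml", ".json", ".properties"}; // assumed list

    public static void main(String[] args) {
        Path configDir = Paths.get(args.length > 0 ? args[0] : "config");
        Set<String> found = new HashSet<>();
        for (String suffix : SUFFIXES) {
            if (Files.exists(configDir.resolve("elasticsearch" + suffix))) {
                found.add(suffix);
            }
        }
        if (found.size() > 1) {
            throw new IllegalStateException("multiple settings files found with suffixes: " + found);
        }
    }
}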

View File

@@ -120,6 +120,7 @@ public class PluginInfo implements Streamable, ToXContent {
if (javaVersionString == null) {
throw new IllegalArgumentException("Property [java.version] is missing for jvm plugin [" + name + "]");
}
JarHell.checkVersionFormat(javaVersionString);
JarHell.checkJavaVersion(name, javaVersionString);
isolated = Boolean.parseBoolean(props.getProperty("isolated", "true"));
classname = props.getProperty("classname");

View File

@@ -22,9 +22,14 @@ package org.elasticsearch.plugins;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.analysis.util.CharFilterFactory;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.PostingsFormat;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
@@ -107,7 +112,7 @@ public class PluginsService extends AbstractComponent {
List<Bundle> bundles = getPluginBundles(environment);
tupleBuilder.addAll(loadBundles(bundles));
} catch (IOException ex) {
throw new IllegalStateException(ex);
throw new IllegalStateException("Unable to initialize plugins", ex);
}
plugins = tupleBuilder.build();
@@ -279,9 +284,10 @@ public class PluginsService extends AbstractComponent {
}
static List<Bundle> getPluginBundles(Environment environment) throws IOException {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
ESLogger logger = Loggers.getLogger(PluginsService.class);
Path pluginsDirectory = environment.pluginsFile();
// TODO: remove this leniency, but tests bogusly rely on it
if (!isAccessibleDirectory(pluginsDirectory, logger)) {
return Collections.emptyList();
}
@@ -347,6 +353,8 @@ public class PluginsService extends AbstractComponent {
for (PluginInfo pluginInfo : bundle.plugins) {
final Plugin plugin;
if (pluginInfo.isJvm()) {
// reload lucene SPI with any new services from the plugin
reloadLuceneSPI(loader);
plugin = loadPlugin(pluginInfo.getClassname(), settings, loader);
} else {
plugin = new SitePlugin(pluginInfo.getName(), pluginInfo.getDescription());
@@ -358,6 +366,24 @@ public class PluginsService extends AbstractComponent {
return plugins.build();
}
/**
* Reloads all Lucene SPI implementations using the new classloader.
* This method must be called after the new classloader has been created to
* register the services for use.
*/
static void reloadLuceneSPI(ClassLoader loader) {
// do NOT change the order of these method calls!
// Codecs:
PostingsFormat.reloadPostingsFormats(loader);
DocValuesFormat.reloadDocValuesFormats(loader);
Codec.reloadCodecs(loader);
// Analysis:
CharFilterFactory.reloadCharFilters(loader);
TokenFilterFactory.reloadTokenFilters(loader);
TokenizerFactory.reloadTokenizers(loader);
}
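A sketch of the call sequence under assumptions (reloadLuceneSPI is package-private, so the hypothetical caller sits in org.elasticsearch.plugins; the jar URL array is a placeholder):

package org.elasticsearch.plugins;

import java.net.URL;
import java.net.URLClassLoader;

class ReloadSpiSketch {
    static void loadBundle(URL[] pluginJars) {
        URLClassLoader loader = URLClassLoader.newInstance(pluginJars, ReloadSpiSketch.class.getClassLoader());
        // refresh Lucene's SPI registries before instantiating the plugin, so
        // codecs, doc-values formats and analysis factories in the jars are visible
        PluginsService.reloadLuceneSPI(loader);
    }
}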
private Plugin loadPlugin(String className, Settings settings, ClassLoader loader) {
try {
Class<? extends Plugin> pluginClass = loader.loadClass(className).asSubclass(Plugin.class);

View File

@@ -25,6 +25,8 @@ import org.elasticsearch.bootstrap.Elasticsearch;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -115,11 +117,20 @@ public class BytesRestResponse extends RestResponse {
return this.status;
}
private static final ESLogger SUPPRESSED_ERROR_LOGGER = ESLoggerFactory.getLogger("rest.suppressed");
private static XContentBuilder convert(RestChannel channel, RestStatus status, Throwable t) throws IOException {
XContentBuilder builder = channel.newErrorBuilder().startObject();
if (t == null) {
builder.field("error", "unknown");
} else if (channel.detailedErrorsEnabled()) {
final ToXContent.Params params;
if (channel.request().paramAsBoolean("error_trace", !ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT)) {
params = new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"), channel.request());
} else {
SUPPRESSED_ERROR_LOGGER.info("{} Params: {}", t, channel.request().path(), channel.request().params());
params = channel.request();
}
builder.field("error");
builder.startObject();
final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t);
@@ -127,16 +138,13 @@ public class BytesRestResponse extends RestResponse {
builder.startArray();
for (ElasticsearchException rootCause : rootCauses){
builder.startObject();
rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), channel.request()));
rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
builder.endObject();
}
builder.endArray();
ElasticsearchException.toXContent(builder, channel.request(), t);
ElasticsearchException.toXContent(builder, params, t);
builder.endObject();
if (channel.request().paramAsBoolean("error_trace", false)) {
buildErrorTrace(t, builder);
}
} else {
builder.field("error", simpleMessage(t));
}
@@ -145,45 +153,6 @@ public class BytesRestResponse extends RestResponse {
return builder;
}
private static void buildErrorTrace(Throwable t, XContentBuilder builder) throws IOException {
builder.startObject("error_trace");
boolean first = true;
int counter = 0;
while (t != null) {
// bail if there are more than 10 levels, becomes useless really...
if (counter++ > 10) {
break;
}
if (!first) {
builder.startObject("cause");
}
buildThrowable(t, builder);
if (!first) {
builder.endObject();
}
t = t.getCause();
first = false;
}
builder.endObject();
}
private static void buildThrowable(Throwable t, XContentBuilder builder) throws IOException {
builder.field("message", t.getMessage());
for (StackTraceElement stElement : t.getStackTrace()) {
builder.startObject("at")
.field("class", stElement.getClassName())
.field("method", stElement.getMethodName());
if (stElement.getFileName() != null) {
builder.field("file", stElement.getFileName());
}
if (stElement.getLineNumber() >= 0) {
builder.field("line", stElement.getLineNumber());
}
builder.endObject();
}
}
/*
* Builds a simple error string from the message of the first ElasticsearchException
*/

View File

@@ -186,9 +186,8 @@ public class AggregationPath {
}
public AggregationPath subPath(int offset, int length) {
PathElement[] subTokens = new PathElement[length];
System.arraycopy(pathElements, offset, subTokens, 0, length);
return new AggregationPath(pathElements);
List<PathElement> subTokens = new ArrayList<>(pathElements.subList(offset, offset + length));
return new AggregationPath(subTokens);
}
/**
@@ -266,12 +265,12 @@ public class AggregationPath {
}
return aggregator;
}
/**
* Resolves the topmost aggregator pointed by this path using the given root as a point of reference.
*
* @param root The point of reference of this path
* @return The first child aggregator of the root pointed by this path
*/
public Aggregator resolveTopmostAggregator(Aggregator root) {
AggregationPath.PathElement token = pathElements.get(0);
@@ -279,7 +278,7 @@ public class AggregationPath {
assert (aggregator instanceof SingleBucketAggregator )
|| (aggregator instanceof NumericMetricsAggregator) : "this should be picked up before aggregation execution - on validate";
return aggregator;
}
}
/**
* Validates this path over the given aggregator as a point of reference.

View File

@@ -57,7 +57,7 @@ import java.util.Collections;
import static org.hamcrest.Matchers.equalTo;
public class ESExceptionTests extends ESTestCase {
private static final ToXContent.Params PARAMS = new ToXContent.MapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "true"));
private static final ToXContent.Params PARAMS = ToXContent.EMPTY_PARAMS;
@Test
public void testStatus() {

View File

@@ -526,7 +526,7 @@ public class ExceptionSerializationTests extends ESTestCase {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
x.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "true")));
x.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {

View File

@@ -33,13 +33,47 @@ import org.elasticsearch.indices.InvalidIndexTemplateException;
import org.elasticsearch.test.ESTestCase;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.Matchers.contains;
public class MetaDataIndexTemplateServiceTests extends ESTestCase {
@Test
public void testIndexTemplateInvalidNumberOfShards() throws IOException {
public void testIndexTemplateInvalidNumberOfShards() {
PutRequest request = new PutRequest("test", "test_shards");
request.template("test_shards*");
Map<String, Object> map = Maps.newHashMap();
map.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "0");
request.settings(Settings.settingsBuilder().put(map).build());
List<Throwable> throwables = putTemplate(request);
assertEquals(throwables.size(), 1);
assertThat(throwables.get(0), instanceOf(InvalidIndexTemplateException.class));
assertThat(throwables.get(0).getMessage(), containsString("index must have 1 or more primary shards"));
}
@Test
public void testIndexTemplateValidationAccumulatesValidationErrors() {
PutRequest request = new PutRequest("test", "putTemplate shards");
request.template("_test_shards*");
Map<String, Object> map = Maps.newHashMap();
map.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "0");
request.settings(Settings.settingsBuilder().put(map).build());
List<Throwable> throwables = putTemplate(request);
assertEquals(throwables.size(), 1);
assertThat(throwables.get(0), instanceOf(InvalidIndexTemplateException.class));
assertThat(throwables.get(0).getMessage(), containsString("name must not contain a space"));
assertThat(throwables.get(0).getMessage(), containsString("template must not start with '_'"));
assertThat(throwables.get(0).getMessage(), containsString("index must have 1 or more primary shards"));
}
private static List<Throwable> putTemplate(PutRequest request) {
MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(
Settings.EMPTY,
null,
@@ -55,13 +89,6 @@ public class MetaDataIndexTemplateServiceTests extends ESTestCase {
);
MetaDataIndexTemplateService service = new MetaDataIndexTemplateService(Settings.EMPTY, null, createIndexService, null);
PutRequest request = new PutRequest("test", "test_shards");
request.template("test_shards*");
Map<String, Object> map = Maps.newHashMap();
map.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "0");
request.settings(Settings.settingsBuilder().put(map).build());
final List<Throwable> throwables = Lists.newArrayList();
service.putTemplate(request, new MetaDataIndexTemplateService.PutListener() {
@Override
@@ -74,8 +101,7 @@ public class MetaDataIndexTemplateServiceTests extends ESTestCase {
throwables.add(t);
}
});
assertEquals(throwables.size(), 1);
assertTrue(throwables.get(0) instanceof InvalidIndexTemplateException);
assertTrue(throwables.get(0).getMessage().contains("index must have 1 or more primary shards"));
return throwables;
}
}

View File

@@ -125,7 +125,7 @@ public class MultiSearchRequestTests extends ESTestCase {
public void testResponseErrorToXContent() throws IOException {
MultiSearchResponse response = new MultiSearchResponse(new MultiSearchResponse.Item[]{new MultiSearchResponse.Item(null, new IllegalStateException("foobar")), new MultiSearchResponse.Item(null, new IllegalStateException("baaaaaazzzz"))});
XContentBuilder builder = XContentFactory.jsonBuilder();
response.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "true")));
response.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("\"responses\"[{\"error\":{\"root_cause\":[{\"type\":\"illegal_state_exception\",\"reason\":\"foobar\"}],\"type\":\"illegal_state_exception\",\"reason\":\"foobar\"}},{\"error\":{\"root_cause\":[{\"type\":\"illegal_state_exception\",\"reason\":\"baaaaaazzzz\"}],\"type\":\"illegal_state_exception\",\"reason\":\"baaaaaazzzz\"}}]",
builder.string());
}

View File

@@ -21,7 +21,7 @@ package org.elasticsearch.benchmark.mapping;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@@ -85,7 +85,7 @@ public class ManyMappingsBenchmark {
public static void main(String[] args) throws Exception {
System.setProperty("es.logger.prefix", "");
Bootstrap.initializeNatives(true, false);
BootstrapForTesting.ensureInitialized();
Settings settings = settingsBuilder()
.put("")
.put(SETTING_NUMBER_OF_SHARDS, 5)

View File

@@ -20,7 +20,7 @@ package org.elasticsearch.benchmark.recovery;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
import org.elasticsearch.action.admin.indices.recovery.ShardRecoveryResponse;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
@@ -57,7 +57,7 @@ public class ReplicaRecoveryBenchmark {
public static void main(String[] args) throws Exception {
System.setProperty("es.logger.prefix", "");
Bootstrap.initializeNatives(true, false);
BootstrapForTesting.ensureInitialized();
Settings settings = settingsBuilder()
.put("gateway.type", "local")

View File

@@ -21,12 +21,13 @@ package org.elasticsearch.benchmark.search.aggregations;
import com.carrotsearch.hppc.IntIntHashMap;
import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.benchmark.search.aggregations.TermsAggregationSearchBenchmark.StatsResult;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -66,7 +67,7 @@ public class GlobalOrdinalsBenchmark {
public static void main(String[] args) throws Exception {
System.setProperty("es.logger.prefix", "");
Bootstrap.initializeNatives(true, false);
BootstrapForTesting.ensureInitialized();
Random random = new Random();
Settings settings = settingsBuilder()

View File

@@ -27,7 +27,7 @@ import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.StopWatch;
@@ -71,7 +71,7 @@ public class SubAggregationSearchCollectModeBenchmark {
static Node[] nodes;
public static void main(String[] args) throws Exception {
Bootstrap.initializeNatives(true, false);
BootstrapForTesting.ensureInitialized();
Random random = new Random();
Settings settings = settingsBuilder()

View File

@@ -20,13 +20,14 @@ package org.elasticsearch.benchmark.search.aggregations;
import com.carrotsearch.hppc.ObjectScatterSet;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.settings.Settings;
@@ -71,7 +72,7 @@ public class TermsAggregationSearchAndIndexingBenchmark {
static Node[] nodes;
public static void main(String[] args) throws Exception {
Bootstrap.initializeNatives(true, false);
BootstrapForTesting.ensureInitialized();
Settings settings = settingsBuilder()
.put("refresh_interval", "-1")
.put(SETTING_NUMBER_OF_SHARDS, 1)

View File

@@ -28,7 +28,7 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.BootstrapForTesting;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.StopWatch;
@@ -99,7 +99,7 @@ public class TermsAggregationSearchBenchmark {
}
public static void main(String[] args) throws Exception {
Bootstrap.initializeNatives(true, false);
BootstrapForTesting.ensureInitialized();
Random random = new Random();
Settings settings = settingsBuilder()

View File

@@ -101,7 +101,7 @@ public class BootstrapForTesting {
}
}
// java.io.tmpdir
Security.addPath(perms, javaTmpDir, "read,readlink,write,delete");
Security.addPath(perms, "java.io.tmpdir", javaTmpDir, "read,readlink,write,delete");
// custom test config file
if (Strings.hasLength(System.getProperty("tests.config"))) {
perms.add(new FilePermission(System.getProperty("tests.config"), "read,readlink"));

View File

@@ -20,6 +20,7 @@
package org.elasticsearch.bootstrap;
import org.elasticsearch.Version;
import org.elasticsearch.common.Strings;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
@@ -27,6 +28,8 @@ import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.List;
import java.util.jar.Attributes;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
@@ -153,22 +156,25 @@ public class JarHellTests extends ESTestCase {
public void testRequiredJDKVersionTooOld() throws Exception {
Path dir = createTempDir();
String previousJavaVersion = System.getProperty("java.specification.version");
System.setProperty("java.specification.version", "1.7");
List<Integer> current = JavaVersion.current().getVersion();
List<Integer> target = new ArrayList<>(current.size());
for (int i = 0; i < current.size(); i++) {
target.add(current.get(i) + 1);
}
JavaVersion targetVersion = JavaVersion.parse(Strings.collectionToDelimitedString(target, "."));
Manifest manifest = new Manifest();
Attributes attributes = manifest.getMainAttributes();
attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0");
attributes.put(new Attributes.Name("X-Compile-Target-JDK"), "1.8");
attributes.put(new Attributes.Name("X-Compile-Target-JDK"), targetVersion.toString());
URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")};
try {
JarHell.checkJarHell(jars);
fail("did not get expected exception");
} catch (IllegalStateException e) {
assertTrue(e.getMessage().contains("requires Java 1.8"));
assertTrue(e.getMessage().contains("your system: 1.7"));
} finally {
System.setProperty("java.specification.version", previousJavaVersion);
assertTrue(e.getMessage().contains("requires Java " + targetVersion.toString()));
assertTrue(e.getMessage().contains("your system: " + JavaVersion.current().toString()));
}
}
@ -213,7 +219,12 @@ public class JarHellTests extends ESTestCase {
attributes.put(Attributes.Name.MANIFEST_VERSION, "1.0.0");
attributes.put(new Attributes.Name("X-Compile-Target-JDK"), "bogus");
URL[] jars = {makeJar(dir, "foo.jar", manifest, "Foo.class")};
JarHell.checkJarHell(jars);
try {
JarHell.checkJarHell(jars);
fail("did not get expected exception");
} catch (IllegalStateException e) {
assertTrue(e.getMessage().equals("version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was bogus"));
}
}
/** make sure if a plugin is compiled against the same ES version, it works */
@ -242,4 +253,26 @@ public class JarHellTests extends ESTestCase {
assertTrue(e.getMessage().contains("requires Elasticsearch 1.0-bogus"));
}
}
public void testValidVersions() {
String[] versions = new String[]{"1.7", "1.7.0", "0.1.7", "1.7.0.80"};
for (String version : versions) {
try {
JarHell.checkVersionFormat(version);
} catch (IllegalStateException e) {
fail(version + " should be accepted as a valid version format");
}
}
}
public void testInvalidVersions() {
String[] versions = new String[]{"", "1.7.0_80", "1.7."};
for (String version : versions) {
try {
JarHell.checkVersionFormat(version);
fail("\"" + version + "\"" + " should be rejected as an invalid version format");
} catch (IllegalStateException e) {
}
}
}
}

View File

@ -0,0 +1,79 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import org.elasticsearch.test.ESTestCase;
import org.junit.Test;
import java.util.List;
import static org.hamcrest.CoreMatchers.is;
public class JavaVersionTests extends ESTestCase {
@Test
public void testParse() {
JavaVersion javaVersion = JavaVersion.parse("1.7.0");
List<Integer> version = javaVersion.getVersion();
assertThat(version.size(), is(3));
assertThat(version.get(0), is(1));
assertThat(version.get(1), is(7));
assertThat(version.get(2), is(0));
}
@Test
public void testToString() {
JavaVersion javaVersion = JavaVersion.parse("1.7.0");
assertThat("1.7.0", is(javaVersion.toString()));
}
@Test
public void testCompare() {
JavaVersion onePointSix = JavaVersion.parse("1.6");
JavaVersion onePointSeven = JavaVersion.parse("1.7");
JavaVersion onePointSevenPointZero = JavaVersion.parse("1.7.0");
JavaVersion onePointSevenPointOne = JavaVersion.parse("1.7.1");
JavaVersion onePointSevenPointTwo = JavaVersion.parse("1.7.2");
JavaVersion onePointSevenPointOnePointOne = JavaVersion.parse("1.7.1.1");
JavaVersion onePointSevenPointTwoPointOne = JavaVersion.parse("1.7.2.1");
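// per the assertions below: a missing trailing component compares as zero ("1.7" == "1.7.0"),
// while an extra nonzero trailing component compares greater ("1.7.2.1" > "1.7.2")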
assertTrue(onePointSix.compareTo(onePointSeven) < 0);
assertTrue(onePointSeven.compareTo(onePointSix) > 0);
assertTrue(onePointSix.compareTo(onePointSix) == 0);
assertTrue(onePointSeven.compareTo(onePointSevenPointZero) == 0);
assertTrue(onePointSevenPointOnePointOne.compareTo(onePointSevenPointOne) > 0);
assertTrue(onePointSevenPointTwo.compareTo(onePointSevenPointTwoPointOne) < 0);
}
@Test
public void testValidVersions() {
String[] versions = new String[]{"1.7", "1.7.0", "0.1.7", "1.7.0.80"};
for (String version : versions) {
assertTrue(JavaVersion.isValid(version));
}
}
@Test
public void testInvalidVersions() {
String[] versions = new String[]{"", "1.7.0_80", "1.7."};
for (String version : versions) {
assertFalse(JavaVersion.isValid(version));
}
}
}

View File

@ -244,7 +244,7 @@ public class SecurityTests extends ESTestCase {
assumeNoException("test cannot create symbolic links with security manager enabled", e);
}
Permissions permissions = new Permissions();
Security.addPath(permissions, link, "read");
Security.addPath(permissions, "testing", link, "read");
assertExactPermissions(new FilePermission(link.toString(), "read"), permissions);
assertExactPermissions(new FilePermission(link.resolve("foo").toString(), "read"), permissions);
assertExactPermissions(new FilePermission(target.toString(), "read"), permissions);

View File

@ -19,12 +19,18 @@
package org.elasticsearch.index.mapper;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import static org.elasticsearch.test.VersionUtils.getFirstVersion;
import static org.elasticsearch.test.VersionUtils.getPreviousVersion;
import static org.elasticsearch.test.VersionUtils.randomVersionBetween;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.Matchers.hasToString;
public class MapperServiceTest extends ESSingleNodeTestCase {
@ -46,4 +52,39 @@ public class MapperServiceTest extends ESSingleNodeTestCase {
.execute()
.actionGet();
}
@Test
public void testThatLongTypeNameIsNotRejectedOnPreElasticsearchVersionTwo() {
String index = "text-index";
String field = "field";
String type = new String(new char[256]).replace("\0", "a");
CreateIndexResponse response =
client()
.admin()
.indices()
.prepareCreate(index)
.setSettings(settings(randomVersionBetween(random(), getFirstVersion(), getPreviousVersion(Version.V_2_0_0_beta1))))
.addMapping(type, field, "type=string")
.execute()
.actionGet();
assertNotNull(response);
}
@Test
public void testTypeNameTooLong() {
String index = "text-index";
String field = "field";
String type = new String(new char[256]).replace("\0", "a");
expectedException.expect(MapperParsingException.class);
expectedException.expect(hasToString(containsString("mapping type name [" + type + "] is too long; limit is length 255 but was [256]")));
client()
.admin()
.indices()
.prepareCreate(index)
.addMapping(type, field, "type=string")
.execute()
.actionGet();
}
}

View File

@ -20,7 +20,7 @@
package org.elasticsearch.monitor.process;
import org.apache.lucene.util.Constants;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.test.ESTestCase;
import org.junit.Test;
@ -37,7 +37,7 @@ public class ProcessProbeTests extends ESTestCase {
assertNotNull(info);
assertThat(info.getRefreshInterval(), greaterThanOrEqualTo(0L));
assertThat(info.getId(), equalTo(jvmInfo().pid()));
assertThat(info.isMlockall(), equalTo(Bootstrap.isMemoryLocked()));
assertThat(info.isMlockall(), equalTo(BootstrapInfo.isMemoryLocked()));
}
@Test

View File

@ -23,15 +23,17 @@ import org.elasticsearch.common.cli.CliToolTestCase;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
@ -42,6 +44,8 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.hamcrest.Matchers.*;
public class InternalSettingsPreparerTests extends ESTestCase {
@Rule
public ExpectedException expectedException = ExpectedException.none();
@Before
public void setupSystemProperties() {
@ -75,29 +79,6 @@ public class InternalSettingsPreparerTests extends ESTestCase {
assertThat(tuple.v1().get("node.zone"), equalTo("bar"));
}
@Test
public void testAlternateConfigFileSuffixes() throws Exception {
InputStream yaml = getClass().getResourceAsStream("/config/elasticsearch.yaml");
InputStream json = getClass().getResourceAsStream("/config/elasticsearch.json");
InputStream properties = getClass().getResourceAsStream("/config/elasticsearch.properties");
Path home = createTempDir();
Path config = home.resolve("config");
Files.createDirectory(config);
Files.copy(yaml, config.resolve("elasticsearch.yaml"));
Files.copy(json, config.resolve("elasticsearch.json"));
Files.copy(properties, config.resolve("elasticsearch.properties"));
// test that we can read config files with .yaml, .json, and .properties suffixes
Tuple<Settings, Environment> tuple = InternalSettingsPreparer.prepareSettings(settingsBuilder()
.put("config.ignore_system_properties", true)
.put("path.home", home)
.build(), true);
assertThat(tuple.v1().get("yaml.config.exists"), equalTo("true"));
assertThat(tuple.v1().get("json.config.exists"), equalTo("true"));
assertThat(tuple.v1().get("properties.config.exists"), equalTo("true"));
}
@Test
public void testReplacePromptPlaceholders() {
final List<String> replacedSecretProperties = new ArrayList<>();
@ -235,4 +216,37 @@ public class InternalSettingsPreparerTests extends ESTestCase {
assertThat(settings.get("name"), is("prompted name 0"));
assertThat(settings.get("node.name"), is("prompted name 0"));
}
@Test(expected = SettingsException.class)
public void testGarbageIsNotSwallowed() throws IOException {
InputStream garbage = getClass().getResourceAsStream("/config/garbage/garbage.yml");
Path home = createTempDir();
Path config = home.resolve("config");
Files.createDirectory(config);
Files.copy(garbage, config.resolve("elasticsearch.yml"));
InternalSettingsPreparer.prepareSettings(settingsBuilder()
.put("config.ignore_system_properties", true)
.put("path.home", home)
.build(), true);
}
public void testMultipleSettingsFileNotAllowed() throws IOException {
InputStream yaml = getClass().getResourceAsStream("/config/elasticsearch.yaml");
InputStream properties = getClass().getResourceAsStream("/config/elasticsearch.properties");
Path home = createTempDir();
Path config = home.resolve("config");
Files.createDirectory(config);
Files.copy(yaml, config.resolve("elasticsearch.yaml"));
Files.copy(properties, config.resolve("elasticsearch.properties"));
expectedException.expect(SettingsException.class);
expectedException.expectMessage("multiple settings files found with suffixes: ");
expectedException.expectMessage("yaml");
expectedException.expectMessage("properties");
InternalSettingsPreparer.prepareSettings(settingsBuilder()
.put("config.ignore_system_properties", true)
.put("path.home", home)
.build(), true);
}
}

View File

@ -32,6 +32,7 @@ import org.elasticsearch.test.rest.client.http.HttpResponse;
import org.junit.Test;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.not;
/**
* Tests that by default the error_trace parameter can be used to show stacktraces
@ -59,6 +60,16 @@ public class DetailedErrorsEnabledIT extends ESIntegTestCase {
.execute();
assertThat(response.getHeaders().get("Content-Type"), containsString("application/json"));
assertThat(response.getBody(), containsString("\"error_trace\":{\"message\":\"Validation Failed"));
assertThat(response.getBody(), containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; nested: ActionRequestValidationException[Validation Failed: 1:"));
// Make the HTTP request
response = new HttpRequestBuilder(HttpClients.createDefault())
.httpTransport(internalCluster().getDataNodeInstance(HttpServerTransport.class))
.path("/")
.method(HttpDeleteWithEntity.METHOD_NAME)
.execute();
assertThat(response.getHeaders().get("Content-Type"), containsString("application/json"));
assertThat(response.getBody(), not(containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; nested: ActionRequestValidationException[Validation Failed: 1:")));
}
}

View File

@ -176,6 +176,25 @@ public class PluginInfoTests extends ESTestCase {
}
}
public void testReadFromPropertiesBadJavaVersionFormat() throws Exception {
String pluginName = "fake-plugin";
Path pluginDir = createTempDir().resolve(pluginName);
writeProperties(pluginDir,
"description", "fake desc",
"name", pluginName,
"elasticsearch.version", Version.CURRENT.toString(),
"java.version", "1.7.0_80",
"classname", "FakePlugin",
"version", "1.0",
"jvm", "true");
try {
PluginInfo.readFromProperties(pluginDir);
fail("expected bad java version format exception");
} catch (IllegalStateException e) {
assertTrue(e.getMessage(), e.getMessage().equals("version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was 1.7.0_80"));
}
}
public void testReadFromPropertiesBogusElasticsearchVersion() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
writeProperties(pluginDir,

View File

@ -563,6 +563,7 @@ public class PluginManagerIT extends ESIntegTestCase {
PluginManager.checkForOfficialPlugins("lang-python");
PluginManager.checkForOfficialPlugins("mapper-murmur3");
PluginManager.checkForOfficialPlugins("mapper-size");
PluginManager.checkForOfficialPlugins("discovery-multicast");
try {
PluginManager.checkForOfficialPlugins("elasticsearch-mapper-attachment");

View File

@ -105,8 +105,8 @@ public class BytesRestResponseTests extends ESTestCase {
BytesRestResponse response = new BytesRestResponse(channel, t);
String text = response.content().toUtf8();
assertThat(text, containsString("\"type\":\"throwable\",\"reason\":\"an error occurred reading data\""));
assertThat(text, containsString("{\"type\":\"file_not_found_exception\",\"reason\":\"/foo/bar\"}"));
assertThat(text, containsString("\"error_trace\":{\"message\":\"an error occurred reading data\""));
assertThat(text, containsString("{\"type\":\"file_not_found_exception\""));
assertThat(text, containsString("\"stack_trace\":\"[an error occurred reading data]"));
}
public void testGuessRootCause() throws IOException {
@ -176,7 +176,6 @@ public class BytesRestResponseTests extends ESTestCase {
DetailedExceptionRestChannel(RestRequest request) {
super(request, true);
request.params().put(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "true");
}
@Override

View File

@ -22,11 +22,13 @@ import com.google.common.base.Strings;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper;
import org.elasticsearch.index.mapper.internal.IndexFieldMapper;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
import org.elasticsearch.search.aggregations.bucket.filter.Filter;
import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
@ -388,7 +390,7 @@ public class StringTermsIT extends AbstractTermsTestCase {
assertThat(bucket.getDocCount(), equalTo(1l));
}
// Check case with only exact term exclude clauses
response = client()
.prepareSearch("idx")
.setTypes("high_card_type")
@ -690,11 +692,11 @@ public class StringTermsIT extends AbstractTermsTestCase {
}
/*
*
* [foo_val0, foo_val1] [foo_val1, foo_val2] [foo_val2, foo_val3] [foo_val3,
* foo_val4] [foo_val4, foo_val5]
*
*
* foo_val0 - doc_count: 1 - val_count: 2 foo_val1 - doc_count: 2 -
* val_count: 4 foo_val2 - doc_count: 2 - val_count: 4 foo_val3 - doc_count:
* 2 - val_count: 4 foo_val4 - doc_count: 2 - val_count: 4 foo_val5 -
@ -995,6 +997,36 @@ public class StringTermsIT extends AbstractTermsTestCase {
}
}
@Test
public void singleValuedField_OrderedByIllegalAgg() throws Exception {
boolean asc = true;
try {
client()
.prepareSearch("idx")
.setTypes("type")
.addAggregation(
terms("terms").executionHint(randomExecutionHint()).field(SINGLE_VALUED_FIELD_NAME)
.collectMode(randomFrom(SubAggCollectionMode.values()))
.order(Terms.Order.aggregation("inner_terms>avg", asc))
.subAggregation(terms("inner_terms").field(MULTI_VALUED_FIELD_NAME).subAggregation(avg("avg").field("i"))))
.execute().actionGet();
fail("Expected an exception");
} catch (SearchPhaseExecutionException e) {
ElasticsearchException[] rootCauses = e.guessRootCauses();
if (rootCauses.length == 1) {
ElasticsearchException rootCause = rootCauses[0];
if (rootCause instanceof AggregationExecutionException) {
AggregationExecutionException aggException = (AggregationExecutionException) rootCause;
assertThat(aggException.getMessage(), Matchers.startsWith("Invalid terms aggregation order path"));
} else {
throw e;
}
} else {
throw e;
}
}
}
@Test
public void singleValuedField_OrderedBySingleBucketSubAggregationAsc() throws Exception {
boolean asc = randomBoolean();

View File

@ -227,11 +227,6 @@ public final class InternalTestCluster extends TestCluster {
private ServiceDisruptionScheme activeDisruptionScheme;
private String nodeMode;
public InternalTestCluster(String nodeMode, long clusterSeed, Path baseDir, int minNumDataNodes, int maxNumDataNodes, String clusterName, int numClientNodes,
boolean enableHttpPipelining, String nodePrefix) {
this(nodeMode, clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, DEFAULT_SETTINGS_SOURCE, numClientNodes, enableHttpPipelining, nodePrefix);
}
public InternalTestCluster(String nodeMode, long clusterSeed, Path baseDir,
int minNumDataNodes, int maxNumDataNodes, String clusterName, SettingsSource settingsSource, int numClientNodes,
boolean enableHttpPipelining, String nodePrefix) {

View File

@ -64,10 +64,16 @@ public class VersionUtils {
public static List<Version> allVersions() {
return Collections.unmodifiableList(SORTED_VERSIONS);
}
public static Version getPreviousVersion(Version version) {
int index = SORTED_VERSIONS.indexOf(version);
assert index > 0;
return SORTED_VERSIONS.get(index - 1);
}
/** Returns the {@link Version} before the {@link Version#CURRENT} */
public static Version getPreviousVersion() {
Version version = SORTED_VERSIONS.get(SORTED_VERSIONS.size() - 2);
Version version = getPreviousVersion(Version.CURRENT);
assert version.before(Version.CURRENT);
return version;
}

View File

@ -23,6 +23,8 @@ import com.google.common.collect.ImmutableMap;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.Requests;
import org.elasticsearch.cluster.ClusterState;
@ -32,11 +34,14 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.SettingsSource;
import org.elasticsearch.test.TestCluster;
import org.junit.After;
import org.junit.AfterClass;
@ -44,6 +49,8 @@ import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@ -69,8 +76,21 @@ public class TribeIT extends ESIntegTestCase {
@BeforeClass
public static void setupSecondCluster() throws Exception {
ESIntegTestCase.beforeClass();
SettingsSource source = new SettingsSource() {
@Override
public Settings node(int nodeOrdinal) {
final int base = InternalTestCluster.BASE_PORT + 1000;
return Settings.builder().put("transport.tcp.port", base + "-" + (base + 100)).build();
}
@Override
public Settings transportClient() {
return node(0);
}
};
// create another cluster
cluster2 = new InternalTestCluster(InternalTestCluster.configuredNodeMode(), randomLong(), createTempDir(), 2, 2, Strings.randomBase64UUID(getRandom()), 0, false, SECOND_CLUSTER_NODE_PREFIX);
cluster2 = new InternalTestCluster(InternalTestCluster.configuredNodeMode(), randomLong(), createTempDir(), 2, 2, Strings.randomBase64UUID(getRandom()), source, 0, false, SECOND_CLUSTER_NODE_PREFIX);
cluster2.beforeTest(getRandom(), 0.1);
cluster2.ensureAtLeastNumDataNodes(2);
}
@ -109,6 +129,10 @@ public class TribeIT extends ESIntegTestCase {
tribe1Defaults.put("tribe.t1." + entry.getKey(), entry.getValue());
tribe2Defaults.put("tribe.t2." + entry.getKey(), entry.getValue());
}
// give each tribe its own unicast hosts to connect to
tribe1Defaults.putArray("tribe.t1." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS, getUnicastHosts(internalCluster().client()));
tribe1Defaults.putArray("tribe.t2." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS, getUnicastHosts(cluster2.client()));
Settings merged = Settings.builder()
.put("tribe.t1.cluster.name", internalCluster().getClusterName())
.put("tribe.t2.cluster.name", cluster2.getClusterName())
@ -421,4 +445,14 @@ public class TribeIT extends ESIntegTestCase {
}
return count;
}
public String[] getUnicastHosts(Client client) {
ArrayList<String> unicastHosts = new ArrayList<>();
NodesInfoResponse nodeInfos = client.admin().cluster().prepareNodesInfo().clear().setTransport(true).get();
for (NodeInfo info : nodeInfos.getNodes()) {
TransportAddress address = info.getTransport().getAddress().publishAddress();
unicastHosts.add(address.getAddress() + ":" + address.getPort());
}
return unicastHosts.toArray(new String[unicastHosts.size()]);
}
}

View File

@ -0,0 +1,7 @@
SKDFLK@$#L%@KL#%L#@$#@L$ #L$@$ #L@K$#L $L $K#L#@L $#L
!!@!@$(#%#)(@)% #(%)
#(%#@)%@#)% (@#%()
()#%@#% (@ )%@%(@#)% @( %)@ %(@)
)(%)@()(%)()(#%)@#
node.name: "Hiro Takachiho"

View File

@ -1,764 +0,0 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import tempfile
import shutil
import os
import datetime
import json
import time
import sys
import argparse
import hmac
import urllib
import fnmatch
import socket
import urllib.request
import subprocess
from functools import partial
from http.client import HTTPConnection
from http.client import HTTPSConnection
"""
This tool builds a release from the a given elasticsearch branch.
In order to execute it go in the top level directory and run:
$ python3 dev_tools/build_release.py --branch 0.90 --publish --remote origin
By default this script runs in 'dry' mode which essentially simulates a release. If the
'--publish' option is set the actual release is done. The script takes over almost all
steps necessary for a release from a high level point of view it does the following things:
- run prerequisit checks ie. check for Java 1.7 being presend or S3 credentials available as env variables
- detect the version to release from the specified branch (--branch) or the current branch
- creates a release branch & updates pom.xml and Version.java to point to a release version rather than a snapshot
- builds the artifacts and runs smoke-tests on the build zip & tar.gz files
- commits the new version and merges the release branch into the source branch
- creates a tag and pushes the commit to the specified origin (--remote)
- publishes the releases to Sonatype and S3
Once it's done it will print all the remaining steps.
Prerequisites:
- Python 3k for script execution
- Boto for S3 Upload ($ apt-get install python-boto)
- RPM for RPM building ($ apt-get install rpm)
- S3 keys exported via ENV variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
- GPG data exported via ENV variables (GPG_KEY_ID, GPG_PASSPHRASE, optionally GPG_KEYRING)
- S3 target repository via ENV variables (S3_BUCKET_SYNC_TO, optionally S3_BUCKET_SYNC_FROM)
"""
env = os.environ
PLUGINS = [('license', 'elasticsearch/license/latest'),
('bigdesk', 'lukas-vlcek/bigdesk'),
('paramedic', 'karmi/elasticsearch-paramedic'),
('segmentspy', 'polyfractal/elasticsearch-segmentspy'),
('inquisitor', 'polyfractal/elasticsearch-inquisitor'),
('head', 'mobz/elasticsearch-head')]
LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log')
# console colors
COLOR_OK = '\033[92m'
COLOR_END = '\033[0m'
COLOR_FAIL = '\033[91m'
def log(msg):
log_plain('\n%s' % msg)
def log_plain(msg):
f = open(LOG, mode='ab')
f.write(msg.encode('utf-8'))
f.close()
def run(command, quiet=False):
log('%s: RUN: %s\n' % (datetime.datetime.now(), command))
if os.system('%s >> %s 2>&1' % (command, LOG)):
msg = ' FAILED: %s [see log %s]' % (command, LOG)
if not quiet:
print(msg)
raise RuntimeError(msg)
try:
JAVA_HOME = env['JAVA_HOME']
except KeyError:
raise RuntimeError("""
Please set JAVA_HOME in the env before running the release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""")
try:
JAVA_HOME = env['JAVA7_HOME']
except KeyError:
pass #no JAVA7_HOME - we rely on JAVA_HOME
try:
# make sure mvn3 is used if mvn3 is available
# some systems use maven 2 as default
subprocess.check_output('mvn3 --version', shell=True, stderr=subprocess.STDOUT)
MVN = 'mvn3'
except subprocess.CalledProcessError:
MVN = 'mvn'
def java_exe():
path = JAVA_HOME
return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path)
def verify_java_version(version):
s = os.popen('%s; java -version 2>&1' % java_exe()).read()
if ' version "%s.' % version not in s:
raise RuntimeError('got wrong version for java %s:\n%s' % (version, s))
# Verifies the java version. We guarantee that we run with Java 1.7
# If 1.7 is not available, fail the build!
def verify_mvn_java_version(version, mvn):
s = os.popen('%s; %s --version 2>&1' % (java_exe(), mvn)).read()
if 'Java version: %s' % version not in s:
raise RuntimeError('got wrong java version for %s %s:\n%s' % (mvn, version, s))
# Returns the hash of the current git HEAD revision
def get_head_hash():
return os.popen(' git rev-parse --verify HEAD 2>&1').read().strip()
# Returns the hash of the given tag revision
def get_tag_hash(tag):
return os.popen('git show-ref --tags %s --hash 2>&1' % (tag)).read().strip()
# Returns the name of the current branch
def get_current_branch():
return os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read().strip()
# Utility that returns the name of the release branch for a given version
def release_branch(version):
return 'release_branch_%s' % version
# runs git fetch on the given remote
def fetch(remote):
run('git fetch %s' % remote)
# Creates a new release branch from the given source branch
# and rebases the source branch from the remote before creating
# the release branch. Note: This fails if the source branch
# doesn't exist on the provided remote.
def create_release_branch(remote, src_branch, release):
run('git checkout %s' % src_branch)
run('git pull --rebase %s %s' % (remote, src_branch))
run('git checkout -b %s' % (release_branch(release)))
# Reads the given file and applies the
# callback to it. If the callback changed
# a line the given file is replaced with
# the modified input.
def process_file(file_path, line_callback):
fh, abs_path = tempfile.mkstemp()
modified = False
with open(abs_path,'w', encoding='utf-8') as new_file:
with open(file_path, encoding='utf-8') as old_file:
for line in old_file:
new_line = line_callback(line)
modified = modified or (new_line != line)
new_file.write(new_line)
os.close(fh)
if modified:
#Remove original file
os.remove(file_path)
#Move new file
shutil.move(abs_path, file_path)
return True
else:
# nothing to do - just remove the tmp file
os.remove(abs_path)
return False
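# Illustration only (hypothetical arguments): the call below would rewrite pom.xml
# in place and return True if the callback changed any line:
#   process_file('pom.xml', lambda line: line.replace('0.90.7-SNAPSHOT', '0.90.7'))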
# Walks the given directory path (defaults to 'docs')
# and replaces all 'coming[$version]' tags with
# 'added[$version]'. This method only accesses asciidoc files.
def update_reference_docs(release_version, path='docs'):
pattern = 'coming[%s' % (release_version)
replacement = 'added[%s' % (release_version)
pending_files = []
def callback(line):
return line.replace(pattern, replacement)
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, '*.asciidoc'):
full_path = os.path.join(root, file_name)
if process_file(full_path, callback):
pending_files.append(os.path.join(root, file_name))
return pending_files
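# e.g. update_reference_docs('1.6.0') rewrites 'coming[1.6.0]' markers to 'added[1.6.0]'
# in every *.asciidoc file under docs/ and returns the list of files it touched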
# Moves the pom.xml file from a snapshot to a release
def remove_maven_snapshot(pom, release):
pattern = '<version>%s-SNAPSHOT</version>' % (release)
replacement = '<version>%s</version>' % (release)
def callback(line):
return line.replace(pattern, replacement)
process_file(pom, callback)
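# e.g. remove_maven_snapshot('pom.xml', '0.90.7') turns
# '<version>0.90.7-SNAPSHOT</version>' into '<version>0.90.7</version>'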
# Moves the Version.java file from a snapshot to a release
def remove_version_snapshot(version_file, release):
# 1.0.0.Beta1 -> 1_0_0_Beta1
release = release.replace('.', '_')
pattern = 'new Version(V_%s_ID, true' % (release)
replacement = 'new Version(V_%s_ID, false' % (release)
def callback(line):
return line.replace(pattern, replacement)
process_file(version_file, callback)
# Stages the given files for the next git commit
def add_pending_files(*files):
for file in files:
run('git add %s' % (file))
# Executes a git commit with 'release [version]' as the commit message
def commit_release(release):
run('git commit -m "release [%s]"' % release)
def commit_feature_flags(release):
run('git commit -m "Update Documentation Feature Flags [%s]"' % release)
def tag_release(release):
run('git tag -a v%s -m "Tag release version %s"' % (release, release))
def run_mvn(*cmd):
for c in cmd:
run('%s; %s %s' % (java_exe(), MVN, c))
def build_release(release_version, run_tests=False, dry_run=True, cpus=1, bwc_version=None):
target = 'deploy'
if dry_run:
target = 'package'
if run_tests:
run_mvn('clean',
'test -Dtests.jvms=%s -Des.node.mode=local' % (cpus),
'test -Dtests.jvms=%s -Des.node.mode=network' % (cpus))
if bwc_version:
print('Running Backwards compatibility tests against version [%s]' % (bwc_version))
run_mvn('clean', 'test -Dtests.filter=@backwards -Dtests.bwc.version=%s -Dtests.bwc=true -Dtests.jvms=1' % bwc_version)
run_mvn('clean test-compile -Dforbidden.test.signatures="org.apache.lucene.util.LuceneTestCase\$AwaitsFix @ Please fix all bugs before release"')
# don't sign the RPM, so older distros will be able to use the uploaded RPM package
gpg_args = '-Dgpg.key="%s" -Dgpg.passphrase="%s" -Ddeb.sign=true -Drpm.sign=false' % (env.get('GPG_KEY_ID'), env.get('GPG_PASSPHRASE'))
if env.get('GPG_KEYRING'):
gpg_args += ' -Dgpg.keyring="%s"' % env.get('GPG_KEYRING')
run_mvn('clean %s -DskipTests %s' % (target, gpg_args))
success = False
try:
# create additional signed RPM for the repositories
run_mvn('-f distribution/rpm/pom.xml package -DskipTests -Dsign.rpm=true -Drpm.outputDirectory=target/releases/signed/ %s' % (gpg_args))
rpm = os.path.join('target/releases/signed', 'elasticsearch-%s.rpm' % release_version)
if os.path.isfile(rpm):
log('Signed RPM [%s] contains: ' % rpm)
run('rpm -pqli %s' % rpm)
success = True
finally:
if not success:
print("""
RPM Bulding failed make sure "rpm" tools are installed.
Use on of the following commands to install:
$ brew install rpm # on OSX
$ apt-get install rpm # on Ubuntu et.al
""")
# Uses the github API to fetch open tickets for the given release version
# if it finds any tickets open for that version it will throw an exception
def ensure_no_open_tickets(version):
version = "v%s" % version
conn = HTTPSConnection('api.github.com')
try:
log('Checking for open tickets on Github for version %s' % version)
conn.request('GET', '/repos/elastic/elasticsearch/issues?state=open&labels=%s' % version, headers= {'User-Agent' : 'Elasticsearch version checker'})
res = conn.getresponse()
if res.status == 200:
issues = json.loads(res.read().decode("utf-8"))
if issues:
urls = []
for issue in issues:
urls.append(issue['html_url'])
raise RuntimeError('Found open issues for release version %s:\n%s' % (version, '\n'.join(urls)))
else:
log("No open issues found for version %s" % version)
else:
raise RuntimeError('Failed to fetch issue list from Github for release version %s' % version)
except socket.error as e:
log("Failed to fetch issue list from Github for release version %s' % version - Exception: [%s]" % (version, e))
#that is ok it might not be there yet
finally:
conn.close()
def wait_for_node_startup(host='127.0.0.1', port=9200, timeout=15):
for _ in range(timeout):
conn = HTTPConnection(host, port, timeout)
try:
log('Waiting 1 second for the node to become available')
time.sleep(1)
log('Check if node is available')
conn.request('GET', '')
res = conn.getresponse()
if res.status == 200:
return True
except socket.error as e:
log("Failed while waiting for node - Exception: [%s]" % e)
#that is ok it might not be there yet
finally:
conn.close()
return False
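# e.g. wait_for_node_startup(port=9200, timeout=30) polls http://127.0.0.1:9200/
# once per second and returns True as soon as the node answers with HTTP 200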
# Ensures we are using a true Lucene release, not a snapshot build:
def verify_lucene_version():
s = open('pom.xml', encoding='utf-8').read()
if 'download.elastic.co/lucenesnapshots' in s:
raise RuntimeError('pom.xml contains download.elastic.co/lucenesnapshots repository: remove that before releasing')
m = re.search(r'<lucene.version>(.*?)</lucene.version>', s)
if m is None:
raise RuntimeError('unable to locate lucene.version in pom.xml')
lucene_version = m.group(1)
m = re.search(r'<lucene.maven.version>(.*?)</lucene.maven.version>', s)
if m is None:
raise RuntimeError('unable to locate lucene.maven.version in pom.xml')
lucene_maven_version = m.group(1)
if lucene_version != lucene_maven_version:
raise RuntimeError('pom.xml is still using a snapshot release of lucene (%s): cutover to a real lucene release before releasing' % lucene_maven_version)
# Checks the pom.xml for the release version.
# This method fails if the pom file has no SNAPSHOT version set, i.e.
# if the version is already on a release version we fail.
# Returns the next version string, e.g. 0.90.7
def find_release_version(src_branch):
run('git checkout %s' % src_branch)
with open('pom.xml', encoding='utf-8') as file:
for line in file:
match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
if match:
return match.group(1)
raise RuntimeError('Could not find release version in branch %s' % src_branch)
def artifact_names(release):
artifacts = []
artifacts.append(os.path.join('distribution/zip/target/releases', 'elasticsearch-%s.zip' % (release)))
artifacts.append(os.path.join('distribution/tar/target/releases', 'elasticsearch-%s.tar.gz' % (release)))
artifacts.append(os.path.join('distribution/deb/target/releases', 'elasticsearch-%s.deb' % (release)))
artifacts.append(os.path.join('distribution/rpm/target/releases', 'elasticsearch-%s.rpm' % (release)))
return artifacts
def get_artifacts(release):
common_artifacts = artifact_names(release)
for f in common_artifacts:
if not os.path.isfile(f):
raise RuntimeError('Could not find required artifact at %s' % f)
return common_artifacts
# Sample URL:
# http://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/elasticsearch-rpm/2.0.0-beta1-SNAPSHOT/elasticsearch-rpm-2.0.0-beta1-SNAPSHOT.rpm
def download_and_verify(release, files, plugins=None, base_url='https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution'):
print('Downloading and verifying release %s from %s' % (release, base_url))
tmp_dir = tempfile.mkdtemp()
try:
downloaded_files = []
for file in files:
name = os.path.basename(file)
if name.endswith('tar.gz'):
url = '%s/tar/elasticsearch/%s/%s' % (base_url, release, name)
elif name.endswith('zip'):
url = '%s/zip/elasticsearch/%s/%s' % (base_url, release, name)
elif name.endswith('rpm'):
url = '%s/rpm/elasticsearch/%s/%s' % (base_url, release, name)
elif name.endswith('deb'):
url = '%s/deb/elasticsearch/%s/%s' % (base_url, release, name)
abs_file_path = os.path.join(tmp_dir, name)
print(' Downloading %s' % (url))
downloaded_files.append(abs_file_path)
urllib.request.urlretrieve(url, abs_file_path)
url = ''.join([url, '.sha1'])
checksum_file = os.path.join(tmp_dir, ''.join([abs_file_path, '.sha1']))
urllib.request.urlretrieve(url, checksum_file)
print(' Verifying checksum %s' % (checksum_file))
run('cd %s && sha1sum -c %s' % (tmp_dir, os.path.basename(checksum_file)))
smoke_test_release(release, downloaded_files, get_tag_hash('v%s' % release), plugins)
print(' SUCCESS')
finally:
shutil.rmtree(tmp_dir)
def smoke_test_release(release, files, expected_hash, plugins):
for release_file in files:
if not os.path.isfile(release_file):
raise RuntimeError('Smoke test failed; missing file %s' % (release_file))
tmp_dir = tempfile.mkdtemp()
if release_file.endswith('tar.gz'):
run('tar -xzf %s -C %s' % (release_file, tmp_dir))
elif release_file.endswith('zip'):
run('unzip %s -d %s' % (release_file, tmp_dir))
else:
log('Skip SmokeTest for [%s]' % release_file)
continue # nothing to do here
es_run_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch')
print(' Smoke testing package [%s]' % release_file)
es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release),'bin/plugin')
plugin_names = {}
for name, plugin in plugins:
print(' Install plugin [%s] from [%s]' % (name, plugin))
run('%s; %s install %s' % (java_exe(), es_plugin_path, plugin))
plugin_names[name] = True
background = '-d'
print(' Starting elasticsearch daemon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release))
run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.script.inline=on -Des.script.indexed=on %s'
% (java_exe(), es_run_path, background))
conn = HTTPConnection('127.0.0.1', 9200, 20)
wait_for_node_startup()
try:
try:
conn.request('GET', '')
res = conn.getresponse()
if res.status == 200:
version = json.loads(res.read().decode("utf-8"))['version']
if release != version['number']:
raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
if version['build_snapshot']:
raise RuntimeError('Expected non snapshot version')
if version['build_hash'].strip() != expected_hash:
raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash']))
print(' Running REST Spec tests against package [%s]' % release_file)
run_mvn('test -Dtests.cluster=%s -Dtests.jvms=1 -Dtests.class=*.*RestTests' % ("127.0.0.1:9300"))
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes?plugin=true&pretty=true')
res = conn.getresponse()
if res.status == 200:
nodes = json.loads(res.read().decode("utf-8"))['nodes']
for _, node in nodes.items():
node_plugins = node['plugins']
for node_plugin in node_plugins:
if not plugin_names.get(node_plugin['name'], False):
raise RuntimeError('Unexpected plugin %s' % node_plugin['name'])
del plugin_names[node_plugin['name']]
if plugin_names:
raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
else:
raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
finally:
conn.request('POST', '/_cluster/nodes/_local/_shutdown')
time.sleep(1) # give the node some time to shut down
shutdown_response = conn.getresponse()
if shutdown_response.status != 200:
raise RuntimeError('Expected HTTP 200 but got %s on node shutdown' % shutdown_response.status)
finally:
conn.close()
shutil.rmtree(tmp_dir)
def merge_tag_push(remote, src_branch, release_version, dry_run):
run('git checkout %s' % src_branch)
run('git merge %s' % release_branch(release_version))
run('git tag v%s' % release_version)
if not dry_run:
run('git push %s %s' % (remote, src_branch)) # push the commit
run('git push %s v%s' % (remote, release_version)) # push the tag
else:
print(' dryrun [True] -- skipping push to remote %s' % remote)
def publish_repositories(version, dry_run=True):
if dry_run:
print('Skipping package repository update')
else:
print('Triggering repository update for version %s - calling dev-tools/build_repositories.sh %s' % (version, src_branch))
# src_branch is a version like 1.5/1.6/2.0/etc., so we can use this
run('dev-tools/build_repositories.sh %s' % src_branch)
def print_sonatype_notice():
settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml')
if os.path.isfile(settings):
with open(settings, encoding='utf-8') as settings_file:
for line in settings_file:
if line.strip() == '<id>sonatype-nexus-snapshots</id>':
# moving out - we found the indicator, no need to print the warning
return
print("""
NOTE: No sonatype settings detected, make sure you have configured
your sonatype credentials in '~/.m2/settings.xml':
<settings>
...
<servers>
<server>
<id>sonatype-nexus-snapshots</id>
<username>your-jira-id</username>
<password>your-jira-pwd</password>
</server>
<server>
<id>sonatype-nexus-staging</id>
<username>your-jira-id</username>
<password>your-jira-pwd</password>
</server>
</servers>
...
</settings>
""")
def check_command_exists(name, cmd):
try:
subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
raise RuntimeError('Could not run command %s - please make sure it is installed' % (name))
VERSION_FILE = 'src/main/java/org/elasticsearch/Version.java'
POM_FILE = 'pom.xml'
# finds the highest available bwc version to test against
def find_bwc_version(release_version, bwc_dir='backwards'):
log(' Lookup bwc version in directory [%s]' % bwc_dir)
bwc_version = None
if os.path.exists(bwc_dir) and os.path.isdir(bwc_dir):
max_version = [int(x) for x in release_version.split('.')]
for dir in os.listdir(bwc_dir):
if os.path.isdir(os.path.join(bwc_dir, dir)) and dir.startswith('elasticsearch-'):
version = [int(x) for x in dir[len('elasticsearch-'):].split('.')]
if version < max_version: # bwc tests only against smaller versions
if (not bwc_version) or version > [int(x) for x in bwc_version.split('.')]:
bwc_version = dir[len('elasticsearch-'):]
log(' Using bwc version [%s]' % bwc_version)
else:
log(' bwc directory [%s] does not exist or is not a directory - skipping' % bwc_dir)
return bwc_version
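# Illustration (hypothetical directory layout): with backwards/elasticsearch-1.6.0 and
# backwards/elasticsearch-1.7.1 present, find_bwc_version('2.0.0') would return '1.7.1',
# the highest version strictly smaller than the release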
def ensure_checkout_is_clean(branchName):
# Make sure no local mods:
s = subprocess.check_output('git diff --shortstat', shell=True)
if len(s) > 0:
raise RuntimeError('git diff --shortstat is non-empty: got:\n%s' % s)
# Make sure no untracked files:
s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
if 'Untracked files:' in s:
raise RuntimeError('git status shows untracked files: got:\n%s' % s)
# Make sure we are on the right branch (NOTE: a bit weak, since we default to current branch):
if 'On branch %s' % branchName not in s:
raise RuntimeError('git status does not show branch %s: got:\n%s' % (branchName, s))
# Make sure we have all changes from origin:
if 'is behind' in s:
raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin %s": got:\n%s' % (branchName, s))
# Make sure we have no local unpushed changes (this is supposed to be a clean area):
if 'is ahead' in s:
raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout %s", "git reset --hard origin/%s": got:\n%s' % (branchName, branchName, s))
# Checks all source files for //NORELEASE comments
def check_norelease(path='src'):
pattern = re.compile(r'\bnorelease\b', re.IGNORECASE)
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, '*.java'):
full_path = os.path.join(root, file_name)
line_number = 0
with open(full_path, 'r', encoding='utf-8') as current_file:
for line in current_file:
line_number = line_number + 1
if pattern.search(line):
raise RuntimeError('Found //norelease comment in %s line %s' % (full_path, line_number))
def run_and_print(text, run_function):
try:
print(text, end='')
run_function()
print(COLOR_OK + 'OK' + COLOR_END)
return True
except RuntimeError:
print(COLOR_FAIL + 'NOT OK' + COLOR_END)
return False
def check_env_var(text, env_var):
try:
print(text, end='')
env[env_var]
print(COLOR_OK + 'OK' + COLOR_END)
return True
except KeyError:
print(COLOR_FAIL + 'NOT OK' + COLOR_END)
return False
def check_environment_and_commandline_tools(check_only):
checks = list()
checks.append(check_env_var('Checking for AWS env configuration AWS_SECRET_ACCESS_KEY... ', 'AWS_SECRET_ACCESS_KEY'))
checks.append(check_env_var('Checking for AWS env configuration AWS_ACCESS_KEY_ID... ', 'AWS_ACCESS_KEY_ID'))
checks.append(check_env_var('Checking for SONATYPE env configuration SONATYPE_USERNAME... ', 'SONATYPE_USERNAME'))
checks.append(check_env_var('Checking for SONATYPE env configuration SONATYPE_PASSWORD... ', 'SONATYPE_PASSWORD'))
checks.append(check_env_var('Checking for GPG env configuration GPG_KEY_ID... ', 'GPG_KEY_ID'))
checks.append(check_env_var('Checking for GPG env configuration GPG_PASSPHRASE... ', 'GPG_PASSPHRASE'))
checks.append(check_env_var('Checking for S3 repo upload env configuration S3_BUCKET_SYNC_TO... ', 'S3_BUCKET_SYNC_TO'))
checks.append(check_env_var('Checking for git env configuration GIT_AUTHOR_NAME... ', 'GIT_AUTHOR_NAME'))
checks.append(check_env_var('Checking for git env configuration GIT_AUTHOR_EMAIL... ', 'GIT_AUTHOR_EMAIL'))
checks.append(run_and_print('Checking command: rpm... ', partial(check_command_exists, 'rpm', 'rpm --version')))
checks.append(run_and_print('Checking command: dpkg... ', partial(check_command_exists, 'dpkg', 'dpkg --version')))
checks.append(run_and_print('Checking command: gpg... ', partial(check_command_exists, 'gpg', 'gpg --version')))
checks.append(run_and_print('Checking command: expect... ', partial(check_command_exists, 'expect', 'expect -v')))
checks.append(run_and_print('Checking command: createrepo... ', partial(check_command_exists, 'createrepo', 'createrepo --version')))
checks.append(run_and_print('Checking command: s3cmd... ', partial(check_command_exists, 's3cmd', 's3cmd --version')))
checks.append(run_and_print('Checking command: apt-ftparchive... ', partial(check_command_exists, 'apt-ftparchive', 'apt-ftparchive --version')))
# boto, check error code being returned
location = os.path.dirname(os.path.realpath(__file__))
command = 'python %s/upload-s3.py -h' % (location)
checks.append(run_and_print('Testing boto python dependency... ', partial(check_command_exists, 'python-boto', command)))
checks.append(run_and_print('Checking java version... ', partial(verify_java_version, '1.7')))
checks.append(run_and_print('Checking java mvn version... ', partial(verify_mvn_java_version, '1.7', MVN)))
if check_only:
sys.exit(0)
if False in checks:
print("Exiting due to failing checks")
sys.exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Builds and publishes an Elasticsearch release')
parser.add_argument('--branch', '-b', metavar='RELEASE_BRANCH', default=get_current_branch(),
help='The branch to release from. Defaults to the current branch.')
parser.add_argument('--cpus', '-c', metavar='1', default=1,
help='The number of cpus to use for running the test. Default is [1]')
parser.add_argument('--skiptests', '-t', dest='tests', action='store_false',
help='Skips tests before release. Tests are run by default.')
parser.set_defaults(tests=True)
parser.add_argument('--remote', '-r', metavar='origin', default='origin',
help='The remote to push the release commit and tag to. Default is [origin]')
parser.add_argument('--publish', '-d', dest='dryrun', action='store_false',
help='Publishes the release. Disabled by default.')
parser.add_argument('--smoke', '-s', dest='smoke', default='',
help='Smoke tests the given release')
parser.add_argument('--bwc', '-w', dest='bwc', metavar='backwards', default='backwards',
help='Backwards compatibility version path to use to run compatibility tests against')
parser.add_argument('--check-only', dest='check_only', action='store_true',
help='Checks and reports for all requirements and then exits')
parser.set_defaults(dryrun=True)
parser.set_defaults(smoke=None)
parser.set_defaults(check_only=False)
args = parser.parse_args()
bwc_path = args.bwc
src_branch = args.branch
remote = args.remote
run_tests = args.tests
dry_run = args.dryrun
cpus = args.cpus
build = not args.smoke
smoke_test_version = args.smoke
check_environment_and_commandline_tools(args.check_only)
# we print a notice if we cannot find the relevant info in ~/.m2/settings.xml
print_sonatype_notice()
# we require Java 1.7 to build
verify_java_version('1.7')
verify_mvn_java_version('1.7', MVN)
if os.path.exists(LOG):
raise RuntimeError('please remove old release log %s first' % LOG)
if not dry_run:
print('WARNING: dryrun is set to "false" - this will push and publish the release')
input('Press Enter to continue...')
print(''.join(['-' for _ in range(80)]))
print('Preparing Release from branch [%s] running tests: [%s] dryrun: [%s]' % (src_branch, run_tests, dry_run))
print(' JAVA_HOME is [%s]' % JAVA_HOME)
print(' Running with maven command: [%s] ' % (MVN))
if build:
check_norelease(path='src')
ensure_checkout_is_clean(src_branch)
verify_lucene_version()
release_version = find_release_version(src_branch)
ensure_no_open_tickets(release_version)
if not dry_run:
smoke_test_version = release_version
head_hash = get_head_hash()
run_mvn('clean') # clean the env!
print(' Release version: [%s]' % release_version)
create_release_branch(remote, src_branch, release_version)
print(' Created release branch [%s]' % (release_branch(release_version)))
success = False
try:
pending_files = [POM_FILE, VERSION_FILE]
remove_maven_snapshot(POM_FILE, release_version)
remove_version_snapshot(VERSION_FILE, release_version)
print(' Done removing snapshot version')
add_pending_files(*pending_files) # expects varargs; use * to expand
commit_release(release_version)
pending_files = update_reference_docs(release_version)
version_head_hash = None
# split commits for docs and version to enable easy cherry-picking
if pending_files:
add_pending_files(*pending_files) # expects varargs; use * to expand
commit_feature_flags(release_version)
version_head_hash = get_head_hash()
print(' Committed release version [%s]' % release_version)
print(''.join(['-' for _ in range(80)]))
print('Building Release candidate')
input('Press Enter to continue...')
if not dry_run:
print(' Running maven builds now and publish to Sonatype and S3 - run-tests [%s]' % run_tests)
else:
print(' Running maven builds now run-tests [%s]' % run_tests)
build_release(release_version, run_tests=run_tests, dry_run=dry_run, cpus=cpus, bwc_version=find_bwc_version(release_version, bwc_path))
artifacts = get_artifacts(release_version)
smoke_test_release(release_version, artifacts, get_head_hash(), PLUGINS)
print(''.join(['-' for _ in range(80)]))
print('Finish Release -- dry_run: %s' % dry_run)
input('Press Enter to continue...')
print(' merge release branch, tag and push to %s %s -- dry_run: %s' % (remote, src_branch, dry_run))
merge_tag_push(remote, src_branch, release_version, dry_run)
print(' Updating package repositories -- dry_run: %s' % dry_run)
publish_repositories(src_branch, dry_run=dry_run)
cherry_pick_command = '.'
if version_head_hash:
cherry_pick_command = ' and cherry-pick the documentation changes: \'git cherry-pick %s\' to the development branch' % (version_head_hash)
pending_msg = """
Release successful. Pending steps:
* create a new vX.Y.Z label on github for the next release, with label color #dddddd (https://github.com/elastic/elasticsearch/labels)
* publish the maven artifacts on Sonatype: https://oss.sonatype.org/index.html
- here is a guide: http://central.sonatype.org/pages/releasing-the-deployment.html
* check if the release is there https://oss.sonatype.org/content/repositories/releases/org/elasticsearch/elasticsearch/%(version)s
* announce the release on the website / blog post
* tweet about the release
* announce the release in the google group/mailinglist
* move the current branch back to a snapshot version for the next point release%(cherry_pick)s
"""
print(pending_msg % { 'version' : release_version, 'cherry_pick' : cherry_pick_command} )
success = True
finally:
if not success:
run('git reset --hard HEAD')
run('git checkout %s' % src_branch)
elif dry_run:
run('git reset --hard %s' % head_hash)
run('git tag -d v%s' % release_version)
# we delete this one anyway
run('git branch -D %s' % (release_branch(release_version)))
else:
print("Skipping build - smoketest only against version %s" % smoke_test_version)
run_mvn('clean') # clean the env!
if smoke_test_version:
fetch(remote)
download_and_verify(smoke_test_version, artifact_names(smoke_test_version), plugins=PLUGINS)

View File

@ -0,0 +1,273 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Prepare a release
#
# 1. Update the Version.java to remove the snapshot bit
# 2. Remove the -SNAPSHOT suffix in all pom.xml files
#
# USAGE:
#
# python3 ./dev-tools/prepare-release.py
#
# Note: Ensure the script is run from the root directory
#
import fnmatch
import argparse
from prepare_release_update_documentation import update_reference_docs
import subprocess
import tempfile
import re
import os
import shutil
VERSION_FILE = 'core/src/main/java/org/elasticsearch/Version.java'
POM_FILE = 'pom.xml'
MAIL_TEMPLATE = """
Hi all
The new release candidate for %(version)s based on this commit[1] is now available, including the x-plugins and RPM/deb repos:
- ZIP [2]
- tar.gz [3]
- RPM [4]
- deb [5]
Plugins can be installed as follows:
bin/plugin -Des.plugins.staging=true install cloud-aws
The same goes for the x-plugins:
bin/plugin -Des.plugins.staging=true install license
bin/plugin -Des.plugins.staging=true install shield
bin/plugin -Des.plugins.staging=true install watcher
To install the deb from an APT repo:
APT sources.list line:
deb http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/elasticsearch/%(major_minor_version)s/debian/ stable main
To install the RPM, create a YUM repo file like:
/etc/yum.repos.d/elasticsearch.repo
containing:
[elasticsearch-2.0]
name=Elasticsearch repository for packages
baseurl=http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/elasticsearch/%(major_minor_version)s/centos
gpgcheck=1
gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
[1] https://github.com/elastic/elasticsearch/commit/%(hash)s
[2] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/zip/elasticsearch/%(version)s/elasticsearch-%(version)s.zip
[3] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/tar/elasticsearch/%(version)s/elasticsearch-%(version)s.tar.gz
[4] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/rpm/elasticsearch/%(version)s/elasticsearch-%(version)s.rpm
[5] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/deb/elasticsearch/%(version)s/elasticsearch-%(version)s.deb
"""
def run(command, env_vars=None):
if env_vars:
for key, value in env_vars.items():
os.putenv(key, value)
if os.system('%s' % (command)):
raise RuntimeError(' FAILED: %s' % (command))
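# Hypothetical usage sketch for run(); the commands and values below are
# illustrative only:
#
#   run('mvn --version')
#   run('dev-tools/build_repositories.sh 2.0',
#       env_vars={'S3_BUCKET_SYNC_TO': 'download.example.org/staging/repos'})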
def ensure_checkout_is_clean():
# Make sure no local mods:
s = subprocess.check_output('git diff --shortstat', shell=True).decode('utf-8')
if len(s) > 0:
raise RuntimeError('git diff --shortstat is non-empty got:\n%s' % s)
# Make sure no untracked files:
s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
if 'Untracked files:' in s:
if 'dev-tools/__pycache__/' in s:
print('*** NOTE: invoke python with -B to prevent __pycache__ directories ***')
raise RuntimeError('git status shows untracked files got:\n%s' % s)
# Make sure we have all changes from origin:
if 'is behind' in s:
raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin" in this branch got:\n%s' % (s))
# Make sure we have no local unpushed changes (this is supposed to be a clean area):
if 'is ahead' in s:
raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout ", "git reset --hard origin/" in this branch got:\n%s' % (s))
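# Illustrative sketch (not part of the release flow): fail fast with a clear
# message when the working copy is not pristine.
#
#   try:
#       ensure_checkout_is_clean()
#   except RuntimeError as e:
#       print('aborting release: %s' % e)
#       raise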
# Reads the given file and applies the
# callback to each line. If the callback
# changes a line, the given file is replaced
# with the modified output.
def process_file(file_path, line_callback):
fh, abs_path = tempfile.mkstemp()
modified = False
with open(abs_path,'w', encoding='utf-8') as new_file:
with open(file_path, encoding='utf-8') as old_file:
for line in old_file:
new_line = line_callback(line)
modified = modified or (new_line != line)
new_file.write(new_line)
os.close(fh)
if modified:
#Remove original file
os.remove(file_path)
#Move new file
shutil.move(abs_path, file_path)
return True
else:
# nothing to do - just remove the tmp file
os.remove(abs_path)
return False
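# Hypothetical usage sketch; the file name and strings are illustrative only.
# Drops a -SNAPSHOT marker in pom.xml in place via the line callback:
#
#   def drop_snapshot(line):
#       return line.replace('2.0.0-SNAPSHOT', '2.0.0')
#   if process_file('pom.xml', drop_snapshot):
#       print('pom.xml was rewritten')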
# Switches the Version.java file from a snapshot to a release version
def remove_version_snapshot(version_file, release):
# 1.0.0.Beta1 -> 1_0_0_Beta1
release = release.replace('.', '_')
release = release.replace('-', '_')
pattern = 'new Version(V_%s_ID, true' % (release)
replacement = 'new Version(V_%s_ID, false' % (release)
def callback(line):
return line.replace(pattern, replacement)
processed = process_file(version_file, callback)
if not processed:
raise RuntimeError('failed to remove snapshot version for %s' % (release))
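# Worked example (the version string is hypothetical): for release
# '2.0.0-beta1' the function rewrites the Version.java line
#   new Version(V_2_0_0_beta1_ID, true, ...)
# to
#   new Version(V_2_0_0_beta1_ID, false, ...)
#
#   remove_version_snapshot(VERSION_FILE, '2.0.0-beta1')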
def rename_local_meta_files(path):
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, 'maven-metadata-local.xml*'):
full_path = os.path.join(root, file_name)
os.rename(full_path, os.path.join(root, file_name.replace('-local', '')))
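# Illustrative effect (paths are hypothetical): every maven-metadata-local.xml*
# file under the given path loses its '-local' suffix, e.g.
#   /tmp/repo/org/elasticsearch/maven-metadata-local.xml
#     -> /tmp/repo/org/elasticsearch/maven-metadata.xml
#
#   rename_local_meta_files('/tmp/repo/org/elasticsearch')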
# Checks the pom.xml for the release version.
# This method fails if the pom file has no -SNAPSHOT version set, i.e.
# if the version is already a release version.
# Returns the release version string, e.g. 0.90.7
def find_release_version():
with open('pom.xml', encoding='utf-8') as file:
for line in file:
match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
if match:
return match.group(1)
raise RuntimeError('Could not find release version in branch')
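# Hypothetical sketch: with <version>2.0.0-SNAPSHOT</version> in pom.xml,
# find_release_version() returns '2.0.0'; it raises RuntimeError when the
# pom already carries a release (non-SNAPSHOT) version:
#
#   version = find_release_version()  # e.g. '2.0.0'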
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Builds and publishes an Elasticsearch release')
parser.add_argument('--deploy', '-d', dest='deploy', action='store_true',
help='Installs and deploys the release to a Sonatype staging repository.')
parser.add_argument('--skipDocCheck', '-c', dest='skip_doc_check', action='store_true',
help='Skips any checks for pending documentation changes')
parser.add_argument('--push-s3', '-p', dest='push', action='store_true',
help='Pushes artifacts to the S3 staging area')
parser.add_argument('--install_only', '-i', dest='install_only', action='store_true',
help='Only runs a maven install to skip the remote deployment step')
parser.add_argument('--gpg-key', '-k', dest='gpg_key', default="D88E42B4",
help='Allows you to specify a different gpg_key to be used instead of the default release key')
parser.set_defaults(deploy=False)
parser.set_defaults(skip_doc_check=False)
parser.set_defaults(push=False)
parser.set_defaults(install_only=False)
args = parser.parse_args()
install_and_deploy = args.deploy
skip_doc_check = args.skip_doc_check
push = args.push
gpg_key = args.gpg_key
install_only = args.install_only
ensure_checkout_is_clean()
release_version = find_release_version()
if not re.match(r'(\d+\.\d+)\.*', release_version):
raise RuntimeError('illegal release version format: %s' % (release_version))
major_minor_version = re.match(r'(\d+\.\d+)\.*', release_version).group(1)
print('*** Preparing release version: [%s]' % release_version)
if not skip_doc_check:
print('*** Check for pending documentation changes')
pending_files = update_reference_docs(release_version)
if pending_files:
raise RuntimeError('pending coming[%s] documentation changes found in %s' % (release_version, pending_files))
run('cd dev-tools && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
run('cd rest-api-spec && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
run('mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
remove_version_snapshot(VERSION_FILE, release_version)
print('*** Done removing snapshot version. DO NOT COMMIT THIS WHEN CREATING A RELEASE CANDIDATE.')
shortHash = subprocess.check_output('git log --pretty=format:"%h" -n 1', shell=True).decode('utf-8')
localRepo = '/tmp/elasticsearch-%s-%s' % (release_version, shortHash)
localRepoElasticsearch = localRepo + '/org/elasticsearch'
if os.path.exists(localRepoElasticsearch):
print('clean local repository %s' % localRepoElasticsearch)
shutil.rmtree(localRepoElasticsearch)
if install_only:
mvn_targets = 'install'
else:
mvn_targets = 'install deploy'
install_command = 'mvn clean %s -Prelease -Dskip.integ.tests=true -Dgpg.keyname="%s" -Dpackaging.rpm.rpmbuild=/usr/bin/rpmbuild -Drpm.sign=true -Dmaven.repo.local=%s -Dno.commit.pattern="\\bno(n|)commit\\b" -Dforbidden.test.signatures=""' % (mvn_targets, gpg_key, localRepo)
clean_repo_command = r'find %s -name _remote.repositories -exec rm {} \;' % (localRepoElasticsearch)
rename_metadata_files_command = 'for i in $(find %s -name "maven-metadata-local.xml*") ; do mv "$i" "${i/-local/}" ; done' % (localRepoElasticsearch)
s3_sync_command = 's3cmd sync %s s3://download.elasticsearch.org/elasticsearch/staging/%s-%s/org/' % (localRepoElasticsearch, release_version, shortHash)
s3_bucket_sync_to = 'download.elasticsearch.org/elasticsearch/staging/%s-%s/repos' % (release_version, shortHash)
build_repo_command = 'dev-tools/build_repositories.sh %s' % (major_minor_version)
if install_and_deploy:
for cmd in [install_command, clean_repo_command]:
run(cmd)
rename_local_meta_files(localRepoElasticsearch)
else:
print('')
print('*** To create a release candidate run: ')
print(' %s' % (install_command))
print(' 1. Remove all _remote.repositories: %s' % (clean_repo_command))
print(' 2. Rename all maven metadata files: %s' % (rename_metadata_files_command))
if push:
run(s3_sync_command)
env_vars = {'S3_BUCKET_SYNC_TO': s3_bucket_sync_to}
run(build_repo_command, env_vars)
else:
print('')
print('*** To push a release candidate to s3 run: ')
print(' 1. Sync %s into S3 bucket' % (localRepoElasticsearch))
print(' %s' % (s3_sync_command))
print(' 2. Create repositories: ')
print(' export S3_BUCKET_SYNC_TO="%s"' % (s3_bucket_sync_to))
print(' %s' % (build_repo_command))
print('')
print('NOTE: the above mvn command will prompt you several times for the GPG passphrase of the key you specified; you can alternatively pass it via -Dgpg.passphrase=yourPassPhrase')
print(' since RPM signing doesn\'t support gpg-agents, the recommended way to set the password is to add a release profile to your settings.xml:')
print("""
<profiles>
<profile>
<id>release</id>
<properties>
<gpg.passphrase>YourPasswordGoesHere</gpg.passphrase>
</properties>
</profile>
</profiles>
""")
print('NOTE: Running s3cmd might require you to create a config file with your credentials, if s3cmd does not support supplying them via the command line!')
print('*** Once the release is deployed and published send out the following mail to dev@elastic.co:')
print(MAIL_TEMPLATE % ({'version' : release_version, 'hash': shortHash, 'major_minor_version' : major_minor_version}))

View File

@ -1,144 +0,0 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Prepare a release
#
# 1. Update the Version.java to remove the snapshot bit
# 2. Remove the -SNAPSHOT suffix in all pom.xml files
#
# USAGE:
#
# python3 ./dev-tools/prepare-release.py
#
# Note: Ensure the script is run from the root directory
#
import fnmatch
import subprocess
import tempfile
import re
import os
import shutil
VERSION_FILE = 'core/src/main/java/org/elasticsearch/Version.java'
POM_FILE = 'pom.xml'
def run(command):
if os.system('%s' % (command)):
raise RuntimeError(' FAILED: %s' % (command))
def ensure_checkout_is_clean():
# Make sure no local mods:
s = subprocess.check_output('git diff --shortstat', shell=True)
if len(s) > 0:
raise RuntimeError('git diff --shortstat is non-empty: got:\n%s' % s)
# Make sure no untracked files:
s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
if 'Untracked files:' in s:
raise RuntimeError('git status shows untracked files: got:\n%s' % s)
# Make sure we have all changes from origin:
if 'is behind' in s:
raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin" in this branch: got:\n%s' % (s))
# Make sure we have no local unpushed changes (this is supposed to be a clean area):
if 'is ahead' in s:
raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout ", "git reset --hard origin/" in this branch: got:\n%s' % (s))
# Reads the given file and applies the
# callback to it. If the callback changed
# a line the given file is replaced with
# the modified input.
def process_file(file_path, line_callback):
fh, abs_path = tempfile.mkstemp()
modified = False
with open(abs_path,'w', encoding='utf-8') as new_file:
with open(file_path, encoding='utf-8') as old_file:
for line in old_file:
new_line = line_callback(line)
modified = modified or (new_line != line)
new_file.write(new_line)
os.close(fh)
if modified:
#Remove original file
os.remove(file_path)
#Move new file
shutil.move(abs_path, file_path)
return True
else:
# nothing to do - just remove the tmp file
os.remove(abs_path)
return False
# Moves the Version.java file from a snapshot to a release
def remove_version_snapshot(version_file, release):
# 1.0.0.Beta1 -> 1_0_0_Beta1
release = release.replace('.', '_')
release = release.replace('-', '_')
pattern = 'new Version(V_%s_ID, true' % (release)
replacement = 'new Version(V_%s_ID, false' % (release)
def callback(line):
return line.replace(pattern, replacement)
processed = process_file(version_file, callback)
if not processed:
raise RuntimeError('failed to remove snapshot version for %s' % (release))
# Checks the pom.xml for the release version.
# This method fails if the pom file has no -SNAPSHOT version set, i.e.
# if the version is already a release version.
# Returns the release version string, e.g. 0.90.7
def find_release_version():
with open('pom.xml', encoding='utf-8') as file:
for line in file:
match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
if match:
return match.group(1)
raise RuntimeError('Could not find release version in branch')
if __name__ == "__main__":
release_version = find_release_version()
print('*** Preparing release version: [%s]' % release_version)
ensure_checkout_is_clean()
run('cd dev-tools && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
run('cd rest-api-spec && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
run('mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
remove_version_snapshot(VERSION_FILE, release_version)
print('*** Done removing snapshot version. DO NOT COMMIT THIS, WHEN CREATING A RELEASE CANDIDATE.')
shortHash = subprocess.check_output('git log --pretty=format:"%h" -n 1', shell=True).decode('utf-8')
localRepo = '/tmp/elasticsearch-%s-%s' % (release_version, shortHash)
localRepoElasticsearch = localRepo + '/org/elasticsearch'
print('')
print('*** To create a release candidate run: ')
print(' mvn clean install deploy -Prelease -DskipTests -Dgpg.keyname="D88E42B4" -Dpackaging.rpm.rpmbuild=/usr/bin/rpmbuild -Drpm.sign=true -Dmaven.repo.local=%s -Dno.commit.pattern="\\bno(n|)commit\\b" -Dforbidden.test.signatures=""' % (localRepo))
print(' 1. Remove all _remote.repositories: find %s -name _remote.repositories -exec rm {} \;' % (localRepoElasticsearch))
print(' 2. Rename all maven metadata files: for i in $(find %s -name "maven-metadata-local.xml*") ; do mv "$i" "${i/-local/}" ; done' % (localRepoElasticsearch))
print(' 3. Sync %s into S3 bucket' % (localRepoElasticsearch))
print (' s3cmd sync %s s3://download.elasticsearch.org/elasticsearch/staging/elasticsearch-%s-%s/maven/org/' % (localRepoElasticsearch, release_version, shortHash))
print(' 4. Create repositories: ')
print (' export S3_BUCKET_SYNC_TO="download.elasticsearch.org/elasticsearch/staging/elasticsearch-%s-%s/repos"' % (release_version, shortHash))
print (' export S3_BUCKET_SYNC_FROM="$S3_BUCKET_SYNC_TO"')
print(' dev-tools/build_repositories.sh %s' % (release_version))
print('')
print('NOTE: the above mvn command will prompt you several times for the GPG passphrase of the key you specified; you can alternatively pass it via -Dgpg.passphrase=yourPassPhrase')
print('NOTE: Running s3cmd might require you to create a config file with your credentials, if s3cmd does not support supplying them via the command line!')

View File

@ -58,6 +58,9 @@ jvm=${elasticsearch.plugin.jvm}
classname=${elasticsearch.plugin.classname}
#
# 'java.version' version of java the code is built against
# use the system property java.specification.version
# version string must be a sequence of nonnegative decimal integers
# separated by "."'s and may have leading zeros
java.version=${maven.compiler.target}
#
# 'elasticsearch.version' version of elasticsearch compiled against

View File

@ -50,9 +50,6 @@ public class AwsEc2Service extends AbstractLifecycleComponent<AwsEc2Service> {
@Inject
public AwsEc2Service(Settings settings, SettingsFilter settingsFilter, NetworkService networkService, DiscoveryNodeService discoveryNodeService) {
super(settings);
// Filter global settings
settingsFilter.addFilter("cloud.key");
settingsFilter.addFilter("cloud.account");
settingsFilter.addFilter("cloud.aws.access_key");
settingsFilter.addFilter("cloud.aws.secret_key");
// Filter repository-specific settings
@ -81,8 +78,8 @@ public class AwsEc2Service extends AbstractLifecycleComponent<AwsEc2Service> {
} else {
throw new IllegalArgumentException("No protocol supported [" + protocol + "], can either be [http] or [https]");
}
String account = settings.get("cloud.aws.access_key", settings.get("cloud.account"));
String key = settings.get("cloud.aws.secret_key", settings.get("cloud.key"));
String account = settings.get("cloud.aws.access_key");
String key = settings.get("cloud.aws.secret_key");
String proxyHost = settings.get("cloud.aws.proxy_host");
proxyHost = settings.get("cloud.aws.ec2.proxy_host", proxyHost);

View File

@ -55,8 +55,8 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent<AwsS3Servic
@Override
public synchronized AmazonS3 client() {
String endpoint = getDefaultEndpoint();
String account = settings.get("cloud.aws.access_key", settings.get("cloud.account"));
String key = settings.get("cloud.aws.secret_key", settings.get("cloud.key"));
String account = settings.get("cloud.aws.access_key");
String key = settings.get("cloud.aws.secret_key");
return getClient(endpoint, null, account, key, null);
}
@ -75,8 +75,8 @@ public class InternalAwsS3Service extends AbstractLifecycleComponent<AwsS3Servic
endpoint = getDefaultEndpoint();
}
if (account == null || key == null) {
account = settings.get("cloud.aws.access_key", settings.get("cloud.account"));
key = settings.get("cloud.aws.secret_key", settings.get("cloud.key"));
account = settings.get("cloud.aws.access_key");
key = settings.get("cloud.aws.secret_key");
}
return getClient(endpoint, protocol, account, key, maxRetries);