Merge branch 'master' into lists_are_simple

This commit is contained in:
Ryan Ernst 2015-08-24 19:06:58 -07:00
commit 311faa822a
245 changed files with 4362 additions and 2755 deletions

View File

@@ -0,0 +1,119 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.lucene.queries;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import java.io.IOException;
/** A {@link Query} that only matches documents that are greater than or equal
* to a configured doc ID. */
public final class MinDocQuery extends Query {
private final int minDoc;
/** Sole constructor. */
public MinDocQuery(int minDoc) {
this.minDoc = minDoc;
}
@Override
public int hashCode() {
return 31 * super.hashCode() + minDoc;
}
@Override
public boolean equals(Object obj) {
if (super.equals(obj) == false) {
return false;
}
MinDocQuery that = (MinDocQuery) obj;
return minDoc == that.minDoc;
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new ConstantScoreWeight(this) {
@Override
public Scorer scorer(LeafReaderContext context, final Bits acceptDocs) throws IOException {
final int maxDoc = context.reader().maxDoc();
if (context.docBase + maxDoc <= minDoc) {
return null;
}
final int segmentMinDoc = Math.max(0, minDoc - context.docBase);
final DocIdSetIterator disi = new DocIdSetIterator() {
int doc = -1;
@Override
public int docID() {
return doc;
}
@Override
public int nextDoc() throws IOException {
return advance(doc + 1);
}
@Override
public int advance(int target) throws IOException {
assert target > doc;
if (doc == -1) {
// skip directly to minDoc
doc = Math.max(target, segmentMinDoc);
} else {
doc = target;
}
while (doc < maxDoc) {
if (acceptDocs == null || acceptDocs.get(doc)) {
break;
}
doc += 1;
}
if (doc >= maxDoc) {
doc = NO_MORE_DOCS;
}
return doc;
}
@Override
public long cost() {
return maxDoc - segmentMinDoc;
}
};
return new ConstantScoreScorer(this, score(), disi);
}
};
}
@Override
public String toString(String field) {
return "MinDocQuery(minDoc=" + minDoc + ")";
}
}
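A minimal usage sketch (not part of this commit; the Directory variable and the cutoff 42 are assumptions for illustration) showing how the query composes with a plain IndexSearcher:
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TopDocs;
// 'dir' is an existing Lucene Directory (assumed); resume collecting at doc ID 42
IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(dir));
TopDocs hits = searcher.search(new MinDocQuery(42), 10); // earlier doc IDs are skipped entirely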

View File

@@ -45,8 +45,8 @@ public class ElasticsearchException extends RuntimeException implements ToXContent {
public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip";
public static final String REST_EXCEPTION_SKIP_STACK_TRACE = "rest.exception.stacktrace.skip";
private static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = false;
private static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;
public static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
private static final String INDEX_HEADER_KEY = "es.index";
private static final String SHARD_HEADER_KEY = "es.shard";
private static final String RESOURCE_HEADER_TYPE_KEY = "es.resource.type";

View File

@@ -19,45 +19,7 @@
package org.elasticsearch.action;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.ValidationException;
import java.util.ArrayList;
import java.util.List;
/**
*
*/
public class ActionRequestValidationException extends IllegalArgumentException {
private final List<String> validationErrors = new ArrayList<>();
public ActionRequestValidationException() {
super("validation failed");
}
public void addValidationError(String error) {
validationErrors.add(error);
}
public void addValidationErrors(Iterable<String> errors) {
for (String error : errors) {
validationErrors.add(error);
}
}
public List<String> validationErrors() {
return validationErrors;
}
@Override
public String getMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Validation Failed: ");
int index = 0;
for (String error : validationErrors) {
sb.append(++index).append(": ").append(error).append(";");
}
return sb.toString();
}
public class ActionRequestValidationException extends ValidationException {
}

View File

@@ -54,7 +54,9 @@ public enum SearchType {
/**
* Performs scanning of the results, which executes the search without any sorting.
* It will automatically start scrolling the result set.
* @deprecated will be removed in 3.0, you should do a regular scroll instead, ordered by `_doc`
*/
@Deprecated
SCAN((byte) 4),
/**
* Only counts the results, will still execute aggregations and the like.
@@ -69,6 +71,7 @@ public enum SearchType {
public static final SearchType DEFAULT = QUERY_THEN_FETCH;
private static final ParseField COUNT_VALUE = new ParseField("count").withAllDeprecated("query_then_fetch");
private static final ParseField SCAN_VALUE = new ParseField("scan").withAllDeprecated("query_then_fetch sorting on `_doc`");
private byte id;
@@ -121,7 +124,7 @@ public enum SearchType {
return SearchType.QUERY_THEN_FETCH;
} else if ("query_and_fetch".equals(searchType)) {
return SearchType.QUERY_AND_FETCH;
} else if ("scan".equals(searchType)) {
} else if (parseFieldMatcher.match(searchType, SCAN_VALUE)) {
return SearchType.SCAN;
} else if (parseFieldMatcher.match(searchType, COUNT_VALUE)) {
return SearchType.COUNT;
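For reference, a hedged sketch of the replacement the deprecation message points at (a regular scroll sorted by `_doc`) using the 2.x Java client; the client instance, index name, and sizes are placeholders:
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.sort.SortOrder;
SearchResponse rsp = client.prepareSearch("my_index")   // 'client' is an existing Client (assumed)
        .addSort("_doc", SortOrder.ASC)                 // cheapest possible ordering, replaces SCAN
        .setScroll(TimeValue.timeValueMinutes(1))
        .setSize(100)
        .get();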

View File

@@ -40,6 +40,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import static org.elasticsearch.action.search.type.TransportSearchHelper.buildScrollId;
@Deprecated // remove in 3.0
public class TransportSearchScanAction extends TransportSearchTypeAction {
@Inject

View File

@@ -21,15 +21,12 @@ package org.elasticsearch.bootstrap;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.StringHelper;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.inject.spi.Message;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
@@ -44,16 +41,14 @@ import org.elasticsearch.node.NodeBuilder;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import static com.google.common.collect.Sets.newHashSet;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
/**
* A main entry point when starting from the command line.
* Internal startup code.
*/
public class Bootstrap {
final class Bootstrap {
private static volatile Bootstrap INSTANCE;
@@ -137,10 +132,6 @@ public class Bootstrap {
OsProbe.getInstance();
}
public static boolean isMemoryLocked() {
return Natives.isMemoryLocked();
}
private void setup(boolean addShutdownHook, Settings settings, Environment environment) throws Exception {
initializeNatives(settings.getAsBoolean("bootstrap.mlockall", false),
settings.getAsBoolean("bootstrap.ctrlhandler", true));
@@ -222,7 +213,11 @@
}
}
public static void main(String[] args) throws Throwable {
/**
* This method is invoked by {@link Elasticsearch#main(String[])}
* to start up Elasticsearch.
*/
static void init(String[] args) throws Throwable {
BootstrapCLIParser bootstrapCLIParser = new BootstrapCLIParser();
CliTool.ExitStatus status = bootstrapCLIParser.execute(args);
@@ -277,11 +272,19 @@
closeSysError();
}
} catch (Throwable e) {
// disable console logging, so user does not see the exception twice (jvm will show it already)
if (foreground) {
Loggers.disableConsoleLogging();
}
ESLogger logger = Loggers.getLogger(Bootstrap.class);
if (INSTANCE.node != null) {
logger = Loggers.getLogger(Bootstrap.class, INSTANCE.node.settings().get("name"));
}
logger.error("Exception", e);
// re-enable it if appropriate, so they can see any logging during the shutdown process
if (foreground) {
Loggers.enableConsoleLogging();
}
throw e;
}

View File

@@ -38,7 +38,7 @@ import java.util.Properties;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder;
public class BootstrapCLIParser extends CliTool {
final class BootstrapCLIParser extends CliTool {
private static final CliToolConfig CONFIG = CliToolConfig.config("elasticsearch", BootstrapCLIParser.class)
.cmds(Start.CMD, Version.CMD)

View File

@@ -0,0 +1,46 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
/**
* Exposes system startup information
*/
public final class BootstrapInfo {
/** no instantiation */
private BootstrapInfo() {}
/**
* Returns true if we successfully loaded native libraries.
* <p>
* If this returns false, then native operations such as locking
* memory did not work.
*/
public static boolean isNativesAvailable() {
return Natives.JNA_AVAILABLE;
}
/**
* Returns true if we were able to lock the process's address space.
*/
public static boolean isMemoryLocked() {
return Natives.isMemoryLocked();
}
}
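A small sketch (assumed caller, not in this commit) of how these flags might be consulted after startup:
// warn if memory locking was requested but could not take effect
if (BootstrapInfo.isNativesAvailable() == false) {
    logger.warn("native support is unavailable, memory locking is disabled");
} else if (BootstrapInfo.isMemoryLocked() == false) {
    logger.warn("bootstrap.mlockall was requested but memory is not locked");
}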

View File

@@ -20,11 +20,23 @@
package org.elasticsearch.bootstrap;
/**
* A wrapper around {@link Bootstrap} just so the process will look nicely on things like jps.
* This class starts elasticsearch.
*/
public class Elasticsearch extends Bootstrap {
public final class Elasticsearch {
public static void main(String[] args) throws Throwable {
Bootstrap.main(args);
/** no instantiation */
private Elasticsearch() {}
/**
* Main entry point for starting Elasticsearch
*/
public static void main(String[] args) throws StartupError {
try {
Bootstrap.init(args);
} catch (Throwable t) {
// format exceptions to the console in a special way
// to avoid 2MB stacktraces from guice, etc.
throw new StartupError(t);
}
}
}

View File

@@ -59,7 +59,7 @@ final class JNACLibrary {
public long rlim_max = 0;
@Override
protected List getFieldOrder() {
protected List<String> getFieldOrder() {
return Arrays.asList(new String[] { "rlim_cur", "rlim_max" });
}
}

View File

@@ -35,7 +35,7 @@ import java.util.List;
/**
* Library for Windows/Kernel32
*/
class JNAKernel32Library {
final class JNAKernel32Library {
private static final ESLogger logger = Loggers.getLogger(JNAKernel32Library.class);
@@ -148,7 +148,7 @@ class JNAKernel32Library {
public NativeLong Type;
@Override
protected List getFieldOrder() {
protected List<String> getFieldOrder() {
return Arrays.asList(new String[]{"BaseAddress", "AllocationBase", "AllocationProtect", "RegionSize", "State", "Protect", "Type"});
}
}

View File

@@ -34,10 +34,13 @@ import static org.elasticsearch.bootstrap.JNAKernel32Library.SizeT;
*/
class JNANatives {
/** no instantiation */
private JNANatives() {}
private static final ESLogger logger = Loggers.getLogger(JNANatives.class);
// Set to true in case the native mlockall call was successful
public static boolean LOCAL_MLOCKALL = false;
static boolean LOCAL_MLOCKALL = false;
static void tryMlockall() {
int errno = Integer.MIN_VALUE;
@@ -72,16 +75,18 @@
}
// mlockall failed for some reason
logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg + ". This can result in part of the JVM being swapped out.");
logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg);
logger.warn("This can result in part of the JVM being swapped out.");
if (errno == JNACLibrary.ENOMEM) {
if (rlimitSuccess) {
logger.warn("Increase RLIMIT_MEMLOCK, soft limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit));
if (Constants.LINUX) {
// give specific instructions for the linux case to make it easy
String user = System.getProperty("user.name");
logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" +
"\t# allow user 'esuser' mlockall\n" +
"\tesuser soft memlock unlimited\n" +
"\tesuser hard memlock unlimited"
"\t# allow user '" + user + "' mlockall\n" +
"\t" + user + " soft memlock unlimited\n" +
"\t" + user + " hard memlock unlimited"
);
logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect.");
}

View File

@@ -29,6 +29,8 @@ import java.util.Map;
/** Checks that the JVM is ok and won't cause index corruption */
final class JVMCheck {
/** no instantiation */
private JVMCheck() {}
/**
* URL with latest JVM recommendations

View File

@@ -37,15 +37,30 @@ import java.util.Arrays;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.jar.JarEntry;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
/** Simple check for duplicate class files across the classpath */
/**
* Simple check for duplicate class files across the classpath.
* <p>
* This class checks for incompatibilities in the following ways:
* <ul>
* <li>Checks that class files are not duplicated across jars.</li>
* <li>Checks that any {@code X-Compile-Target-JDK} value in the jar
* manifest is compatible with the current JRE.</li>
* <li>Checks that any {@code X-Compile-Elasticsearch-Version} value in
* the jar manifest is compatible with the current ES.</li>
* </ul>
*/
public class JarHell {
/** no instantiation */
private JarHell() {}
/** Simple driver class, can be used e.g. from builds. Returns non-zero on jar-hell */
@SuppressForbidden(reason = "command line tool")
public static void main(String args[]) throws Exception {
@@ -69,7 +84,7 @@ public class JarHell {
logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path"));
logger.debug("classloader urls: {}", Arrays.toString(((URLClassLoader)loader).getURLs()));
}
checkJarHell(((URLClassLoader)loader).getURLs());
checkJarHell(((URLClassLoader) loader).getURLs());
}
/**
@@ -141,6 +156,7 @@ public class JarHell {
// give a nice error if jar requires a newer java version
String targetVersion = manifest.getMainAttributes().getValue("X-Compile-Target-JDK");
if (targetVersion != null) {
checkVersionFormat(targetVersion);
checkJavaVersion(jar.toString(), targetVersion);
}
@@ -153,23 +169,34 @@
}
}
public static void checkVersionFormat(String targetVersion) {
if (!JavaVersion.isValid(targetVersion)) {
throw new IllegalStateException(
String.format(
Locale.ROOT,
"version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was %s",
targetVersion
)
);
}
}
/**
* Checks that the java specification version {@code targetVersion}
* required by {@code resource} is compatible with the current installation.
*/
public static void checkJavaVersion(String resource, String targetVersion) {
String systemVersion = System.getProperty("java.specification.version");
float current = Float.POSITIVE_INFINITY;
float target = Float.NEGATIVE_INFINITY;
try {
current = Float.parseFloat(systemVersion);
target = Float.parseFloat(targetVersion);
} catch (NumberFormatException e) {
// some spec changed, time for a more complex parser
}
if (current < target) {
throw new IllegalStateException(resource + " requires Java " + targetVersion
+ ", your system: " + systemVersion);
JavaVersion version = JavaVersion.parse(targetVersion);
if (JavaVersion.current().compareTo(version) < 0) {
throw new IllegalStateException(
String.format(
Locale.ROOT,
"%s requires Java %s:, your system: %s",
resource,
targetVersion,
JavaVersion.current().toString()
)
);
}
}
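To make the new check concrete, a sketch with hypothetical manifest values (jar name and versions are illustrative):
JarHell.checkVersionFormat("1.9");            // passes: decimal integers separated by '.'
JarHell.checkVersionFormat("1.9-beta");       // throws IllegalStateException: bad format
JarHell.checkJavaVersion("some.jar", "1.9");  // throws IllegalStateException on a 1.8 JRE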

View File

@@ -0,0 +1,84 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import org.elasticsearch.common.Strings;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
class JavaVersion implements Comparable<JavaVersion> {
private final List<Integer> version;
public List<Integer> getVersion() {
return Collections.unmodifiableList(version);
}
private JavaVersion(List<Integer> version) {
this.version = version;
}
public static JavaVersion parse(String value) {
if (value == null) {
throw new NullPointerException("value");
}
if ("".equals(value)) {
throw new IllegalArgumentException("value");
}
List<Integer> version = new ArrayList<>();
String[] components = value.split("\\.");
for (String component : components) {
version.add(Integer.valueOf(component));
}
return new JavaVersion(version);
}
public static boolean isValid(String value) {
return value.matches("^0*[0-9]+(\\.[0-9]+)*$");
}
private final static JavaVersion CURRENT = parse(System.getProperty("java.specification.version"));
public static JavaVersion current() {
return CURRENT;
}
@Override
public int compareTo(JavaVersion o) {
int len = Math.max(version.size(), o.version.size());
for (int i = 0; i < len; i++) {
int d = (i < version.size() ? version.get(i) : 0);
int s = (i < o.version.size() ? o.version.get(i) : 0);
if (s < d)
return 1;
if (s > d)
return -1;
}
return 0;
}
@Override
public String toString() {
return Strings.collectionToDelimitedString(version, ".");
}
}
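One subtlety worth noting (a sketch, assuming same-package access to this package-private class): missing trailing components compare as zero, so a bare major version behaves as expected:
assert JavaVersion.parse("1.8").compareTo(JavaVersion.parse("1.8.0")) == 0; // padded with zeros
assert JavaVersion.parse("1.8").compareTo(JavaVersion.parse("1.7")) > 0;
assert JavaVersion.parse("1.7").compareTo(JavaVersion.parse("1.8")) < 0;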

View File

@@ -26,27 +26,32 @@ import org.elasticsearch.common.logging.Loggers;
* The Natives class is a wrapper class that checks if the classes necessary for calling native methods are available on
* startup. If they are not available, this class will avoid calling code that loads these classes.
*/
class Natives {
final class Natives {
/** no instantiation */
private Natives() {}
private static final ESLogger logger = Loggers.getLogger(Natives.class);
// marker to determine if the JNA class files are available to the JVM
private static boolean jnaAvailable = false;
static final boolean JNA_AVAILABLE;
static {
boolean v = false;
try {
// load one of the main JNA classes to see if the classes are available. this does not ensure that all native
// libraries are available, only the ones necessary for JNA to function
Class.forName("com.sun.jna.Native");
jnaAvailable = true;
v = true;
} catch (ClassNotFoundException e) {
logger.warn("JNA not found. native methods will be disabled.", e);
} catch (UnsatisfiedLinkError e) {
logger.warn("unable to load JNA native support library, native methods will be disabled.", e);
}
JNA_AVAILABLE = v;
}
static void tryMlockall() {
if (!jnaAvailable) {
if (!JNA_AVAILABLE) {
logger.warn("cannot mlockall because JNA is not available");
return;
}
@@ -54,7 +59,7 @@ class Natives {
}
static boolean definitelyRunningAsRoot() {
if (!jnaAvailable) {
if (!JNA_AVAILABLE) {
logger.warn("cannot check if running as root because JNA is not available");
return false;
}
@@ -62,7 +67,7 @@
}
static void tryVirtualLock() {
if (!jnaAvailable) {
if (!JNA_AVAILABLE) {
logger.warn("cannot mlockall because JNA is not available");
return;
}
@@ -70,7 +75,7 @@
}
static void addConsoleCtrlHandler(ConsoleCtrlHandler handler) {
if (!jnaAvailable) {
if (!JNA_AVAILABLE) {
logger.warn("cannot register console handler because JNA is not available");
return;
}
@@ -78,7 +83,7 @@
}
static boolean isMemoryLocked() {
if (!jnaAvailable) {
if (!JNA_AVAILABLE) {
return false;
}
return JNANatives.LOCAL_MLOCKALL;

View File

@@ -38,15 +38,59 @@ import java.util.Map;
import java.util.regex.Pattern;
/**
* Initializes securitymanager with necessary permissions.
* Initializes SecurityManager with necessary permissions.
* <p>
* We use a template file (the one we test with), and add additional
* permissions based on the environment (data paths, etc)
* <h1>Initialization</h1>
* The JVM is not initially started with the security manager enabled;
* instead we turn it on early in the startup process. This is a tradeoff
* between security and ease of use, since initializing it late:
* <ul>
* <li>Assigns file permissions to user-configurable paths that can
* be specified from the command-line or {@code elasticsearch.yml}.</li>
* <li>Allows for some contained usage of native code that would not
* otherwise be permitted.</li>
* </ul>
* <p>
* <h1>Permissions</h1>
* Permissions use a policy file packaged as a resource; this file is
* also used in tests. File permissions are generated dynamically and
* combined with this policy file.
* <p>
* For each configured path, we ensure it exists and is accessible before
* granting permissions, otherwise directory creation would require
* permissions to parent directories.
* <p>
* In some exceptional cases, permissions are assigned to specific jars only,
* when they are so dangerous that general code should not be granted the
* permission, but there are extenuating circumstances.
* <p>
* Groovy scripts are assigned no permissions. This does not provide adequate
* sandboxing, as these scripts still have access to ES classes and could
* modify members, etc., causing bad things to happen later on their
* behalf (no package protections are yet in place; this would need some
* cleanup of the scripting APIs). Still, it can provide some defense for users
* who enable dynamic scripting without being fully aware of the consequences.
* <p>
* <h1>Disabling Security</h1>
* SecurityManager can be disabled completely with this setting:
* <pre>
* es.security.manager.enabled = false
* </pre>
* <p>
* <h1>Debugging Security</h1>
* A good place to start when there is a problem is to turn on security debugging:
* <pre>
* JAVA_OPTS="-Djava.security.debug=access:failure" bin/elasticsearch
* </pre>
* See <a href="https://docs.oracle.com/javase/7/docs/technotes/guides/security/troubleshooting-security.html">
* Troubleshooting Security</a> for information.
*/
final class Security {
/** no instantiation */
private Security() {}
/**
* Initializes securitymanager for the environment
* Initializes SecurityManager for the environment
* Can only happen once!
*/
static void configure(Environment environment) throws Exception {
@@ -118,25 +162,25 @@
static Permissions createPermissions(Environment environment) throws IOException {
Permissions policy = new Permissions();
// read-only dirs
addPath(policy, environment.binFile(), "read,readlink");
addPath(policy, environment.libFile(), "read,readlink");
addPath(policy, environment.pluginsFile(), "read,readlink");
addPath(policy, environment.configFile(), "read,readlink");
addPath(policy, environment.scriptsFile(), "read,readlink");
addPath(policy, "path.home", environment.binFile(), "read,readlink");
addPath(policy, "path.home", environment.libFile(), "read,readlink");
addPath(policy, "path.plugins", environment.pluginsFile(), "read,readlink");
addPath(policy, "path.conf", environment.configFile(), "read,readlink");
addPath(policy, "path.scripts", environment.scriptsFile(), "read,readlink");
// read-write dirs
addPath(policy, environment.tmpFile(), "read,readlink,write,delete");
addPath(policy, environment.logsFile(), "read,readlink,write,delete");
addPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete");
addPath(policy, "path.logs", environment.logsFile(), "read,readlink,write,delete");
if (environment.sharedDataFile() != null) {
addPath(policy, environment.sharedDataFile(), "read,readlink,write,delete");
addPath(policy, "path.shared_data", environment.sharedDataFile(), "read,readlink,write,delete");
}
for (Path path : environment.dataFiles()) {
addPath(policy, path, "read,readlink,write,delete");
addPath(policy, "path.data", path, "read,readlink,write,delete");
}
for (Path path : environment.dataWithClusterFiles()) {
addPath(policy, path, "read,readlink,write,delete");
addPath(policy, "path.data", path, "read,readlink,write,delete");
}
for (Path path : environment.repoFiles()) {
addPath(policy, path, "read,readlink,write,delete");
addPath(policy, "path.repo", path, "read,readlink,write,delete");
}
if (environment.pidFile() != null) {
// we just need permission to remove the file if it's elsewhere.
@@ -145,10 +189,20 @@
return policy;
}
/** Add access to path (and all files underneath it */
static void addPath(Permissions policy, Path path, String permissions) throws IOException {
// paths may not exist yet
ensureDirectoryExists(path);
/**
* Add access to path (and all files underneath it)
* @param policy current policy to add permissions to
* @param configurationName the configuration name associated with the path (for error messages only)
* @param path the path itself
* @param permissions set of file permissions to grant to the path
*/
static void addPath(Permissions policy, String configurationName, Path path, String permissions) {
// paths may not exist yet, this also checks accessibility
try {
ensureDirectoryExists(path);
} catch (IOException e) {
throw new IllegalStateException("Unable to access '" + configurationName + "' (" + path + ")", e);
}
// add each path twice: once for itself, again for files underneath it
policy.add(new FilePermission(path.toString(), permissions));
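The "add each path twice" comment relies on FilePermission's path syntax, where a trailing "/-" matches the whole tree below a directory. A sketch of the effective grants for one assumed data path:
import java.io.FilePermission;
import java.security.Permissions;
Permissions policy = new Permissions();
// the directory itself ...
policy.add(new FilePermission("/var/lib/es/data", "read,readlink,write,delete"));
// ... and everything underneath it ("-" after the separator is the recursive wildcard)
policy.add(new FilePermission("/var/lib/es/data/-", "read,readlink,write,delete"));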

View File

@@ -0,0 +1,115 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.inject.spi.Message;
import java.io.PrintStream;
/**
* Wraps an exception in a special way so that it gets formatted
* "reasonably". This means limits on stacktrace frames and
* cleanup for guice, and some guidance about consulting full
* logs for the whole exception.
*/
//TODO: remove this when guice is removed, and exceptions are cleaned up
//this is horrible, but it's what we must do
final class StartupError extends RuntimeException {
/** maximum length of a stacktrace, before we truncate it */
static final int STACKTRACE_LIMIT = 30;
/** all lines from this package are RLE-compressed */
static final String GUICE_PACKAGE = "org.elasticsearch.common.inject";
/**
* Create a new StartupError that will format {@code cause}
* to the console on failure.
*/
StartupError(Throwable cause) {
super(cause);
}
/*
* This logic actually prints the exception to the console; it's
* what is invoked by the JVM when we throw the exception from main()
*/
@Override
public void printStackTrace(PrintStream s) {
Throwable originalCause = getCause();
Throwable cause = originalCause;
if (cause instanceof CreationException) {
cause = getFirstGuiceCause((CreationException)cause);
}
String message = String.valueOf(cause); // tolerates a null cause, which is checked below
s.println(message);
if (cause != null) {
// walk to the root cause
while (cause.getCause() != null) {
cause = cause.getCause();
}
// print the root cause message, only if it differs!
if (cause != originalCause && (message.equals(cause.toString()) == false)) {
s.println("Likely root cause: " + cause);
}
// print stacktrace of cause
StackTraceElement[] stack = cause.getStackTrace();
int linesWritten = 0;
for (int i = 0; i < stack.length; i++) {
if (linesWritten == STACKTRACE_LIMIT) {
s.println("\t<<<truncated>>>");
break;
}
String line = stack[i].toString();
// skip past contiguous runs of this garbage:
if (line.startsWith(GUICE_PACKAGE)) {
while (i + 1 < stack.length && stack[i + 1].toString().startsWith(GUICE_PACKAGE)) {
i++;
}
s.println("\tat <<<guice>>>");
linesWritten++;
continue;
}
s.println("\tat " + line.toString());
linesWritten++;
}
}
s.println("Refer to the log for complete error details.");
}
/**
* Returns first cause from a guice error (it can have multiple).
*/
static Throwable getFirstGuiceCause(CreationException guice) {
for (Message message : guice.getErrorMessages()) {
Throwable cause = message.getCause();
if (cause != null) {
return cause;
}
}
return guice; // we tried
}
}

View File

@@ -35,6 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
@@ -132,7 +133,11 @@ public class TransportClient extends AbstractClient {
try {
ModulesBuilder modules = new ModulesBuilder();
modules.add(new Version.Module(version));
modules.add(new PluginsModule(this.settings, pluginsService));
// plugin modules must be added here, before others, or we can get crazy injection errors...
for (Module pluginModule : pluginsService.nodeModules()) {
modules.add(pluginModule);
}
modules.add(new PluginsModule(pluginsService));
modules.add(new EnvironmentModule(environment));
modules.add(new SettingsModule(this.settings));
modules.add(new NetworkModule());
@@ -149,6 +154,8 @@
modules.add(new ClientTransportModule());
modules.add(new CircuitBreakerModule(this.settings));
pluginsService.processModules(modules);
Injector injector = modules.createInjector();
injector.getInstance(TransportService.class).start();
TransportClient transportClient = new TransportClient(injector);

View File

@@ -30,7 +30,7 @@ import java.util.Map;
* <code>InternalClusterInfoService.shardIdentifierFromRouting(String)</code>
* for the key used in the shardSizes map
*/
public final class ClusterInfo {
public class ClusterInfo {
private final Map<String, DiskUsage> usages;
final Map<String, Long> shardSizes;
@@ -54,6 +54,11 @@
return shardSizes.get(shardIdentifierFromRouting(shardRouting));
}
public long getShardSize(ShardRouting shardRouting, long defaultValue) {
Long shardSize = getShardSize(shardRouting);
return shardSize == null ? defaultValue : shardSize;
}
/**
* Method that incorporates the ShardId for the shard into a string that
* includes a 'p' or 'r' depending on whether the shard is a primary.
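A sketch of the difference between the two size lookups above (the clusterInfo and shardRouting variables are assumed caller code): the original accessor can return null for an untracked shard, while the new overload substitutes the caller's default, which is what the allocation changes further down rely on:
Long maybeSize = clusterInfo.getShardSize(shardRouting);      // null if the size is untracked
long sizeOrSentinel = clusterInfo.getShardSize(shardRouting,
        ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);        // falls back to -1, never null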

View File

@@ -19,6 +19,7 @@
package org.elasticsearch.cluster;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
@@ -36,6 +37,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.node.settings.NodeSettingsService;
@@ -45,6 +47,7 @@ import org.elasticsearch.transport.ReceiveTimeoutTransportException;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
/**
* InternalClusterInfoService provides the ClusterInfoService interface,

View File

@@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
@@ -46,6 +45,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
@@ -60,12 +60,15 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.indices.*;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndexCreationException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidIndexNameException;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool;
import org.joda.time.DateTime;
@@ -514,6 +517,15 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}
public void validateIndexSettings(String indexName, Settings settings) throws IndexCreationException {
List<String> validationErrors = getIndexSettingsValidationErrors(settings);
if (validationErrors.isEmpty() == false) {
ValidationException validationException = new ValidationException();
validationException.addValidationErrors(validationErrors);
throw new IndexCreationException(new Index(indexName), validationException);
}
}
List<String> getIndexSettingsValidationErrors(Settings settings) {
String customPath = settings.get(IndexMetaData.SETTING_DATA_PATH, null);
List<String> validationErrors = Lists.newArrayList();
if (customPath != null && env.sharedDataFile() == null) {
@@ -530,22 +542,9 @@
validationErrors.add("index must have 1 or more primary shards");
}
if (number_of_replicas != null && number_of_replicas < 0) {
validationErrors.add("index must have 0 or more replica shards");
validationErrors.add("index must have 0 or more replica shards");
}
if (validationErrors.isEmpty() == false) {
throw new IndexCreationException(new Index(indexName),
new IllegalArgumentException(getMessage(validationErrors)));
}
}
private String getMessage(List<String> validationErrors) {
StringBuilder sb = new StringBuilder();
sb.append("Validation Failed: ");
int index = 0;
for (String error : validationErrors) {
sb.append(++index).append(": ").append(error).append(";");
}
return sb.toString();
return validationErrors;
}
private static class DefaultIndexTemplateFilter implements IndexTemplateFilter {

View File

@@ -29,12 +29,12 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.TimeoutClusterStateUpdateTask;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.indices.IndexCreationException;
import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
import org.elasticsearch.indices.IndexTemplateMissingException;
import org.elasticsearch.indices.InvalidIndexTemplateException;
@@ -179,41 +179,44 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
}
private void validate(PutRequest request) {
List<String> validationErrors = Lists.newArrayList();
if (request.name.contains(" ")) {
throw new InvalidIndexTemplateException(request.name, "name must not contain a space");
validationErrors.add("name must not contain a space");
}
if (request.name.contains(",")) {
throw new InvalidIndexTemplateException(request.name, "name must not contain a ','");
validationErrors.add("name must not contain a ','");
}
if (request.name.contains("#")) {
throw new InvalidIndexTemplateException(request.name, "name must not contain a '#'");
validationErrors.add("name must not contain a '#'");
}
if (request.name.startsWith("_")) {
throw new InvalidIndexTemplateException(request.name, "name must not start with '_'");
validationErrors.add("name must not start with '_'");
}
if (!request.name.toLowerCase(Locale.ROOT).equals(request.name)) {
throw new InvalidIndexTemplateException(request.name, "name must be lower cased");
validationErrors.add("name must be lower cased");
}
if (request.template.contains(" ")) {
throw new InvalidIndexTemplateException(request.name, "template must not contain a space");
validationErrors.add("template must not contain a space");
}
if (request.template.contains(",")) {
throw new InvalidIndexTemplateException(request.name, "template must not contain a ','");
validationErrors.add("template must not contain a ','");
}
if (request.template.contains("#")) {
throw new InvalidIndexTemplateException(request.name, "template must not contain a '#'");
validationErrors.add("template must not contain a '#'");
}
if (request.template.startsWith("_")) {
throw new InvalidIndexTemplateException(request.name, "template must not start with '_'");
validationErrors.add("template must not start with '_'");
}
if (!Strings.validFileNameExcludingAstrix(request.template)) {
throw new InvalidIndexTemplateException(request.name, "template must not container the following characters " + Strings.INVALID_FILENAME_CHARS);
validationErrors.add("template must not container the following characters " + Strings.INVALID_FILENAME_CHARS);
}
try {
metaDataCreateIndexService.validateIndexSettings(request.name, request.settings);
} catch (IndexCreationException exception) {
throw new InvalidIndexTemplateException(request.name, exception.getDetailedMessage());
List<String> indexSettingsValidation = metaDataCreateIndexService.getIndexSettingsValidationErrors(request.settings);
validationErrors.addAll(indexSettingsValidation);
if (!validationErrors.isEmpty()) {
ValidationException validationException = new ValidationException();
validationException.addValidationErrors(validationErrors);
throw new InvalidIndexTemplateException(request.name, validationException.getMessage());
}
for (Alias alias : request.aliases) {
@@ -271,7 +274,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
this.mappings.putAll(mappings);
return this;
}
public PutRequest aliases(Set<Alias> aliases) {
this.aliases.addAll(aliases);
return this;

View File

@@ -361,16 +361,16 @@ public class DiscoveryNode implements Streamable, ToXContent {
public String toString() {
StringBuilder sb = new StringBuilder();
if (nodeName.length() > 0) {
sb.append('[').append(nodeName).append(']');
sb.append('{').append(nodeName).append('}');
}
if (nodeId != null) {
sb.append('[').append(nodeId).append(']');
sb.append('{').append(nodeId).append('}');
}
if (Strings.hasLength(hostName)) {
sb.append('[').append(hostName).append(']');
sb.append('{').append(hostName).append('}');
}
if (address != null) {
sb.append('[').append(address).append(']');
sb.append('{').append(address).append('}');
}
if (!attributes.isEmpty()) {
sb.append(attributes);

View File

@@ -345,10 +345,10 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Moves a shard from unassigned to initializing state
*/
public void initialize(ShardRouting shard, String nodeId) {
public void initialize(ShardRouting shard, String nodeId, long expectedSize) {
ensureMutable();
assert shard.unassigned() : shard;
shard.initialize(nodeId);
shard.initialize(nodeId, expectedSize);
node(nodeId).add(shard);
inactiveShardCount++;
if (shard.primary()) {
@@ -362,10 +362,10 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* shard as well as assigning it, and returning the target initializing
* shard.
*/
public ShardRouting relocate(ShardRouting shard, String nodeId) {
public ShardRouting relocate(ShardRouting shard, String nodeId, long expectedShardSize) {
ensureMutable();
relocatingShards++;
shard.relocate(nodeId);
shard.relocate(nodeId, expectedShardSize);
ShardRouting target = shard.buildTargetRelocatingShard();
node(target.currentNodeId()).add(target);
assignedShardsAdd(target);
@@ -608,16 +608,9 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Initializes the current unassigned shard and moves it from the unassigned list.
*/
public void initialize(String nodeId) {
initialize(nodeId, current.version());
}
/**
* Initializes the current unassigned shard and moves it from the unassigned list.
*/
public void initialize(String nodeId, long version) {
public void initialize(String nodeId, long version, long expectedShardSize) {
innerRemove();
nodes.initialize(new ShardRouting(current, version), nodeId);
nodes.initialize(new ShardRouting(current, version), nodeId, expectedShardSize);
}
/**

View File

@@ -37,6 +37,11 @@ import java.util.List;
*/
public final class ShardRouting implements Streamable, ToXContent {
/**
* Used if shard size is not available
*/
public static final long UNAVAILABLE_EXPECTED_SHARD_SIZE = -1;
private String index;
private int shardId;
private String currentNodeId;
@@ -50,6 +55,7 @@ public final class ShardRouting implements Streamable, ToXContent {
private final transient List<ShardRouting> asList;
private transient ShardId shardIdentifier;
private boolean frozen = false;
private long expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
private ShardRouting() {
this.asList = Collections.singletonList(this);
@@ -60,7 +66,7 @@
}
public ShardRouting(ShardRouting copy, long version) {
this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), version, copy.unassignedInfo(), copy.allocationId(), true);
this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), version, copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize());
}
/**
@@ -69,7 +75,7 @@
*/
ShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version,
UnassignedInfo unassignedInfo, AllocationId allocationId, boolean internal) {
UnassignedInfo unassignedInfo, AllocationId allocationId, boolean internal, long expectedShardSize) {
this.index = index;
this.shardId = shardId;
this.currentNodeId = currentNodeId;
@@ -81,6 +87,9 @@
this.restoreSource = restoreSource;
this.unassignedInfo = unassignedInfo;
this.allocationId = allocationId;
this.expectedShardSize = expectedShardSize;
assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state;
// the expected size is either the "unavailable" sentinel or a non-negative value
assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || expectedShardSize >= 0 : expectedShardSize + " state: " + state;
assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta";
if (!internal) {
assert state == ShardRoutingState.UNASSIGNED;
@@ -88,13 +97,14 @@
assert relocatingNodeId == null;
assert allocationId == null;
}
}
/**
* Creates a new unassigned shard.
*/
public static ShardRouting newUnassigned(String index, int shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) {
return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, 0, unassignedInfo, null, true);
return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, 0, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE);
}
/**
@@ -205,7 +215,7 @@
public ShardRouting buildTargetRelocatingShard() {
assert relocating();
return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, version, unassignedInfo,
AllocationId.newTargetRelocation(allocationId), true);
AllocationId.newTargetRelocation(allocationId), true, expectedShardSize);
}
/**
@@ -317,6 +327,11 @@
if (in.readBoolean()) {
allocationId = new AllocationId(in);
}
if (relocating() || initializing()) {
expectedShardSize = in.readLong();
} else {
expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
}
freeze();
}
@@ -368,6 +383,10 @@
} else {
out.writeBoolean(false);
}
if (relocating() || initializing()) {
out.writeLong(expectedShardSize);
}
}
@Override
@@ -397,12 +416,13 @@
relocatingNodeId = null;
this.unassignedInfo = unassignedInfo;
allocationId = null;
expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
}
/**
* Initializes an unassigned shard on a node.
*/
void initialize(String nodeId) {
void initialize(String nodeId, long expectedShardSize) {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.UNASSIGNED : this;
@@ -410,6 +430,7 @@
state = ShardRoutingState.INITIALIZING;
currentNodeId = nodeId;
allocationId = AllocationId.newInitializing();
this.expectedShardSize = expectedShardSize;
}
/**
@@ -417,13 +438,14 @@
*
* @param relocatingNodeId id of the node to relocate the shard
*/
void relocate(String relocatingNodeId) {
void relocate(String relocatingNodeId, long expectedShardSize) {
ensureNotFrozen();
version++;
assert state == ShardRoutingState.STARTED : "current shard has to be started in order to be relocated " + this;
state = ShardRoutingState.RELOCATING;
this.relocatingNodeId = relocatingNodeId;
this.allocationId = AllocationId.newRelocation(allocationId);
this.expectedShardSize = expectedShardSize;
}
/**
@@ -436,7 +458,7 @@
assert state == ShardRoutingState.RELOCATING : this;
assert assignedToNode() : this;
assert relocatingNodeId != null : this;
expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
state = ShardRoutingState.STARTED;
relocatingNodeId = null;
allocationId = AllocationId.cancelRelocation(allocationId);
@@ -470,6 +492,7 @@
// relocation target
allocationId = AllocationId.finishRelocation(allocationId);
}
expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
state = ShardRoutingState.STARTED;
}
@@ -669,6 +692,9 @@
if (this.unassignedInfo != null) {
sb.append(", ").append(unassignedInfo.toString());
}
if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE) {
sb.append(", expected_shard_size[").append(expectedShardSize).append("]");
}
return sb.toString();
}
@@ -682,7 +708,9 @@
.field("shard", shardId().id())
.field("index", shardId().index().name())
.field("version", version);
if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE) {
builder.field("expected_shard_size_in_bytes", expectedShardSize);
}
if (restoreSource() != null) {
builder.field("restore_source");
restoreSource().toXContent(builder, params);
@@ -709,4 +737,12 @@
boolean isFrozen() {
return frozen;
}
/**
* Returns the expected shard size for {@link ShardRoutingState#RELOCATING} and {@link ShardRoutingState#INITIALIZING}
* shards. If its size is not available, {@value #UNAVAILABLE_EXPECTED_SHARD_SIZE} will be returned.
*/
public long getExpectedShardSize() {
return expectedShardSize;
}
}
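As a worked example of how the new field travels (assumed caller code, mirroring the allocator call sites later in this commit): the allocator looks up the last known size from ClusterInfo and threads it into initialize/relocate, where it becomes this routing entry's expected size:
long expected = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
routingNodes.initialize(shard, nodeId, expected);
// shard copies on the routing table now report it:
// shard.getExpectedShardSize() == expected (or -1 if it was unknown)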

View File

@@ -507,7 +507,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
Decision decision = allocation.deciders().canAllocate(shard, target, allocation);
if (decision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
sourceNode.removeShard(shard);
ShardRouting targetRelocatingShard = routingNodes.relocate(shard, target.nodeId());
ShardRouting targetRelocatingShard = routingNodes.relocate(shard, target.nodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
currentNode.addShard(targetRelocatingShard, decision);
if (logger.isTraceEnabled()) {
logger.trace("Moved shard [{}] to node [{}]", shard, currentNode.getNodeId());
@@ -687,7 +687,7 @@
if (logger.isTraceEnabled()) {
logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
}
routingNodes.initialize(shard, routingNodes.node(minNode.getNodeId()).nodeId());
routingNodes.initialize(shard, routingNodes.node(minNode.getNodeId()).nodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
changed = true;
continue; // don't add to ignoreUnassigned
} else {
@@ -779,10 +779,10 @@
/* now allocate on the cluster - if we are started we need to relocate the shard */
if (candidate.started()) {
RoutingNode lowRoutingNode = routingNodes.node(minNode.getNodeId());
routingNodes.relocate(candidate, lowRoutingNode.nodeId());
routingNodes.relocate(candidate, lowRoutingNode.nodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
} else {
routingNodes.initialize(candidate, routingNodes.node(minNode.getNodeId()).nodeId());
routingNodes.initialize(candidate, routingNodes.node(minNode.getNodeId()).nodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
}
return true;

View File

@@ -231,7 +231,7 @@ public class AllocateAllocationCommand implements AllocationCommand {
unassigned.updateUnassignedInfo(new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
"force allocation from previous reason " + unassigned.unassignedInfo().getReason() + ", " + unassigned.unassignedInfo().getMessage(), unassigned.unassignedInfo().getFailure()));
}
it.initialize(routingNode.nodeId());
it.initialize(routingNode.nodeId(), unassigned.version(), allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
break;
}
return new RerouteExplanation(this, decision);

View File

@@ -178,7 +178,7 @@ public class MoveAllocationCommand implements AllocationCommand {
if (decision.type() == Decision.Type.THROTTLE) {
// its being throttled, maybe have a flag to take it into account and fail? for now, just do it since the "user" wants it...
}
allocation.routingNodes().relocate(shardRouting, toRoutingNode.nodeId());
allocation.routingNodes().relocate(shardRouting, toRoutingNode.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
}
if (!found) {

View File

@@ -0,0 +1,71 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
import java.util.ArrayList;
import java.util.List;
/**
* Encapsulates an accumulation of validation errors
*/
public class ValidationException extends IllegalArgumentException {
private final List<String> validationErrors = new ArrayList<>();
public ValidationException() {
super("validation failed");
}
/**
* Add a new validation error to the accumulating validation errors
* @param error the error to add
*/
public void addValidationError(String error) {
validationErrors.add(error);
}
/**
* Add a sequence of validation errors to the accumulating validation errors
* @param errors the errors to add
*/
public void addValidationErrors(Iterable<String> errors) {
for (String error : errors) {
validationErrors.add(error);
}
}
/**
* Returns the validation errors accumulated
* @return the accumulated validation errors
*/
public List<String> validationErrors() {
return validationErrors;
}
@Override
public String getMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Validation Failed: ");
int index = 0;
for (String error : validationErrors) {
sb.append(++index).append(": ").append(error).append(";");
}
return sb.toString();
}
}
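A short sketch of the accumulate-then-throw pattern this class enables (error strings illustrative, taken from the index-settings validation above):
ValidationException e = new ValidationException();
e.addValidationError("index must have 1 or more primary shards");
e.addValidationError("index must have 0 or more replica shards");
// e.getMessage() -> "Validation Failed: 1: index must have 1 or more primary shards;2: index must have 0 or more replica shards;"
throw e;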

View File

@@ -360,10 +360,11 @@ public class HttpDownloadHelper {
if (connection instanceof HttpURLConnection) {
((HttpURLConnection) connection).setInstanceFollowRedirects(false);
((HttpURLConnection) connection).setUseCaches(true);
((HttpURLConnection) connection).setConnectTimeout(5000);
connection.setUseCaches(true);
connection.setConnectTimeout(5000);
}
connection.setRequestProperty("ES-Version", Version.CURRENT.toString());
connection.setRequestProperty("ES-Build-Hash", Build.CURRENT.hashShort());
connection.setRequestProperty("User-Agent", "elasticsearch-plugin-manager");
// connect to the remote site (may take some time)

View File

@@ -1,65 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.inject;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import java.lang.reflect.Constructor;
/**
*
*/
public class Modules {
public static Module createModule(Class<? extends Module> moduleClass, @Nullable Settings settings) {
Constructor<? extends Module> constructor;
try {
constructor = moduleClass.getConstructor(Settings.class);
try {
return constructor.newInstance(settings);
} catch (Exception e) {
throw new ElasticsearchException("Failed to create module [" + moduleClass + "]", e);
}
} catch (NoSuchMethodException e) {
try {
constructor = moduleClass.getConstructor();
try {
return constructor.newInstance();
} catch (Exception e1) {
throw new ElasticsearchException("Failed to create module [" + moduleClass + "]", e);
}
} catch (NoSuchMethodException e1) {
throw new ElasticsearchException("No constructor for [" + moduleClass + "]");
}
}
}
public static void processModules(Iterable<Module> modules) {
for (Module module : modules) {
if (module instanceof PreProcessModule) {
for (Module module1 : modules) {
((PreProcessModule) module).processModule(module1);
}
}
}
}
}

View File

@ -31,20 +31,9 @@ public class ModulesBuilder implements Iterable<Module> {
private final List<Module> modules = Lists.newArrayList();
public ModulesBuilder add(Module... modules) {
for (Module module : modules) {
add(module);
}
return this;
}
public ModulesBuilder add(Module module) {
modules.add(module);
if (module instanceof SpawnModules) {
Iterable<? extends Module> spawned = ((SpawnModules) module).spawnModules();
for (Module spawn : spawned) {
add(spawn);
}
public ModulesBuilder add(Module... newModules) {
for (Module module : newModules) {
modules.add(module);
}
return this;
}
@ -55,7 +44,6 @@ public class ModulesBuilder implements Iterable<Module> {
}
public Injector createInjector() {
Modules.processModules(modules);
Injector injector = Guice.createInjector(modules);
Injectors.cleanCaches(injector);
// in ES, we always create all instances as if they are eager singletons
@ -65,7 +53,6 @@ public class ModulesBuilder implements Iterable<Module> {
}
public Injector createChildInjector(Injector injector) {
Modules.processModules(modules);
Injector childInjector = injector.createChildInjector(modules);
Injectors.cleanCaches(childInjector);
// in ES, we always create all instances as if they are eager singletons

View File

@ -18,7 +18,6 @@ package org.elasticsearch.common.inject.spi;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.common.inject.*;
import org.elasticsearch.common.inject.binder.AnnotatedBindingBuilder;
import org.elasticsearch.common.inject.binder.AnnotatedConstantBindingBuilder;
@ -342,7 +341,7 @@ public final class Elements {
return builder;
}
private static ESLogger logger = Loggers.getLogger(Bootstrap.class);
private static ESLogger logger = Loggers.getLogger(Elements.class);
protected Object getSource() {
Object ret;

View File

@ -542,6 +542,8 @@ public abstract class StreamInput extends InputStream {
return (T) readStackTrace(new IllegalStateException(readOptionalString(), readThrowable()), this);
case 17:
return (T) readStackTrace(new LockObtainFailedException(readOptionalString(), readThrowable()), this);
case 18:
return (T) readStackTrace(new InterruptedException(readOptionalString()), this);
default:
assert false : "no such exception for id: " + key;
}

View File

@ -590,6 +590,9 @@ public abstract class StreamOutput extends OutputStream {
writeVInt(16);
} else if (throwable instanceof LockObtainFailedException) {
writeVInt(17);
} else if (throwable instanceof InterruptedException) {
writeVInt(18);
writeCause = false;
} else {
ElasticsearchException ex;
final String name = throwable.getClass().getName();
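
The write side above and the read side in StreamInput must stay symmetric: every id written here needs a matching case in StreamInput's switch, and writeCause is set to false for InterruptedException because it has no (String, Throwable) constructor. A standalone sketch of the same id-based scheme, using plain java.io and a hypothetical class (not the actual ES wire format):

import java.io.*;

final class ThrowableWire {
    // write side: map a throwable to a numeric id plus its message
    static void write(DataOutputStream out, Throwable t) throws IOException {
        if (t instanceof InterruptedException) {
            out.writeInt(18); // id must match the reader's switch below
            // writeUTF cannot take null, so render a null message as "null"
            out.writeUTF(String.valueOf(t.getMessage()));
        } else {
            throw new IOException("unregistered throwable: " + t.getClass());
        }
    }

    // read side: reconstruct from the id; InterruptedException has no
    // (String, Throwable) constructor, which is why the writer above
    // sets writeCause = false for it
    static Throwable read(DataInputStream in) throws IOException {
        switch (in.readInt()) {
            case 18: return new InterruptedException(in.readUTF());
            default: throw new IOException("no such exception id");
        }
    }
}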

View File

@ -275,9 +275,9 @@ public class Joda {
.toFormatter()
.withZoneUTC();
DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(longFormatter.withZone(DateTimeZone.UTC).getPrinter(), new DateTimeParser[] {longFormatter.getParser(), shortFormatter.getParser()});
DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(longFormatter.withZone(DateTimeZone.UTC).getPrinter(), new DateTimeParser[]{longFormatter.getParser(), shortFormatter.getParser(), new EpochTimeParser(true)});
return new FormatDateTimeFormatter("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd", builder.toFormatter().withZone(DateTimeZone.UTC), Locale.ROOT);
return new FormatDateTimeFormatter("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis", builder.toFormatter().withZone(DateTimeZone.UTC), Locale.ROOT);
}

View File

@ -84,6 +84,7 @@ public class Loggers {
}
}
@SuppressForbidden(reason = "do not know what this method does")
public static ESLogger getLogger(String loggerName, Settings settings, String... prefixes) {
List<String> prefixesList = newArrayList();
if (settings.getAsBoolean("logger.logHostAddress", false)) {

View File

@ -0,0 +1,167 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.network;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.InterfaceAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.List;
import java.util.Locale;
/**
* Simple class to log {@code ifconfig}-style output at DEBUG level.
*/
final class IfConfig {
private static final ESLogger logger = Loggers.getLogger(IfConfig.class);
private static final String INDENT = " ";
/** log interface configuration at debug level, if it's enabled */
static void logIfNecessary() {
if (logger.isDebugEnabled()) {
try {
doLogging();
} catch (IOException | SecurityException e) {
logger.warn("unable to gather network information", e);
}
}
}
/** perform actual logging: might throw exception if things go wrong */
private static void doLogging() throws IOException {
StringBuilder msg = new StringBuilder();
for (NetworkInterface nic : NetworkUtils.getInterfaces()) {
msg.append(System.lineSeparator());
// ordinary name
msg.append(nic.getName());
msg.append(System.lineSeparator());
// display name (e.g. on windows)
if (!nic.getName().equals(nic.getDisplayName())) {
msg.append(INDENT);
msg.append(nic.getDisplayName());
msg.append(System.lineSeparator());
}
// addresses: v4 first, then v6
List<InterfaceAddress> addresses = nic.getInterfaceAddresses();
for (InterfaceAddress address : addresses) {
if (address.getAddress() instanceof Inet6Address == false) {
msg.append(INDENT);
msg.append(formatAddress(address));
msg.append(System.lineSeparator());
}
}
for (InterfaceAddress address : addresses) {
if (address.getAddress() instanceof Inet6Address) {
msg.append(INDENT);
msg.append(formatAddress(address));
msg.append(System.lineSeparator());
}
}
// hardware address
byte hardware[] = nic.getHardwareAddress();
if (hardware != null) {
msg.append(INDENT);
msg.append("hardware ");
for (int i = 0; i < hardware.length; i++) {
if (i > 0) {
msg.append(":");
}
msg.append(String.format(Locale.ROOT, "%02X", hardware[i]));
}
msg.append(System.lineSeparator());
}
// attributes
msg.append(INDENT);
msg.append(formatFlags(nic));
msg.append(System.lineSeparator());
}
logger.debug("configuration:" + System.lineSeparator() + "{}", msg.toString());
}
/** format internet address: Java's default doesn't include everything useful */
private static String formatAddress(InterfaceAddress interfaceAddress) throws IOException {
StringBuilder sb = new StringBuilder();
InetAddress address = interfaceAddress.getAddress();
if (address instanceof Inet6Address) {
sb.append("inet6 ");
sb.append(NetworkAddress.formatAddress(address));
sb.append(" prefixlen:");
sb.append(interfaceAddress.getNetworkPrefixLength());
} else {
sb.append("inet ");
sb.append(NetworkAddress.formatAddress(address));
int netmask = 0xFFFFFFFF << (32 - interfaceAddress.getNetworkPrefixLength());
sb.append(" netmask:" + NetworkAddress.formatAddress(InetAddress.getByAddress(new byte[] {
(byte)(netmask >>> 24),
(byte)(netmask >>> 16 & 0xFF),
(byte)(netmask >>> 8 & 0xFF),
(byte)(netmask & 0xFF)
})));
InetAddress broadcast = interfaceAddress.getBroadcast();
if (broadcast != null) {
sb.append(" broadcast:" + NetworkAddress.formatAddress(broadcast));
}
}
if (address.isLoopbackAddress()) {
sb.append(" scope:host");
} else if (address.isLinkLocalAddress()) {
sb.append(" scope:link");
} else if (address.isSiteLocalAddress()) {
sb.append(" scope:site");
}
return sb.toString();
}
/** format network interface flags */
private static String formatFlags(NetworkInterface nic) throws SocketException {
StringBuilder flags = new StringBuilder();
if (nic.isUp()) {
flags.append("UP ");
}
if (nic.supportsMulticast()) {
flags.append("MULTICAST ");
}
if (nic.isLoopback()) {
flags.append("LOOPBACK ");
}
if (nic.isPointToPoint()) {
flags.append("POINTOPOINT ");
}
if (nic.isVirtual()) {
flags.append("VIRTUAL ");
}
flags.append("mtu:" + nic.getMTU());
flags.append(" index:" + nic.getIndex());
return flags.toString();
}
}
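
The netmask arithmetic in formatAddress deserves a worked example: a prefix length of n sets the top n bits of a 32-bit mask. A standalone sketch of just that computation, with a hypothetical helper class that is not part of this commit:

final class Netmasks {
    // Converts a prefix length to a dotted netmask, mirroring formatAddress above.
    // Assumes 1 <= prefixLength <= 32 (a Java int shift by 32 would be a no-op).
    // Example: prefixLength = 24 -> 0xFFFFFFFF << 8 == 0xFFFFFF00 -> "255.255.255.0".
    static String netmaskOf(int prefixLength) {
        int netmask = 0xFFFFFFFF << (32 - prefixLength);
        return (netmask >>> 24) + "." + (netmask >>> 16 & 0xFF) + "."
                + (netmask >>> 8 & 0xFF) + "." + (netmask & 0xFF);
    }
}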

View File

@ -0,0 +1,183 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.network;
import com.google.common.net.InetAddresses;
import org.elasticsearch.common.SuppressForbidden;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Objects;
/**
* Utility functions for presentation of network addresses.
* <p>
* Java's address formatting is particularly bad: every address
* has an optional host if it's resolved, so IPv4 addresses often
* look like this (note the confusing leading slash):
* <pre>
* {@code /127.0.0.1}
* </pre>
* IPv6 addresses are even worse, with no IPv6 address compression,
* and often containing things like numeric scope ids, which are even
* more confusing (e.g. they won't work in any user's browser, may refer
* to an interface on <b>another</b> machine, etc):
* <pre>
* {@code /0:0:0:0:0:0:0:1%1}
* </pre>
* This class provides sane address formatting instead, e.g.
* {@code 127.0.0.1} and {@code ::1} respectively. No methods do reverse
* lookups.
*/
public final class NetworkAddress {
/** No instantiation */
private NetworkAddress() {}
/**
* Formats a network address (with optional host) for display purposes.
* <p>
* If the host is already resolved (typically because we looked up
* a name to get it), then we include it; otherwise it is
* omitted. See {@link #formatAddress(InetAddress)} if you only
* want the address.
* <p>
* IPv6 addresses are compressed and without scope
* identifiers.
* <p>
* Example output with already-resolved hostnames:
* <ul>
* <li>IPv4: {@code localhost/127.0.0.1}</li>
* <li>IPv6: {@code localhost/::1}</li>
* </ul>
* <p>
* Example output with just an address:
* <ul>
* <li>IPv4: {@code 127.0.0.1}</li>
* <li>IPv6: {@code ::1}</li>
* </ul>
* @param address IPv4 or IPv6 address
* @return formatted string
* @see #formatAddress(InetAddress)
*/
public static String format(InetAddress address) {
return format(address, -1, true);
}
/**
* Formats a network address and port for display purposes.
* <p>
* If the host is already resolved (typically because we looked up
* a name to get it), then we include it; otherwise it is
* omitted. See {@link #formatAddress(InetSocketAddress)} if you only
* want the address.
* <p>
* This formats the address with {@link #format(InetAddress)}
* and appends the port number. IPv6 addresses will be bracketed.
* <p>
* Example output with already-resolved hostnames:
* <ul>
* <li>IPv4: {@code localhost/127.0.0.1:9300}</li>
* <li>IPv6: {@code localhost/[::1]:9300}</li>
* </ul>
* <p>
* Example output with just an address:
* <ul>
* <li>IPv4: {@code 127.0.0.1:9300}</li>
* <li>IPv6: {@code [::1]:9300}</li>
* </ul>
* @param address IPv4 or IPv6 address with port
* @return formatted string
* @see #formatAddress(InetSocketAddress)
*/
public static String format(InetSocketAddress address) {
return format(address.getAddress(), address.getPort(), true);
}
/**
* Formats a network address for display purposes.
* <p>
* This formats only the address, any hostname information,
* if present, is ignored. IPv6 addresses are compressed
* and without scope identifiers.
* <p>
* Example output with just an address:
* <ul>
* <li>IPv4: {@code 127.0.0.1}</li>
* <li>IPv6: {@code ::1}</li>
* </ul>
* @param address IPv4 or IPv6 address
* @return formatted string
*/
public static String formatAddress(InetAddress address) {
return format(address, -1, false);
}
/**
* Formats a network address and port for display purposes.
* <p>
* This formats the address with {@link #formatAddress(InetAddress)}
* and appends the port number. IPv6 addresses will be bracketed.
* Any host information, if present, is ignored.
* <p>
* Example output:
* <ul>
* <li>IPv4: {@code 127.0.0.1:9300}</li>
* <li>IPv6: {@code [::1]:9300}</li>
* </ul>
* @param address IPv4 or IPv6 address with port
* @return formatted string
*/
public static String formatAddress(InetSocketAddress address) {
return format(address.getAddress(), address.getPort(), false);
}
// note, we don't validate port, because we only allow InetSocketAddress
@SuppressForbidden(reason = "we call toString to avoid a DNS lookup")
static String format(InetAddress address, int port, boolean includeHost) {
Objects.requireNonNull(address);
StringBuilder builder = new StringBuilder();
if (includeHost) {
// must use toString to avoid a DNS lookup, but the format is specified in the spec
String toString = address.toString();
int separator = toString.indexOf('/');
if (separator > 0) {
// append hostname, with the slash too
builder.append(toString, 0, separator + 1);
}
}
if (port != -1 && address instanceof Inet6Address) {
builder.append(InetAddresses.toUriString(address));
} else {
builder.append(InetAddresses.toAddrString(address));
}
if (port != -1) {
builder.append(':');
builder.append(port);
}
return builder.toString();
}
}
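
For illustration, a hypothetical demo (not in this commit) of the formats described in the javadoc above; it assumes the NetworkAddress class is on the classpath, and actual output depends on the local resolver:

import java.net.InetAddress;
import java.net.InetSocketAddress;

public final class NetworkAddressDemo {
    public static void main(String[] args) throws Exception {
        InetAddress addr = InetAddress.getByName("localhost");   // resolved, so a host is attached
        System.out.println(NetworkAddress.format(addr));         // e.g. localhost/127.0.0.1
        System.out.println(NetworkAddress.formatAddress(addr));  // e.g. 127.0.0.1
        System.out.println(NetworkAddress.format(new InetSocketAddress(addr, 9300))); // e.g. localhost/127.0.0.1:9300
    }
}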

View File

@ -82,7 +82,7 @@ public class NetworkService extends AbstractComponent {
@Inject
public NetworkService(Settings settings) {
super(settings);
InetSocketTransportAddress.setResolveAddress(settings.getAsBoolean("network.address.serialization.resolve", false));
IfConfig.logIfNecessary();
}
/**

View File

@ -21,8 +21,6 @@ package org.elasticsearch.common.network;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.net.Inet4Address;
import java.net.Inet6Address;
@ -34,10 +32,12 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
/**
* Utilities for network interfaces / addresses
* Utilities for binding and publishing network interfaces / addresses.
* It's only intended for that purpose, not for general-purpose usage!!!!
*/
public abstract class NetworkUtils {
@ -52,6 +52,31 @@ public abstract class NetworkUtils {
*/
@Deprecated
static final boolean PREFER_V6 = Boolean.parseBoolean(System.getProperty("java.net.preferIPv6Addresses", "false"));
/**
* True if we can bind to a v6 address. It's silly, but for *binding* we need to know
* if the stack works. This can prevent scary noise on IPv4-only hosts.
* @deprecated transition mechanism only, do not use
*/
@Deprecated
public static final boolean SUPPORTS_V6;
static {
boolean v = false;
try {
for (NetworkInterface nic : getInterfaces()) {
for (InetAddress address : Collections.list(nic.getInetAddresses())) {
if (address instanceof Inet6Address) {
v = true;
break;
}
}
}
} catch (SecurityException | SocketException misconfiguration) {
v = true; // be optimistic: if you misconfigure, you get noise on your screen
}
SUPPORTS_V6 = v;
}
/** Sorts an address by preference. This way code like publishing can just pick the first one */
static int sortKey(InetAddress address, boolean prefer_v6) {
@ -84,7 +109,7 @@ public abstract class NetworkUtils {
* @deprecated remove this when multihoming is really correct
*/
@Deprecated
private static void sortAddresses(List<InetAddress> list) {
static void sortAddresses(List<InetAddress> list) {
Collections.sort(list, new Comparator<InetAddress>() {
@Override
public int compare(InetAddress left, InetAddress right) {
@ -97,8 +122,6 @@ public abstract class NetworkUtils {
});
}
private final static ESLogger logger = Loggers.getLogger(NetworkUtils.class);
/** Return all interfaces (and subinterfaces) on the system */
static List<NetworkInterface> getInterfaces() throws SocketException {
List<NetworkInterface> all = new ArrayList<>();
@ -128,7 +151,7 @@ public abstract class NetworkUtils {
}
/** Returns addresses for all loopback interfaces that are up. */
public static InetAddress[] getLoopbackAddresses() throws SocketException {
static InetAddress[] getLoopbackAddresses() throws SocketException {
List<InetAddress> list = new ArrayList<>();
for (NetworkInterface intf : getInterfaces()) {
if (intf.isLoopback() && intf.isUp()) {
@ -143,7 +166,7 @@ public abstract class NetworkUtils {
}
/** Returns addresses for the first non-loopback interface that is up. */
public static InetAddress[] getFirstNonLoopbackAddresses() throws SocketException {
static InetAddress[] getFirstNonLoopbackAddresses() throws SocketException {
List<InetAddress> list = new ArrayList<>();
for (NetworkInterface intf : getInterfaces()) {
if (intf.isLoopback() == false && intf.isUp()) {
@ -159,7 +182,7 @@ public abstract class NetworkUtils {
}
/** Returns addresses for the given interface (it must be marked up) */
public static InetAddress[] getAddressesForInterface(String name) throws SocketException {
static InetAddress[] getAddressesForInterface(String name) throws SocketException {
NetworkInterface intf = NetworkInterface.getByName(name);
if (intf == null) {
throw new IllegalArgumentException("No interface named '" + name + "' found, got " + getInterfaces());
@ -176,14 +199,17 @@ public abstract class NetworkUtils {
}
/** Returns addresses for the given host, sorted by order of preference */
public static InetAddress[] getAllByName(String host) throws UnknownHostException {
static InetAddress[] getAllByName(String host) throws UnknownHostException {
InetAddress addresses[] = InetAddress.getAllByName(host);
sortAddresses(Arrays.asList(addresses));
return addresses;
// deduplicate, in case of resolver misconfiguration
// stuff like https://bugzilla.redhat.com/show_bug.cgi?id=496300
List<InetAddress> unique = new ArrayList<>(new HashSet<>(Arrays.asList(addresses)));
sortAddresses(unique);
return unique.toArray(new InetAddress[unique.size()]);
}
/** Returns only the IPV4 addresses in {@code addresses} */
public static InetAddress[] filterIPV4(InetAddress addresses[]) {
static InetAddress[] filterIPV4(InetAddress addresses[]) {
List<InetAddress> list = new ArrayList<>();
for (InetAddress address : addresses) {
if (address instanceof Inet4Address) {
@ -197,7 +223,7 @@ public abstract class NetworkUtils {
}
/** Returns only the IPV6 addresses in {@code addresses} */
public static InetAddress[] filterIPV6(InetAddress addresses[]) {
static InetAddress[] filterIPV6(InetAddress addresses[]) {
List<InetAddress> list = new ArrayList<>();
for (InetAddress address : addresses) {
if (address instanceof Inet6Address) {

View File

@ -28,7 +28,6 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.elasticsearch.Version;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Classes;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.StreamInput;

View File

@ -21,9 +21,9 @@ package org.elasticsearch.common.transport;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.network.NetworkAddress;
import java.io.IOException;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.InetSocketAddress;
@ -32,52 +32,34 @@ import java.net.InetSocketAddress;
*/
public final class InetSocketTransportAddress implements TransportAddress {
private static boolean resolveAddress = false;
public static void setResolveAddress(boolean resolveAddress) {
InetSocketTransportAddress.resolveAddress = resolveAddress;
}
public static boolean getResolveAddress() {
return resolveAddress;
}
public static final InetSocketTransportAddress PROTO = new InetSocketTransportAddress();
private final InetSocketAddress address;
public InetSocketTransportAddress(StreamInput in) throws IOException {
if (in.readByte() == 0) {
int len = in.readByte();
byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6)
in.readFully(a);
InetAddress inetAddress;
if (len == 16) {
int scope_id = in.readInt();
inetAddress = Inet6Address.getByAddress(null, a, scope_id);
} else {
inetAddress = InetAddress.getByAddress(a);
}
int port = in.readInt();
this.address = new InetSocketAddress(inetAddress, port);
} else {
this.address = new InetSocketAddress(in.readString(), in.readInt());
}
final int len = in.readByte();
final byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6)
in.readFully(a);
InetAddress inetAddress = InetAddress.getByAddress(a);
int port = in.readInt();
this.address = new InetSocketAddress(inetAddress, port);
}
private InetSocketTransportAddress() {
address = null;
}
public InetSocketTransportAddress(String hostname, int port) {
this(new InetSocketAddress(hostname, port));
}
public InetSocketTransportAddress(InetAddress address, int port) {
this(new InetSocketAddress(address, port));
}
public InetSocketTransportAddress(InetSocketAddress address) {
if (address == null) {
throw new IllegalArgumentException("InetSocketAddress must not be null");
}
if (address.getAddress() == null) {
throw new IllegalArgumentException("Address must be resolved but wasn't - InetSocketAddress#getAddress() returned null");
}
this.address = address;
}
@ -94,12 +76,12 @@ public final class InetSocketTransportAddress implements TransportAddress {
@Override
public String getHost() {
return address.getHostName();
return getAddress(); // just delegate no resolving
}
@Override
public String getAddress() {
return address.getAddress().getHostAddress();
return NetworkAddress.formatAddress(address.getAddress());
}
@Override
@ -118,20 +100,16 @@ public final class InetSocketTransportAddress implements TransportAddress {
@Override
public void writeTo(StreamOutput out) throws IOException {
if (!resolveAddress && address.getAddress() != null) {
out.writeByte((byte) 0);
byte[] bytes = address().getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
out.writeByte((byte) bytes.length); // 1 byte
out.write(bytes, 0, bytes.length);
if (address().getAddress() instanceof Inet6Address)
out.writeInt(((Inet6Address) address.getAddress()).getScopeId());
} else {
out.writeByte((byte) 1);
out.writeString(address.getHostName());
}
byte[] bytes = address().getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
out.writeByte((byte) bytes.length); // 1 byte
out.write(bytes, 0, bytes.length);
// don't serialize scope ids over the network!!!!
// these only make sense with respect to the local machine, and would only
// reconstruct the address incorrectly on the remote side.
out.writeInt(address.getPort());
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
@ -147,6 +125,6 @@ public final class InetSocketTransportAddress implements TransportAddress {
@Override
public String toString() {
return "inet[" + address + "]";
return NetworkAddress.format(address);
}
}
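
The new wire format is deliberately minimal: one length byte, the raw address bytes (4 for IPv4, 16 for IPv6), then the port, with hostnames and scope ids never serialized. A standalone round-trip sketch of the same layout, using plain java.io and a hypothetical class rather than the actual StreamInput/StreamOutput:

import java.io.*;
import java.net.InetAddress;
import java.net.InetSocketAddress;

final class AddressWire {
    static void write(DataOutputStream out, InetSocketAddress addr) throws IOException {
        byte[] bytes = addr.getAddress().getAddress(); // 4 (IPv4) or 16 (IPv6) raw bytes
        out.writeByte(bytes.length);                   // 1 length byte
        out.write(bytes);
        out.writeInt(addr.getPort());                  // no hostname, no scope id
    }

    static InetSocketAddress read(DataInputStream in) throws IOException {
        byte[] bytes = new byte[in.readByte()];
        in.readFully(bytes);
        return new InetSocketAddress(InetAddress.getByAddress(bytes), in.readInt());
    }
}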

View File

@ -187,7 +187,7 @@ public abstract class ExtensionPoint {
protected final void bindExtensions(Binder binder) {
Multibinder<T> allocationMultibinder = Multibinder.newSetBinder(binder, extensionClass);
for (Class<? extends T> clazz : extensions) {
allocationMultibinder.addBinding().to(clazz);
allocationMultibinder.addBinding().to(clazz).asEagerSingleton();
}
}
}

View File

@ -19,18 +19,20 @@
package org.elasticsearch.discovery;
import com.google.common.collect.Lists;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.discovery.local.LocalDiscovery;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -44,7 +46,8 @@ public class DiscoveryModule extends AbstractModule {
public static final String ZEN_MASTER_SERVICE_TYPE_KEY = "discovery.zen.masterservice.type";
private final Settings settings;
private final List<Class<? extends UnicastHostsProvider>> unicastHostProviders = Lists.newArrayList();
private final List<Class<? extends UnicastHostsProvider>> unicastHostProviders = new ArrayList<>();
private final ExtensionPoint.ClassSet<ZenPing> zenPings = new ExtensionPoint.ClassSet<>("zen_ping", ZenPing.class);
private final Map<String, Class<? extends Discovery>> discoveryTypes = new HashMap<>();
private final Map<String, Class<? extends ElectMasterService>> masterServiceType = new HashMap<>();
@ -53,6 +56,8 @@ public class DiscoveryModule extends AbstractModule {
addDiscoveryType("local", LocalDiscovery.class);
addDiscoveryType("zen", ZenDiscovery.class);
addElectMasterService("zen", ElectMasterService.class);
// always add the unicast hosts, or things get angry!
addZenPing(UnicastZenPing.class);
}
/**
@ -82,6 +87,10 @@ public class DiscoveryModule extends AbstractModule {
this.masterServiceType.put(type, masterService);
}
public void addZenPing(Class<? extends ZenPing> clazz) {
zenPings.registerExtension(clazz);
}
@Override
protected void configure() {
String defaultType = DiscoveryNode.localNode(settings) ? "local" : "zen";
@ -107,6 +116,7 @@ public class DiscoveryModule extends AbstractModule {
for (Class<? extends UnicastHostsProvider> unicastHostProvider : unicastHostProviders) {
unicastHostsProviderMultibinder.addBinding().to(unicastHostProvider);
}
zenPings.bind(binder());
}
bind(Discovery.class).to(discoveryClass).asEagerSingleton();
bind(DiscoveryService.class).asEagerSingleton();

View File

@ -19,21 +19,11 @@
package org.elasticsearch.discovery.zen.ping;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.multicast.MulticastZenPing;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
import java.util.Collections;
@ -43,28 +33,17 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
/**
*
*/
public class ZenPingService extends AbstractLifecycleComponent<ZenPing> implements ZenPing {
private volatile List<? extends ZenPing> zenPings = Collections.emptyList();
private List<ZenPing> zenPings = Collections.emptyList();
@Inject
public ZenPingService(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName, NetworkService networkService,
Version version, ElectMasterService electMasterService, @Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
public ZenPingService(Settings settings, Set<ZenPing> zenPings) {
super(settings);
List<ZenPing> zenPingsBuilder = new ArrayList<>();
if (this.settings.getAsBoolean("discovery.zen.ping.multicast.enabled", true)) {
zenPingsBuilder.add(new MulticastZenPing(settings, threadPool, transportService, clusterName, networkService, version));
}
// always add the unicast hosts, so it will be able to receive unicast requests even when working in multicast
zenPingsBuilder.add(new UnicastZenPing(settings, threadPool, transportService, clusterName, version, electMasterService, unicastHostsProviders));
this.zenPings = Collections.unmodifiableList(zenPingsBuilder);
this.zenPings = Collections.unmodifiableList(new ArrayList<>(zenPings));
}
public List<? extends ZenPing> zenPings() {
public List<ZenPing> zenPings() {
return this.zenPings;
}

View File

@ -29,6 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
@ -63,8 +64,12 @@ import static org.elasticsearch.discovery.zen.ping.ZenPing.PingResponse.readPing
public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implements ZenPing {
public static final String ACTION_NAME = "internal:discovery/zen/unicast";
public static final String DISCOVERY_ZEN_PING_UNICAST_HOSTS = "discovery.zen.ping.unicast.hosts";
// these limits are per-address
public static final int LIMIT_FOREIGN_PORTS_COUNT = 1;
public static final int LIMIT_LOCAL_PORTS_COUNT = 5;
public static final int LIMIT_PORTS_COUNT = 1;
private final ThreadPool threadPool;
private final TransportService transportService;
@ -96,6 +101,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
private volatile boolean closed = false;
@Inject
public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName,
Version version, ElectMasterService electMasterService, @Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
super(settings);
@ -111,21 +117,30 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
}
this.concurrentConnects = this.settings.getAsInt("discovery.zen.ping.unicast.concurrent_connects", 10);
String[] hostArr = this.settings.getAsArray("discovery.zen.ping.unicast.hosts");
String[] hostArr = this.settings.getAsArray(DISCOVERY_ZEN_PING_UNICAST_HOSTS);
// trim the hosts
for (int i = 0; i < hostArr.length; i++) {
hostArr[i] = hostArr[i].trim();
}
List<String> hosts = Lists.newArrayList(hostArr);
final int limitPortCounts;
if (hosts.isEmpty()) {
// if unicast hosts are not specified, fill with simple defaults on the local machine
limitPortCounts = LIMIT_LOCAL_PORTS_COUNT;
hosts.addAll(transportService.getLocalAddresses());
} else {
// we limit to 1 address per host; it makes no sense to ping 100 ports
limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT;
}
logger.debug("using initial hosts {}, with concurrent_connects [{}]", hosts, concurrentConnects);
List<DiscoveryNode> configuredTargetNodes = Lists.newArrayList();
for (String host : hosts) {
try {
TransportAddress[] addresses = transportService.addressesFromString(host);
// we limit to 1 address per host; it makes no sense to ping 100 ports
for (int i = 0; (i < addresses.length && i < LIMIT_PORTS_COUNT); i++) {
configuredTargetNodes.add(new DiscoveryNode(UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#", addresses[i], version.minimumCompatibilityVersion()));
TransportAddress[] addresses = transportService.addressesFromString(host, limitPortCounts);
for (TransportAddress address : addresses) {
configuredTargetNodes.add(new DiscoveryNode(UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#", address, version.minimumCompatibilityVersion()));
}
} catch (Exception e) {
throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e);

View File

@ -43,12 +43,10 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.*;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
@ -253,10 +251,9 @@ public abstract class MetaDataStateFormat<T> {
if (dataLocations != null) { // select all eligible files first
for (Path dataLocation : dataLocations) {
final Path stateDir = dataLocation.resolve(STATE_DIR_NAME);
if (!Files.isDirectory(stateDir)) {
continue;
}
// now, iterate over the current versions, and find latest one
// we don't check if the stateDir is present, since it could be deleted
// after the check. Also, if there is a _state file and it's not a dir, something is really wrong
try (DirectoryStream<Path> paths = Files.newDirectoryStream(stateDir)) { // we don't pass a glob since we need the group part for parsing
for (Path stateFile : paths) {
final Matcher matcher = stateFilePattern.matcher(stateFile.getFileName().toString());
@ -270,6 +267,8 @@ public abstract class MetaDataStateFormat<T> {
files.add(pav);
}
}
} catch (NoSuchFileException | FileNotFoundException ex) {
// no _state directory -- move on
}
}
}

View File

@ -94,12 +94,12 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
DiscoveryNode node = nodesToAllocate.yesNodes.get(0);
logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node);
changed = true;
unassignedIterator.initialize(node.id(), nodesAndVersions.highestVersion);
unassignedIterator.initialize(node.id(), nodesAndVersions.highestVersion, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
} else if (nodesToAllocate.throttleNodes.isEmpty() == true && nodesToAllocate.noNodes.isEmpty() == false) {
DiscoveryNode node = nodesToAllocate.noNodes.get(0);
logger.debug("[{}][{}]: forcing allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node);
changed = true;
unassignedIterator.initialize(node.id(), nodesAndVersions.highestVersion);
unassignedIterator.initialize(node.id(), nodesAndVersions.highestVersion, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
} else {
// we are throttling this, but we have enough to allocate to this node, ignore it for now
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodes);

View File

@ -169,7 +169,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
// we found a match
changed = true;
unassignedIterator.initialize(nodeWithHighestMatch.nodeId());
unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), shard.version(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
}
} else if (matchingNodes.hasAnyData() == false) {
// if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation

View File

@ -20,6 +20,7 @@
package org.elasticsearch.http;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.ByteStreams;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@ -30,6 +31,7 @@ import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.rest.*;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.*;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.HashMap;
@ -114,10 +116,14 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
}
public void internalDispatchRequest(final HttpRequest request, final HttpChannel channel) {
if (request.rawPath().startsWith("/_plugin/")) {
String rawPath = request.rawPath();
if (rawPath.startsWith("/_plugin/")) {
RestFilterChain filterChain = restController.filterChain(pluginSiteFilter);
filterChain.continueProcessing(request, channel);
return;
} else if (rawPath.equals("/favicon.ico")) {
handleFavicon(request, channel);
return;
}
restController.dispatchRequest(request, channel);
}
@ -131,6 +137,22 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
}
}
void handleFavicon(HttpRequest request, HttpChannel channel) {
if (request.method() == RestRequest.Method.GET) {
try {
try (InputStream stream = getClass().getResourceAsStream("/config/favicon.ico")) {
byte[] content = ByteStreams.toByteArray(stream);
BytesRestResponse restResponse = new BytesRestResponse(RestStatus.OK, "image/x-icon", content);
channel.sendResponse(restResponse);
}
} catch (IOException e) {
channel.sendResponse(new BytesRestResponse(INTERNAL_SERVER_ERROR));
}
} else {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));
}
}
void handlePluginSite(HttpRequest request, HttpChannel channel) throws IOException {
if (disableSites) {
channel.sendResponse(new BytesRestResponse(FORBIDDEN));

View File

@ -24,6 +24,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.netty.NettyUtils;
import org.elasticsearch.common.netty.OpenChannelsHandler;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.network.NetworkUtils;
import org.elasticsearch.common.settings.Settings;
@ -274,7 +275,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
private void bindAddress(final InetAddress hostAddress) {
PortsRange portsRange = new PortsRange(port);
final AtomicReference<Exception> lastException = new AtomicReference<>();
final AtomicReference<SocketAddress> boundSocket = new AtomicReference<>();
final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
boolean success = portsRange.iterate(new PortsRange.PortCallback() {
@Override
public boolean onPortNumber(int portNumber) {
@ -282,7 +283,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
synchronized (serverChannels) {
Channel channel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber));
serverChannels.add(channel);
boundSocket.set(channel.getLocalAddress());
boundSocket.set((InetSocketAddress) channel.getLocalAddress());
}
} catch (Exception e) {
lastException.set(e);
@ -294,7 +295,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
if (!success) {
throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get());
}
logger.info("Bound http to address [{}]", boundSocket.get());
logger.info("Bound http to address {{}}", NetworkAddress.format(boundSocket.get()));
}
@Override

View File

@ -26,6 +26,7 @@ import com.google.common.collect.Iterators;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
@ -55,7 +56,6 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InternalIndicesLifecycle;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.ShardsPluginsModule;
import java.io.Closeable;
import java.io.IOException;
@ -270,7 +270,8 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
}
}
public synchronized IndexShard createShard(int sShardId, boolean primary) {
public synchronized IndexShard createShard(int sShardId, ShardRouting routing) {
final boolean primary = routing.primary();
/*
* TODO: we execute this in parallel but it's a synced method. Yet, we might
* be able to serialize the execution via the cluster state in the future. for now we just
@ -299,7 +300,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
}
}
if (path == null) {
path = ShardPath.selectNewPathForShard(nodeEnv, shardId, indexSettings, getAvgShardSizeInBytes(), this);
path = ShardPath.selectNewPathForShard(nodeEnv, shardId, indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(), this);
logger.debug("{} creating using a new path [{}]", shardId, path);
} else {
logger.debug("{} creating using an existing path [{}]", shardId, path);
@ -315,7 +316,10 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
(primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
ModulesBuilder modules = new ModulesBuilder();
modules.add(new ShardsPluginsModule(indexSettings, pluginsService));
// plugin modules must be added here, before others, or we can get crazy injection errors...
for (Module pluginModule : pluginsService.shardModules(indexSettings)) {
modules.add(pluginModule);
}
modules.add(new IndexShardModule(shardId, primary, indexSettings));
modules.add(new StoreModule(injector.getInstance(IndexStore.class).shardDirectory(), lock,
new StoreCloseListener(shardId, canDeleteShardContent, new Closeable() {
@ -325,6 +329,9 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
}
}), path));
modules.add(new DeletionPolicyModule());
pluginsService.processModules(modules);
try {
shardInjector = modules.createChildInjector(injector);
} catch (CreationException e) {

View File

@ -261,6 +261,9 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
if (mapper.type().length() == 0) {
throw new InvalidTypeNameException("mapping type name is empty");
}
if (Version.indexCreated(indexSettings).onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) {
throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
}
if (mapper.type().charAt(0) == '_') {
throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
}

View File

@ -78,8 +78,6 @@ public class MatchQueryBuilder extends QueryBuilder implements BoostableQueryBui
private String minimumShouldMatch;
private String rewrite = null;
private String fuzzyRewrite = null;
private Boolean lenient;
@ -179,11 +177,6 @@ public class MatchQueryBuilder extends QueryBuilder implements BoostableQueryBui
return this;
}
public MatchQueryBuilder rewrite(String rewrite) {
this.rewrite = rewrite;
return this;
}
public MatchQueryBuilder fuzzyRewrite(String fuzzyRewrite) {
this.fuzzyRewrite = fuzzyRewrite;
return this;
@ -249,9 +242,6 @@ public class MatchQueryBuilder extends QueryBuilder implements BoostableQueryBui
if (minimumShouldMatch != null) {
builder.field("minimum_should_match", minimumShouldMatch);
}
if (rewrite != null) {
builder.field("rewrite", rewrite);
}
if (fuzzyRewrite != null) {
builder.field("fuzzy_rewrite", fuzzyRewrite);
}

View File

@ -61,8 +61,6 @@ public class MultiMatchQueryBuilder extends QueryBuilder implements BoostableQue
private String minimumShouldMatch;
private String rewrite = null;
private String fuzzyRewrite = null;
private Boolean useDisMax;
@ -255,11 +253,6 @@ public class MultiMatchQueryBuilder extends QueryBuilder implements BoostableQue
return this;
}
public MultiMatchQueryBuilder rewrite(String rewrite) {
this.rewrite = rewrite;
return this;
}
public MultiMatchQueryBuilder fuzzyRewrite(String fuzzyRewrite) {
this.fuzzyRewrite = fuzzyRewrite;
return this;
@ -367,9 +360,6 @@ public class MultiMatchQueryBuilder extends QueryBuilder implements BoostableQue
if (minimumShouldMatch != null) {
builder.field("minimum_should_match", minimumShouldMatch);
}
if (rewrite != null) {
builder.field("rewrite", rewrite);
}
if (fuzzyRewrite != null) {
builder.field("fuzzy_rewrite", fuzzyRewrite);
}

View File

@ -68,7 +68,6 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
private Locale locale;
private float boost = -1;
private Fuzziness fuzziness;
@ -99,6 +98,8 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
/** To limit effort spent determinizing regexp queries. */
private Integer maxDeterminizedStates;
private Boolean escape;
public QueryStringQueryBuilder(String queryString) {
this.queryString = queryString;
}
@ -159,11 +160,11 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
/**
* Sets the boolean operator of the query parser used to parse the query string.
* <p/>
* <p>In default mode ({@link FieldQueryBuilder.Operator#OR}) terms without any modifiers
* <p>In default mode ({@link Operator#OR}) terms without any modifiers
* are considered optional: for example <code>capital of Hungary</code> is equal to
* <code>capital OR of OR Hungary</code>.
* <p/>
* <p>In {@link FieldQueryBuilder.Operator#AND} mode terms are considered to be in conjunction: the
* <p>In {@link Operator#AND} mode terms are considered to be in conjunction: the
* above-mentioned query is parsed as <code>capital AND of AND Hungary</code>
*/
public QueryStringQueryBuilder defaultOperator(Operator defaultOperator) {
@ -342,6 +343,14 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
return this;
}
/**
* Set to <tt>true</tt> to enable escaping of the query string
*/
public QueryStringQueryBuilder escape(boolean escape) {
this.escape = escape;
return this;
}
@Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(QueryStringQueryParser.NAME);
@ -431,6 +440,9 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
if (timeZone != null) {
builder.field("time_zone", timeZone);
}
if (escape != null) {
builder.field("escape", escape);
}
builder.endObject();
}
}

View File

@ -21,8 +21,15 @@ package org.elasticsearch.index.shard;
import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.CheckIndex;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.apache.lucene.store.AlreadyClosedException;
@ -162,8 +169,8 @@ public class IndexShard extends AbstractIndexShardComponent {
private TimeValue refreshInterval;
private volatile ScheduledFuture refreshScheduledFuture;
private volatile ScheduledFuture mergeScheduleFuture;
private volatile ScheduledFuture<?> refreshScheduledFuture;
private volatile ScheduledFuture<?> mergeScheduleFuture;
protected volatile ShardRouting shardRouting;
protected volatile IndexShardState state;
protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();
@ -252,7 +259,42 @@ public class IndexShard extends AbstractIndexShardComponent {
if (indexSettings.getAsBoolean(IndexCacheModule.QUERY_CACHE_EVERYTHING, false)) {
cachingPolicy = QueryCachingPolicy.ALWAYS_CACHE;
} else {
cachingPolicy = new UsageTrackingQueryCachingPolicy();
assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_5_2_1;
// TODO: remove this hack in Lucene 5.4, use UsageTrackingQueryCachingPolicy directly
// See https://issues.apache.org/jira/browse/LUCENE-6748
// cachingPolicy = new UsageTrackingQueryCachingPolicy();
final QueryCachingPolicy wrapped = new UsageTrackingQueryCachingPolicy();
cachingPolicy = new QueryCachingPolicy() {
@Override
public boolean shouldCache(Query query, LeafReaderContext context) throws IOException {
if (query instanceof MatchAllDocsQuery
// MatchNoDocsQuery currently rewrites to a BooleanQuery,
// but who knows, it might get its own Weight one day
|| query instanceof MatchNoDocsQuery) {
return false;
}
if (query instanceof BooleanQuery) {
BooleanQuery bq = (BooleanQuery) query;
if (bq.clauses().isEmpty()) {
return false;
}
}
if (query instanceof DisjunctionMaxQuery) {
DisjunctionMaxQuery dmq = (DisjunctionMaxQuery) query;
if (dmq.getDisjuncts().isEmpty()) {
return false;
}
}
return wrapped.shouldCache(query, context);
}
@Override
public void onUse(Query query) {
wrapped.onUse(query);
}
};
}
this.engineConfig = newEngineConfig(translogConfig, cachingPolicy);
this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);

View File

@ -20,7 +20,12 @@
package org.elasticsearch.indices;
import com.google.common.base.Function;
import com.google.common.collect.*;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.CollectionUtil;
import org.apache.lucene.util.IOUtils;
@ -35,7 +40,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.*;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Injectors;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
@ -43,7 +53,12 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.*;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexNameModule;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.LocalNodeIdModule;
import org.elasticsearch.index.aliases.IndexAliasesServiceModule;
import org.elasticsearch.index.analysis.AnalysisModule;
import org.elasticsearch.index.analysis.AnalysisService;
@ -71,13 +86,16 @@ import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.IndexStoreModule;
import org.elasticsearch.indices.analysis.IndicesAnalysisService;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.plugins.IndexPluginsModule;
import org.elasticsearch.plugins.PluginsService;
import java.io.Closeable;
import java.io.IOException;
import java.nio.file.Files;
import java.util.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
@ -306,7 +324,10 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
modules.add(new IndexNameModule(index));
modules.add(new LocalNodeIdModule(localNodeId));
modules.add(new IndexSettingsModule(index, indexSettings));
modules.add(new IndexPluginsModule(indexSettings, pluginsService));
// plugin modules must be added here, before others, or we can get crazy injection errors...
for (Module pluginModule : pluginsService.indexModules(indexSettings)) {
modules.add(pluginModule);
}
modules.add(new IndexStoreModule(indexSettings));
modules.add(new AnalysisModule(indexSettings, indicesAnalysisService));
modules.add(new SimilarityModule(indexSettings));
@ -315,6 +336,8 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
modules.add(new MapperServiceModule());
modules.add(new IndexAliasesServiceModule());
modules.add(new IndexModule(indexSettings));
pluginsService.processModules(modules);
Injector indexInjector;
try {

View File

@ -638,7 +638,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
if (logger.isDebugEnabled()) {
logger.debug("[{}][{}] creating shard", shardRouting.index(), shardId);
}
IndexShard indexShard = indexService.createShard(shardId, shardRouting.primary());
IndexShard indexShard = indexService.createShard(shardId, shardRouting);
indexShard.updateRoutingEntry(shardRouting, state.blocks().disableStatePersistence() == false);
indexShard.addFailedEngineListener(failedEngineHandler);
} catch (IndexShardAlreadyExistsException e) {

View File

@ -19,7 +19,7 @@
package org.elasticsearch.monitor.process;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.BootstrapInfo;
import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
@ -136,7 +136,7 @@ public class ProcessProbe {
}
public ProcessInfo processInfo() {
return new ProcessInfo(jvmInfo().pid(), Bootstrap.isMemoryLocked());
return new ProcessInfo(jvmInfo().pid(), BootstrapInfo.isMemoryLocked());
}
public ProcessStats processStats() {

View File

@ -35,6 +35,7 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
@ -159,7 +160,11 @@ public class Node implements Releasable {
ModulesBuilder modules = new ModulesBuilder();
modules.add(new Version.Module(version));
modules.add(new CircuitBreakerModule(settings));
modules.add(new PluginsModule(settings, pluginsService));
// plugin modules must be added here, before the others, or we can get confusing injection errors...
for (Module pluginModule : pluginsService.nodeModules()) {
modules.add(pluginModule);
}
modules.add(new PluginsModule(pluginsService));
modules.add(new SettingsModule(settings));
modules.add(new NodeModule(this));
modules.add(new NetworkModule());
@ -187,6 +192,9 @@ public class Node implements Releasable {
modules.add(new RepositoriesModule());
modules.add(new TribeModule());
pluginsService.processModules(modules);
injector = modules.createInjector();
client = injector.getInstance(Client.class);
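Both hunks above (IndicesService and Node) follow the same contract: modules contributed by plugins are registered ahead of the core modules, and processModules(...) runs last so plugins can still customize the assembled list before the injector exists. A minimal sketch of that ordering, using only the types already shown in this diff:

// Sketch only: plugin bindings must exist before core modules configure themselves.
ModulesBuilder modules = new ModulesBuilder();
for (Module pluginModule : pluginsService.nodeModules()) {
    modules.add(pluginModule);             // plugin-contributed modules first
}
modules.add(new SettingsModule(settings)); // core modules afterwards
pluginsService.processModules(modules);    // plugins may post-process the full list
Injector injector = modules.createInjector();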

View File

@ -20,6 +20,7 @@
package org.elasticsearch.node.internal;
import com.google.common.base.Charsets;
import com.google.common.collect.Sets;
import com.google.common.collect.UnmodifiableIterator;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.Booleans;
@ -40,6 +41,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import static org.elasticsearch.common.Strings.cleanPath;
@ -113,13 +115,21 @@ public class InternalSettingsPreparer {
}
}
if (loadFromEnv) {
boolean settingsFileFound = false;
Set<String> foundSuffixes = Sets.newHashSet();
for (String allowedSuffix : ALLOWED_SUFFIXES) {
try {
settingsBuilder.loadFromPath(environment.configFile().resolve("elasticsearch" + allowedSuffix));
} catch (SettingsException e) {
// ignore
Path path = environment.configFile().resolve("elasticsearch" + allowedSuffix);
if (Files.exists(path)) {
if (!settingsFileFound) {
settingsBuilder.loadFromPath(path);
}
settingsFileFound = true;
foundSuffixes.add(allowedSuffix);
}
}
if (foundSuffixes.size() > 1) {
throw new SettingsException("multiple settings files found with suffixes: " + Strings.collectionToDelimitedString(foundSuffixes, ","));
}
}
}
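Concretely, assuming ALLOWED_SUFFIXES covers the usual variants (.yml, .yaml, .json, .properties; the exact list is defined elsewhere in this class), a config directory containing both elasticsearch.yml and elasticsearch.json previously loaded both, with whichever loaded last silently overriding the first. It now fails fast with "multiple settings files found with suffixes: .yml,.json".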

View File

@ -26,7 +26,6 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter;
@ -52,7 +51,6 @@ import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.SearchContextAggregations;
@ -67,6 +65,7 @@ import org.elasticsearch.search.highlight.SearchContextHighlight;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHitField;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.lookup.LeafSearchLookup;
@ -347,12 +346,12 @@ public class PercolateContext extends SearchContext {
}
@Override
public Scroll scroll() {
public ScrollContext scrollContext() {
throw new UnsupportedOperationException();
}
@Override
public SearchContext scroll(Scroll scroll) {
public SearchContext scrollContext(ScrollContext scroll) {
throw new UnsupportedOperationException();
}
@ -620,16 +619,6 @@ public class PercolateContext extends SearchContext {
throw new UnsupportedOperationException();
}
@Override
public void lastEmittedDoc(ScoreDoc doc) {
throw new UnsupportedOperationException();
}
@Override
public ScoreDoc lastEmittedDoc() {
throw new UnsupportedOperationException();
}
@Override
public DfsSearchResult dfsResult() {
throw new UnsupportedOperationException();

View File

@ -52,7 +52,7 @@ public class PluginInfo implements Streamable, ToXContent {
private String description;
private boolean site;
private String version;
private boolean jvm;
private String classname;
private boolean isolated;
@ -86,7 +86,11 @@ public class PluginInfo implements Streamable, ToXContent {
try (InputStream stream = Files.newInputStream(descriptor)) {
props.load(stream);
}
String name = dir.getFileName().toString();
String name = props.getProperty("name");
if (name == null || name.isEmpty()) {
throw new IllegalArgumentException("Property [name] is missing in [" + descriptor + "]");
}
PluginManager.checkForForbiddenName(name);
String description = props.getProperty("description");
if (description == null) {
throw new IllegalArgumentException("Property [description] is missing for plugin [" + name + "]");
@ -95,6 +99,7 @@ public class PluginInfo implements Streamable, ToXContent {
if (version == null) {
throw new IllegalArgumentException("Property [version] is missing for plugin [" + name + "]");
}
boolean jvm = Boolean.parseBoolean(props.getProperty("jvm"));
boolean site = Boolean.parseBoolean(props.getProperty("site"));
if (jvm == false && site == false) {
@ -115,6 +120,7 @@ public class PluginInfo implements Streamable, ToXContent {
if (javaVersionString == null) {
throw new IllegalArgumentException("Property [java.version] is missing for jvm plugin [" + name + "]");
}
JarHell.checkVersionFormat(javaVersionString);
JarHell.checkJavaVersion(name, javaVersionString);
isolated = Boolean.parseBoolean(props.getProperty("isolated", "true"));
classname = props.getProperty("classname");
@ -122,7 +128,7 @@ public class PluginInfo implements Streamable, ToXContent {
throw new IllegalArgumentException("Property [classname] is missing for jvm plugin [" + name + "]");
}
}
if (site) {
if (!Files.exists(dir.resolve("_site"))) {
throw new IllegalArgumentException("Plugin [" + name + "] is a site plugin but has no '_site/' directory");
@ -159,14 +165,14 @@ public class PluginInfo implements Streamable, ToXContent {
public boolean isJvm() {
return jvm;
}
/**
* @return true if jvm plugin has isolated classloader
*/
public boolean isIsolated() {
return isolated;
}
/**
* @return jvm plugin's classname
*/
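Taken together, the parsing code above implies a descriptor of roughly this shape (values illustrative; the property keys are exactly the ones the parser reads):

# illustrative descriptor for a jvm plugin
name=my-plugin
description=An example plugin
version=1.0
jvm=true
classname=org.example.MyPlugin
java.version=1.7
isolated=true
site=false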

View File

@ -84,6 +84,7 @@ public class PluginManager {
"cloud-azure",
"cloud-gce",
"delete-by-query",
"discovery-multicast",
"lang-javascript",
"lang-python",
"mapper-murmur3",
@ -91,11 +92,11 @@ public class PluginManager {
).build();
private final Environment environment;
private String url;
private URL url;
private OutputMode outputMode;
private TimeValue timeout;
public PluginManager(Environment environment, String url, OutputMode outputMode, TimeValue timeout) {
public PluginManager(Environment environment, URL url, OutputMode outputMode, TimeValue timeout) {
this.environment = environment;
this.url = url;
this.outputMode = outputMode;
@ -103,8 +104,8 @@ public class PluginManager {
}
public void downloadAndExtract(String name, Terminal terminal) throws IOException {
if (name == null) {
throw new IllegalArgumentException("plugin name must be supplied with install [name].");
if (name == null && url == null) {
throw new IllegalArgumentException("plugin name or url must be supplied with install.");
}
if (!Files.exists(environment.pluginsFile())) {
@ -116,8 +117,14 @@ public class PluginManager {
throw new IOException("plugin directory " + environment.pluginsFile() + " is read only");
}
PluginHandle pluginHandle = PluginHandle.parse(name);
checkForForbiddenName(pluginHandle.name);
PluginHandle pluginHandle;
if (name != null) {
pluginHandle = PluginHandle.parse(name);
checkForForbiddenName(pluginHandle.name);
} else {
// if we have no name but a url, use a temporary name that will be overwritten later
pluginHandle = new PluginHandle("temp_name" + new Random().nextInt(), null, null);
}
Path pluginFile = download(pluginHandle, terminal);
extract(pluginHandle, terminal, pluginFile);
@ -138,7 +145,7 @@ public class PluginManager {
// first, try directly from the URL provided
if (url != null) {
URL pluginUrl = new URL(url);
URL pluginUrl = url;
boolean isSecureProtocol = "https".equalsIgnoreCase(pluginUrl.getProtocol());
boolean isAuthInfoSet = !Strings.isNullOrEmpty(pluginUrl.getUserInfo());
if (isAuthInfoSet && !isSecureProtocol) {
@ -204,14 +211,10 @@ public class PluginManager {
}
private void extract(PluginHandle pluginHandle, Terminal terminal, Path pluginFile) throws IOException {
final Path extractLocation = pluginHandle.extractedDir(environment);
if (Files.exists(extractLocation)) {
throw new IOException("plugin directory " + extractLocation.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + pluginHandle.name + "' command");
}
// unzip plugin to a staging temp dir, named for the plugin
Path tmp = Files.createTempDirectory(environment.tmpFile(), null);
Path root = tmp.resolve(pluginHandle.name);
unzipPlugin(pluginFile, root);
// find the actual root (in case it's unzipped with extra directory wrapping)
@ -226,6 +229,13 @@ public class PluginManager {
jarHellCheck(root, info.isIsolated());
}
// update name in handle based on 'name' property found in descriptor file
pluginHandle = new PluginHandle(info.getName(), pluginHandle.version, pluginHandle.user);
final Path extractLocation = pluginHandle.extractedDir(environment);
if (Files.exists(extractLocation)) {
throw new IOException("plugin directory " + extractLocation.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + pluginHandle.name + "' command");
}
// install plugin
FileSystemUtils.copyDirectoryRecursively(root, extractLocation);
terminal.println("Installed %s into %s", pluginHandle.name, extractLocation.toAbsolutePath());
@ -334,7 +344,7 @@ public class PluginManager {
private void unzipPlugin(Path zip, Path target) throws IOException {
Files.createDirectories(target);
try (ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) {
ZipEntry entry;
byte[] buffer = new byte[8192];
@ -395,7 +405,7 @@ public class PluginManager {
}
}
private static void checkForForbiddenName(String name) {
static void checkForForbiddenName(String name) {
if (!hasLength(name) || BLACKLIST.contains(name.toLowerCase(Locale.ROOT))) {
throw new IllegalArgumentException("Illegal plugin name: " + name);
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.plugins;
import com.google.common.base.Strings;
import org.apache.commons.cli.CommandLine;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
@ -32,7 +33,8 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.PluginManager.OutputMode;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.Locale;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
@ -166,19 +168,29 @@ public class PluginManagerCliParser extends CliTool {
private static final String NAME = "install";
private static final CliToolConfig.Cmd CMD = cmd(NAME, Install.class)
.options(option("u", "url").required(false).hasArg(true))
.options(option("t", "timeout").required(false).hasArg(false))
.build();
static Command parse(Terminal terminal, CommandLine cli) {
String[] args = cli.getArgs();
// install [plugin-name/url]
if ((args == null) || (args.length == 0)) {
return exitCmd(ExitStatus.USAGE, terminal, "plugin name is missing (type -h for help)");
return exitCmd(ExitStatus.USAGE, terminal, "plugin name or url is missing (type -h for help)");
}
String name = args[0];
URL optionalPluginUrl = null;
// try parsing cli argument as URL
try {
optionalPluginUrl = new URL(name);
name = null;
} catch (MalformedURLException e) {
// we tried to parse the cli argument as url and failed
// continue treating it as a symbolic plugin name like `analysis-icu` etc.
}
String name = args[0];
TimeValue timeout = TimeValue.parseTimeValue(cli.getOptionValue("t"), DEFAULT_TIMEOUT, "cli");
String url = cli.getOptionValue("u");
OutputMode outputMode = OutputMode.DEFAULT;
if (cli.hasOption("s")) {
@ -188,15 +200,15 @@ public class PluginManagerCliParser extends CliTool {
outputMode = OutputMode.VERBOSE;
}
return new Install(terminal, name, outputMode, url, timeout);
return new Install(terminal, name, outputMode, optionalPluginUrl, timeout);
}
final String name;
private OutputMode outputMode;
final String url;
final URL url;
final TimeValue timeout;
Install(Terminal terminal, String name, OutputMode outputMode, String url, TimeValue timeout) {
Install(Terminal terminal, String name, OutputMode outputMode, URL url, TimeValue timeout) {
super(terminal);
this.name = name;
this.outputMode = outputMode;
@ -207,7 +219,11 @@ public class PluginManagerCliParser extends CliTool {
@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
PluginManager pluginManager = new PluginManager(env, url, outputMode, timeout);
terminal.println("-> Installing " + Strings.nullToEmpty(name) + "...");
if (name != null) {
terminal.println("-> Installing " + Strings.nullToEmpty(name) + "...");
} else {
terminal.println("-> Installing from " + url + "...");
}
pluginManager.downloadAndExtract(name, terminal);
return ExitStatus.OK;
}
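With this change the single positional argument is first tried as a URL, so both invocation styles should work (the file path is illustrative):

plugin install analysis-icu
plugin install file:///tmp/my-plugin.zip

When the argument parses as a URL, name becomes null and PluginManager installs under a temporary handle until the name property is read from the plugin's descriptor.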

View File

@ -20,35 +20,15 @@
package org.elasticsearch.plugins;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.PreProcessModule;
import org.elasticsearch.common.inject.SpawnModules;
import org.elasticsearch.common.settings.Settings;
/**
*
*/
public class PluginsModule extends AbstractModule implements SpawnModules, PreProcessModule {
private final Settings settings;
public class PluginsModule extends AbstractModule {
private final PluginsService pluginsService;
public PluginsModule(Settings settings, PluginsService pluginsService) {
this.settings = settings;
public PluginsModule(PluginsService pluginsService) {
this.pluginsService = pluginsService;
}
@Override
public Iterable<? extends Module> spawnModules() {
return pluginsService.nodeModules();
}
@Override
public void processModule(Module module) {
pluginsService.processModule(module);
}
@Override
protected void configure() {
bind(PluginsService.class).toInstance(pluginsService);

View File

@ -21,9 +21,14 @@ package org.elasticsearch.plugins;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.analysis.util.CharFilterFactory;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.PostingsFormat;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
@ -106,7 +111,7 @@ public class PluginsService extends AbstractComponent {
List<Bundle> bundles = getPluginBundles(environment);
tupleBuilder.addAll(loadBundles(bundles));
} catch (IOException ex) {
throw new IllegalStateException(ex);
throw new IllegalStateException("Unable to initialize plugins", ex);
}
plugins = Collections.unmodifiableList(tupleBuilder);
@ -278,9 +283,10 @@ public class PluginsService extends AbstractComponent {
}
static List<Bundle> getPluginBundles(Environment environment) throws IOException {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
ESLogger logger = Loggers.getLogger(PluginsService.class);
Path pluginsDirectory = environment.pluginsFile();
// TODO: remove this leniency, but tests bogusly rely on it
if (!isAccessibleDirectory(pluginsDirectory, logger)) {
return Collections.emptyList();
}
@ -346,6 +352,8 @@ public class PluginsService extends AbstractComponent {
for (PluginInfo pluginInfo : bundle.plugins) {
final Plugin plugin;
if (pluginInfo.isJvm()) {
// reload lucene SPI with any new services from the plugin
reloadLuceneSPI(loader);
plugin = loadPlugin(pluginInfo.getClassname(), settings, loader);
} else {
plugin = new SitePlugin(pluginInfo.getName(), pluginInfo.getDescription());
@ -357,6 +365,24 @@ public class PluginsService extends AbstractComponent {
return Collections.unmodifiableList(plugins);
}
/**
* Reloads all Lucene SPI implementations using the new classloader.
* This method must be called after the new classloader has been created to
* register the services for use.
*/
static void reloadLuceneSPI(ClassLoader loader) {
// do NOT change the order of these method calls!
// Codecs:
PostingsFormat.reloadPostingsFormats(loader);
DocValuesFormat.reloadDocValuesFormats(loader);
Codec.reloadCodecs(loader);
// Analysis:
CharFilterFactory.reloadCharFilters(loader);
TokenFilterFactory.reloadTokenFilters(loader);
TokenizerFactory.reloadTokenizers(loader);
}
private Plugin loadPlugin(String className, Settings settings, ClassLoader loader) {
try {
Class<? extends Plugin> pluginClass = loader.loadClass(className).asSubclass(Plugin.class);
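reloadLuceneSPI(...) drives Lucene's standard SPI reload hooks, so services packaged in a plugin jar become resolvable by name once the plugin's classloader exists. A hedged sketch of the intended flow; pluginJarUrls and the codec name are hypothetical:

// Sketch: make plugin-provided codecs visible to Lucene's name-based lookups.
ClassLoader loader = URLClassLoader.newInstance(pluginJarUrls, PluginsService.class.getClassLoader());
PluginsService.reloadLuceneSPI(loader);
Codec custom = Codec.forName("MyPluginCodec"); // registered via META-INF/services in the plugin jar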

View File

@ -1,55 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.PreProcessModule;
import org.elasticsearch.common.inject.SpawnModules;
import org.elasticsearch.common.settings.Settings;
/**
*
*/
public class ShardsPluginsModule extends AbstractModule implements SpawnModules, PreProcessModule {
private final Settings settings;
private final PluginsService pluginsService;
public ShardsPluginsModule(Settings settings, PluginsService pluginsService) {
this.settings = settings;
this.pluginsService = pluginsService;
}
@Override
public Iterable<? extends Module> spawnModules() {
return pluginsService.shardModules(settings);
}
@Override
public void processModule(Module module) {
pluginsService.processModule(module);
}
@Override
protected void configure() {
}
}

View File

@ -20,14 +20,8 @@
package org.elasticsearch.repositories;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.Modules;
import org.elasticsearch.common.inject.SpawnModules;
import org.elasticsearch.common.settings.Settings;
import java.util.Arrays;
import java.util.Collections;
/**
* Binds repository classes for the specific repository type.
*/

View File

@ -156,7 +156,7 @@ public class URLRepository extends BlobStoreRepository {
logger.warn("cannot parse the specified url [{}]", url);
throw new RepositoryException(repositoryName, "cannot parse the specified url [" + url + "]");
}
// We didn't match white list - try to resolve against repo.path
// We didn't match white list - try to resolve against path.repo
URL normalizedUrl = environment.resolveRepoURL(url);
if (normalizedUrl == null) {
logger.warn("The specified url [{}] doesn't start with any repository paths specified by the path.repo setting: [{}] or by repositories.url.allowed_urls setting: [{}] ", url, environment.repoFiles());

View File

@ -25,6 +25,8 @@ import org.elasticsearch.bootstrap.Elasticsearch;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -115,11 +117,20 @@ public class BytesRestResponse extends RestResponse {
return this.status;
}
private static final ESLogger SUPPRESSED_ERROR_LOGGER = ESLoggerFactory.getLogger("rest.suppressed");
private static XContentBuilder convert(RestChannel channel, RestStatus status, Throwable t) throws IOException {
XContentBuilder builder = channel.newErrorBuilder().startObject();
if (t == null) {
builder.field("error", "unknown");
} else if (channel.detailedErrorsEnabled()) {
final ToXContent.Params params;
if (channel.request().paramAsBoolean("error_trace", !ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT)) {
params = new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"), channel.request());
} else {
SUPPRESSED_ERROR_LOGGER.info("{} Params: {}", t, channel.request().path(), channel.request().params());
params = channel.request();
}
builder.field("error");
builder.startObject();
final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t);
@ -127,16 +138,13 @@ public class BytesRestResponse extends RestResponse {
builder.startArray();
for (ElasticsearchException rootCause : rootCauses){
builder.startObject();
rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), channel.request()));
rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
builder.endObject();
}
builder.endArray();
ElasticsearchException.toXContent(builder, channel.request(), t);
ElasticsearchException.toXContent(builder, params, t);
builder.endObject();
if (channel.request().paramAsBoolean("error_trace", false)) {
buildErrorTrace(t, builder);
}
} else {
builder.field("error", simpleMessage(t));
}
@ -145,45 +153,6 @@ public class BytesRestResponse extends RestResponse {
return builder;
}
private static void buildErrorTrace(Throwable t, XContentBuilder builder) throws IOException {
builder.startObject("error_trace");
boolean first = true;
int counter = 0;
while (t != null) {
// bail if there are more than 10 levels, becomes useless really...
if (counter++ > 10) {
break;
}
if (!first) {
builder.startObject("cause");
}
buildThrowable(t, builder);
if (!first) {
builder.endObject();
}
t = t.getCause();
first = false;
}
builder.endObject();
}
private static void buildThrowable(Throwable t, XContentBuilder builder) throws IOException {
builder.field("message", t.getMessage());
for (StackTraceElement stElement : t.getStackTrace()) {
builder.startObject("at")
.field("class", stElement.getClassName())
.field("method", stElement.getMethodName());
if (stElement.getFileName() != null) {
builder.field("file", stElement.getFileName());
}
if (stElement.getLineNumber() >= 0) {
builder.field("line", stElement.getLineNumber());
}
builder.endObject();
}
}
/*
* Builds a simple error string from the message of the first ElasticsearchException
*/
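The net effect of the convert(...) rewrite above: stack traces now flow through the normal ToXContent rendering, gated by the error_trace request parameter, instead of the bespoke buildErrorTrace/buildThrowable walkers that were removed. An illustrative request (index name made up):

GET /my-index/_search?error_trace=true

With error_trace=true the error object in the response carries stack traces; without it, the trace is skipped and the suppressed failure is logged under the rest.suppressed logger together with the request path and params.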

View File

@ -26,7 +26,6 @@ import com.google.common.collect.ImmutableMap;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
@ -54,7 +53,6 @@ import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData;
@ -82,7 +80,6 @@ import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.Template;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;
import org.elasticsearch.search.dfs.CachedDfSource;
import org.elasticsearch.search.dfs.DfsPhase;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.*;
@ -266,6 +263,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
}
}
@Deprecated // remove in 3.0
public QuerySearchResult executeScan(ShardSearchRequest request) {
final SearchContext context = createAndPutContext(request);
final int originalSize = context.size();
@ -274,7 +272,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
throw new IllegalArgumentException("aggregations are not supported with search_type=scan");
}
if (context.scroll() == null) {
if (context.scrollContext() == null || context.scrollContext().scroll == null) {
throw new ElasticsearchException("Scroll must be provided when scanning...");
}
@ -322,7 +320,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
try {
shortcutDocIdsToLoadForScanning(context);
fetchPhase.execute(context);
if (context.scroll() == null || context.fetchResult().hits().hits().length < context.size()) {
if (context.scrollContext() == null || context.fetchResult().hits().hits().length < context.size()) {
freeContext(request.id());
} else {
contextProcessedSuccessfully(context);
@ -365,7 +363,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
loadOrExecuteQueryPhase(request, context, queryPhase);
if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scroll() == null) {
if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scrollContext() == null) {
freeContext(context.id());
} else {
contextProcessedSuccessfully(context);
@ -412,23 +410,14 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
public QuerySearchResult executeQueryPhase(QuerySearchRequest request) {
final SearchContext context = findContext(request.id());
contextProcessing(context);
context.searcher().setAggregatedDfs(request.dfs());
IndexShard indexShard = context.indexShard();
try {
final IndexCache indexCache = indexShard.indexService().cache();
final QueryCachingPolicy cachingPolicy = indexShard.getQueryCachingPolicy();
context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(),
indexCache.query(), cachingPolicy));
} catch (Throwable e) {
processFailure(context, e);
cleanContext(context);
throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
}
ShardSearchStats shardSearchStats = indexShard.searchService();
try {
shardSearchStats.onPreQueryPhase(context);
long time = System.nanoTime();
queryPhase.execute(context);
if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scroll() == null) {
if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scrollContext() == null) {
// no hits, we can release the context since there will be no fetch phase
freeContext(context.id());
} else {
@ -446,6 +435,16 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
}
}
private boolean fetchPhaseShouldFreeContext(SearchContext context) {
if (context.scrollContext() == null) {
// simple search, no scroll
return true;
} else {
// scroll request, but the scroll was not extended
return context.scrollContext().scroll == null;
}
}
public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) {
final SearchContext context = createAndPutContext(request);
contextProcessing(context);
@ -465,7 +464,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
try {
shortcutDocIdsToLoad(context);
fetchPhase.execute(context);
if (context.scroll() == null) {
if (fetchPhaseShouldFreeContext(context)) {
freeContext(context.id());
} else {
contextProcessedSuccessfully(context);
@ -488,17 +487,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) {
final SearchContext context = findContext(request.id());
contextProcessing(context);
try {
final IndexShard indexShard = context.indexShard();
final IndexCache indexCache = indexShard.indexService().cache();
final QueryCachingPolicy cachingPolicy = indexShard.getQueryCachingPolicy();
context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(),
indexCache.query(), cachingPolicy));
} catch (Throwable e) {
freeContext(context.id());
cleanContext(context);
throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
}
context.searcher().setAggregatedDfs(request.dfs());
try {
ShardSearchStats shardSearchStats = context.indexShard().searchService();
shardSearchStats.onPreQueryPhase(context);
@ -515,7 +504,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
try {
shortcutDocIdsToLoad(context);
fetchPhase.execute(context);
if (context.scroll() == null) {
if (fetchPhaseShouldFreeContext(context)) {
freeContext(request.id());
} else {
contextProcessedSuccessfully(context);
@ -555,7 +544,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
try {
shortcutDocIdsToLoad(context);
fetchPhase.execute(context);
if (context.scroll() == null) {
if (fetchPhaseShouldFreeContext(context)) {
freeContext(request.id());
} else {
contextProcessedSuccessfully(context);
@ -581,13 +570,13 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
final ShardSearchStats shardSearchStats = context.indexShard().searchService();
try {
if (request.lastEmittedDoc() != null) {
context.lastEmittedDoc(request.lastEmittedDoc());
context.scrollContext().lastEmittedDoc = request.lastEmittedDoc();
}
context.docIdsToLoad(request.docIds(), 0, request.docIdsSize());
shardSearchStats.onPreFetchPhase(context);
long time = System.nanoTime();
fetchPhase.execute(context);
if (context.scroll() == null) {
if (fetchPhaseShouldFreeContext(context)) {
freeContext(request.id());
} else {
contextProcessedSuccessfully(context);
@ -642,7 +631,10 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout);
SearchContext.setCurrent(context);
try {
context.scroll(request.scroll());
if (request.scroll() != null) {
context.scrollContext(new ScrollContext());
context.scrollContext().scroll = request.scroll();
}
parseTemplate(request, context);
parseSource(context, request.source());
@ -695,7 +687,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
if (context != null) {
try {
context.indexShard().searchService().onFreeContext(context);
if (context.scroll() != null) {
if (context.scrollContext() != null) {
context.indexShard().searchService().onFreeScrollContext(context);
}
} finally {
@ -708,7 +700,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
public void freeAllScrollContexts() {
for (SearchContext searchContext : activeContexts.values()) {
if (searchContext.scroll() != null) {
if (searchContext.scrollContext() != null) {
freeContext(searchContext.id());
}
}
@ -902,7 +894,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
private void processScroll(InternalScrollSearchRequest request, SearchContext context) {
// process scroll
context.from(context.from() + context.size());
context.scroll(request.scroll());
context.scrollContext().scroll = request.scroll();
// update the context keep alive based on the new scroll value
if (request.scroll() != null && request.scroll().keepAlive() != null) {
context.keepAlive(request.scroll().keepAlive().millis());

View File

@ -418,6 +418,7 @@ public class SearchServiceTransportAction extends AbstractComponent {
}
}
@Deprecated // remove in 3.0
class SearchScanTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
@Override
public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {

View File

@ -31,7 +31,7 @@ import java.io.IOException;
import java.util.List;
import java.util.Map;
final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory<ValuesSource> {
final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory.LeafOnly<ValuesSource> {
private final long precisionThreshold;

View File

@ -186,9 +186,8 @@ public class AggregationPath {
}
public AggregationPath subPath(int offset, int length) {
PathElement[] subTokens = new PathElement[length];
System.arraycopy(pathElements, offset, subTokens, 0, length);
return new AggregationPath(pathElements);
List<PathElement> subTokens = new ArrayList<>(pathElements.subList(offset, offset + length));
return new AggregationPath(subTokens);
}
/**
@ -266,12 +265,12 @@ public class AggregationPath {
}
return aggregator;
}
/**
* Resolves the topmost aggregator pointed by this path using the given root as a point of reference.
*
* @param root The point of reference of this path
* @return The first child aggregator of the root pointed by this path
*/
public Aggregator resolveTopmostAggregator(Aggregator root) {
AggregationPath.PathElement token = pathElements.get(0);
@ -279,7 +278,7 @@ public class AggregationPath {
assert (aggregator instanceof SingleBucketAggregator )
|| (aggregator instanceof NumericMetricsAggregator) : "this should be picked up before aggregation execution - on validate";
return aggregator;
}
/**
* Validates this path over the given aggregator as a point of reference.
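A concrete reading of the subPath fix above: for a path a>b>c, subPath(1, 2) used to copy the slice into subTokens but then pass the full pathElements to the constructor, so the "sub-path" was always the whole path; with the subList copy it now covers exactly b>c.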

View File

@ -69,14 +69,14 @@ public class ValueFormat {
public static final DateTime DEFAULT = new DateTime(DateFieldMapper.Defaults.DATE_TIME_FORMATTER.format(), ValueFormatter.DateTime.DEFAULT, ValueParser.DateMath.DEFAULT);
public static DateTime format(String format, DateTimeZone timezone) {
return new DateTime(format, new ValueFormatter.DateTime(format, timezone), new ValueParser.DateMath(format));
return new DateTime(format, new ValueFormatter.DateTime(format, timezone), new ValueParser.DateMath(format, timezone));
}
public static DateTime mapper(DateFieldMapper.DateFieldType fieldType, DateTimeZone timezone) {
return new DateTime(fieldType.dateTimeFormatter().format(), ValueFormatter.DateTime.mapper(fieldType, timezone), ValueParser.DateMath.mapper(fieldType));
return new DateTime(fieldType.dateTimeFormatter().format(), ValueFormatter.DateTime.mapper(fieldType, timezone), ValueParser.DateMath.mapper(fieldType, timezone));
}
public DateTime(String pattern, ValueFormatter formatter, ValueParser parser) {
private DateTime(String pattern, ValueFormatter formatter, ValueParser parser) {
super(pattern, formatter, parser);
}

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.search.aggregations.support.format;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
@ -25,6 +26,7 @@ import org.elasticsearch.index.mapper.core.DateFieldMapper;
import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.internal.SearchContext;
import org.joda.time.DateTimeZone;
import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols;
@ -80,16 +82,21 @@ public interface ValueParser {
*/
static class DateMath implements ValueParser {
public static final DateMath DEFAULT = new ValueParser.DateMath(new DateMathParser(DateFieldMapper.Defaults.DATE_TIME_FORMATTER));
public static final DateMath DEFAULT = new ValueParser.DateMath(new DateMathParser(DateFieldMapper.Defaults.DATE_TIME_FORMATTER), DateTimeZone.UTC);
private DateMathParser parser;
public DateMath(String format) {
this(new DateMathParser(Joda.forPattern(format)));
private DateTimeZone timezone = DateTimeZone.UTC;
public DateMath(String format, DateTimeZone timezone) {
this(new DateMathParser(Joda.forPattern(format)), timezone);
}
public DateMath(DateMathParser parser) {
public DateMath(DateMathParser parser, @Nullable DateTimeZone timeZone) {
this.parser = parser;
if (timeZone != null) {
this.timezone = timeZone;
}
}
@Override
@ -100,7 +107,7 @@ public interface ValueParser {
return searchContext.nowInMillis();
}
};
return parser.parse(value, now);
return parser.parse(value, now, false, timezone);
}
@Override
@ -108,8 +115,8 @@ public interface ValueParser {
return parseLong(value, searchContext);
}
public static DateMath mapper(DateFieldMapper.DateFieldType fieldType) {
return new DateMath(new DateMathParser(fieldType.dateTimeFormatter()));
public static DateMath mapper(DateFieldMapper.DateFieldType fieldType, @Nullable DateTimeZone timezone) {
return new DateMath(new DateMathParser(fieldType.dateTimeFormatter()), timezone);
}
}
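The practical effect is that date-math rounding now happens in the aggregation's time zone rather than always in UTC. A hedged sketch against the signatures visible above (pattern, zone, and reference time are illustrative):

// Sketch: round "2014-01-01||/M" in CET rather than UTC.
DateMathParser parser = new DateMathParser(Joda.forPattern("dateOptionalTime"));
Callable<Long> now = new Callable<Long>() {
    @Override
    public Long call() {
        return 0L; // fixed 'now' for reproducibility
    }
};
long millis = parser.parse("2014-01-01||/M", now, false, DateTimeZone.forID("CET"));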

View File

@ -1,97 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.dfs;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.search.similarities.Similarity;
import java.io.IOException;
import java.util.List;
/**
*
*/
public class CachedDfSource extends IndexSearcher {
private final AggregatedDfs aggregatedDfs;
private final int maxDoc;
public CachedDfSource(IndexReader reader, AggregatedDfs aggregatedDfs, Similarity similarity,
QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) throws IOException {
super(reader);
this.aggregatedDfs = aggregatedDfs;
setSimilarity(similarity);
setQueryCache(queryCache);
setQueryCachingPolicy(queryCachingPolicy);
if (aggregatedDfs.maxDoc() > Integer.MAX_VALUE) {
maxDoc = Integer.MAX_VALUE;
} else {
maxDoc = (int) aggregatedDfs.maxDoc();
}
}
@Override
public TermStatistics termStatistics(Term term, TermContext context) throws IOException {
TermStatistics termStatistics = aggregatedDfs.termStatistics().get(term);
if (termStatistics == null) {
// we don't have stats for this - this might be a must_not clause etc. that doesn't allow extracting terms from the query
return super.termStatistics(term, context);
}
return termStatistics;
}
@Override
public CollectionStatistics collectionStatistics(String field) throws IOException {
CollectionStatistics collectionStatistics = aggregatedDfs.fieldStatistics().get(field);
if (collectionStatistics == null) {
// we don't have stats for this - this might be a must_not clause etc. that doesn't allow extracting terms from the query
return super.collectionStatistics(field);
}
return collectionStatistics;
}
public int maxDoc() {
return this.maxDoc;
}
@Override
public Document doc(int i) {
throw new UnsupportedOperationException();
}
@Override
public void doc(int docID, StoredFieldVisitor fieldVisitor) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Explanation explain(Weight weight, int doc) {
throw new UnsupportedOperationException();
}
@Override
protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
throw new UnsupportedOperationException();
}
}

View File

@ -20,15 +20,13 @@
package org.elasticsearch.search.internal;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Explanation;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.*;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.search.dfs.CachedDfSource;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import java.io.IOException;
@ -46,21 +44,23 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
private final SearchContext searchContext;
private CachedDfSource dfSource;
private AggregatedDfs aggregatedDfs;
public ContextIndexSearcher(SearchContext searchContext, Engine.Searcher searcher) {
super(searcher.reader());
in = searcher.searcher();
this.searchContext = searchContext;
setSimilarity(searcher.searcher().getSimilarity(true));
setQueryCache(searchContext.indexShard().indexService().cache().query());
setQueryCachingPolicy(searchContext.indexShard().getQueryCachingPolicy());
}
@Override
public void close() {
}
public void dfSource(CachedDfSource dfSource) {
this.dfSource = dfSource;
public void setAggregatedDfs(AggregatedDfs aggregatedDfs) {
this.aggregatedDfs = aggregatedDfs;
}
@Override
@ -75,10 +75,12 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
@Override
public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
// During tests we prefer to use the wrapped IndexSearcher, because then we use the AssertingIndexSearcher
// it is hacky, because if we perform a dfs search, we don't use the wrapped IndexSearcher...
try {
// if scores are needed and we have dfs data then use it
if (dfSource != null && needsScores) {
return dfSource.createNormalizedWeight(query, needsScores);
if (aggregatedDfs != null && needsScores) {
return super.createNormalizedWeight(query, needsScores);
}
return in.createNormalizedWeight(query, needsScores);
} catch (Throwable t) {
@ -104,4 +106,32 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
searchContext.clearReleasables(Lifetime.COLLECTION);
}
}
@Override
public TermStatistics termStatistics(Term term, TermContext context) throws IOException {
if (aggregatedDfs == null) {
// we are either executing the dfs phase or the search_type doesn't include the dfs phase.
return super.termStatistics(term, context);
}
TermStatistics termStatistics = aggregatedDfs.termStatistics().get(term);
if (termStatistics == null) {
// we don't have stats for this - this might be a must_not clause etc. that doesn't allow extracting terms from the query
return super.termStatistics(term, context);
}
return termStatistics;
}
@Override
public CollectionStatistics collectionStatistics(String field) throws IOException {
if (aggregatedDfs == null) {
// we are either executing the dfs phase or the search_type doesn't include the dfs phase.
return super.collectionStatistics(field);
}
CollectionStatistics collectionStatistics = aggregatedDfs.fieldStatistics().get(field);
if (collectionStatistics == null) {
// we don't have stats for this - this might be a must_not clause etc. that doesn't allow extracting terms from the query
return super.collectionStatistics(field);
}
return collectionStatistics;
}
}
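Replacing CachedDfSource with setAggregatedDfs(...) keeps the same scoring semantics in a simpler place: when the dfs phase ran, term and field statistics come from the shard-aggregated maps; otherwise the searcher falls back to its local statistics. Why that matters, with invented numbers: if a term has docFreq 3 on one shard and 97 on another, per-shard IDF values diverge wildly, but the aggregated docFreq of 100 gives every shard the same IDF, making scores comparable across shards.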

View File

@ -48,7 +48,6 @@ import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.dfs.DfsSearchResult;
@ -97,7 +96,7 @@ public class DefaultSearchContext extends SearchContext {
// terminate after count
private int terminateAfter = DEFAULT_TERMINATE_AFTER;
private List<String> groupStats;
private Scroll scroll;
private ScrollContext scrollContext;
private boolean explain;
private boolean version = false; // by default, we don't return versions
private List<String> fieldNames;
@ -289,13 +288,13 @@ public class DefaultSearchContext extends SearchContext {
}
@Override
public Scroll scroll() {
return this.scroll;
public ScrollContext scrollContext() {
return this.scrollContext;
}
@Override
public SearchContext scroll(Scroll scroll) {
this.scroll = scroll;
public SearchContext scrollContext(ScrollContext scrollContext) {
this.scrollContext = scrollContext;
return this;
}
@ -651,16 +650,6 @@ public class DefaultSearchContext extends SearchContext {
this.keepAlive = keepAlive;
}
@Override
public void lastEmittedDoc(ScoreDoc doc) {
this.lastEmittedDoc = doc;
}
@Override
public ScoreDoc lastEmittedDoc() {
return lastEmittedDoc;
}
@Override
public SearchLookup lookup() {
// TODO: The types should take into account the parsing context in QueryParserContext...

View File

@ -23,7 +23,6 @@ import com.carrotsearch.hppc.ObjectObjectAssociativeContainer;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.util.Counter;
import org.elasticsearch.action.search.SearchType;
@ -42,7 +41,6 @@ import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.dfs.DfsSearchResult;
@ -154,13 +152,13 @@ public abstract class FilteredSearchContext extends SearchContext {
}
@Override
public Scroll scroll() {
return in.scroll();
public ScrollContext scrollContext() {
return in.scrollContext();
}
@Override
public SearchContext scroll(Scroll scroll) {
return in.scroll(scroll);
public SearchContext scrollContext(ScrollContext scroll) {
return in.scrollContext(scroll);
}
@Override
@ -483,16 +481,6 @@ public abstract class FilteredSearchContext extends SearchContext {
in.keepAlive(keepAlive);
}
@Override
public void lastEmittedDoc(ScoreDoc doc) {
in.lastEmittedDoc(doc);
}
@Override
public ScoreDoc lastEmittedDoc() {
return in.lastEmittedDoc();
}
@Override
public SearchLookup lookup() {
return in.lookup();

View File

@ -17,20 +17,17 @@
* under the License.
*/
package org.elasticsearch.common.inject;
package org.elasticsearch.search.internal;
/**
* This interface can be added to a Module to spawn sub modules. DO NOT USE.
*
* This is fundamentally broken.
* <ul>
* <li>If you have a plugin with multiple modules, return all the modules at once.</li>
* <li>If you are trying to make the implementation of a module "pluggable", don't do it.
* This is not extendable because custom implementations (using onModule) cannot be
* registered before spawnModules() is called.</li>
* </ul>
*/
public interface SpawnModules {
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.search.Scroll;
/** Wrapper around information that needs to stay around when scrolling. */
public class ScrollContext {
public int totalHits = -1;
public float maxScore;
public ScoreDoc lastEmittedDoc;
public Scroll scroll;
Iterable<? extends Module> spawnModules();
}

View File

@ -24,7 +24,6 @@ import com.google.common.collect.MultimapBuilder;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.util.Counter;
import org.elasticsearch.action.search.SearchType;
@ -159,9 +158,9 @@ public abstract class SearchContext implements Releasable, HasContextAndHeaders
protected abstract long nowInMillisImpl();
public abstract Scroll scroll();
public abstract ScrollContext scrollContext();
public abstract SearchContext scroll(Scroll scroll);
public abstract SearchContext scrollContext(ScrollContext scroll);
public abstract SearchContextAggregations aggregations();
@ -303,10 +302,6 @@ public abstract class SearchContext implements Releasable, HasContextAndHeaders
public abstract void keepAlive(long keepAlive);
public abstract void lastEmittedDoc(ScoreDoc doc);
public abstract ScoreDoc lastEmittedDoc();
public abstract SearchLookup lookup();
public abstract DfsSearchResult dfsResult();

View File

@ -20,13 +20,10 @@ package org.elasticsearch.search.internal;
import com.google.common.collect.Lists;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.util.Counter;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
@ -101,7 +98,7 @@ public class SubSearchContext extends FilteredSearchContext {
}
@Override
public SearchContext scroll(Scroll scroll) {
public SearchContext scrollContext(ScrollContext scrollContext) {
throw new UnsupportedOperationException("Not supported");
}
@ -304,11 +301,6 @@ public class SubSearchContext extends FilteredSearchContext {
throw new UnsupportedOperationException("Not supported");
}
@Override
public void lastEmittedDoc(ScoreDoc doc) {
throw new UnsupportedOperationException("Not supported");
}
@Override
public QuerySearchResult queryResult() {
return querySearchResult;

View File

@ -21,12 +21,16 @@ package org.elasticsearch.search.query;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.queries.MinDocQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TimeLimitingCollector;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopDocsCollector;
@ -43,8 +47,8 @@ import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.SearchPhase;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.aggregations.AggregationPhase;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import org.elasticsearch.search.rescore.RescorePhase;
import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.scan.ScanContext.ScanCollector;
@ -52,7 +56,6 @@ import org.elasticsearch.search.sort.SortParseElement;
import org.elasticsearch.search.sort.TrackScoresParseElement;
import org.elasticsearch.search.suggest.SuggestPhase;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@ -115,6 +118,7 @@ public class QueryPhase implements SearchPhase {
searchContext.queryResult().searchTimedOut(false);
final SearchType searchType = searchContext.searchType();
boolean rescore = false;
try {
searchContext.queryResult().from(searchContext.from());
@ -138,7 +142,7 @@ public class QueryPhase implements SearchPhase {
return new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0);
}
};
} else if (searchContext.searchType() == SearchType.SCAN) {
} else if (searchType == SearchType.SCAN) {
query = searchContext.scanContext().wrapQuery(query);
final ScanCollector scanCollector = searchContext.scanContext().collector(searchContext);
collector = scanCollector;
@ -150,11 +154,32 @@ public class QueryPhase implements SearchPhase {
};
} else {
// Perhaps have a dedicated scroll phase?
final ScrollContext scrollContext = searchContext.scrollContext();
assert (scrollContext != null) == (searchContext.request().scroll() != null);
final TopDocsCollector<?> topDocsCollector;
ScoreDoc lastEmittedDoc;
if (searchContext.request().scroll() != null) {
numDocs = Math.min(searchContext.size(), totalNumDocs);
lastEmittedDoc = searchContext.lastEmittedDoc();
lastEmittedDoc = scrollContext.lastEmittedDoc;
if (Sort.INDEXORDER.equals(searchContext.sort())) {
if (scrollContext.totalHits == -1) {
// first round
assert scrollContext.lastEmittedDoc == null;
// there is not much that we can optimize here since we want to collect all
// documents in order to get the total number of hits
} else {
// now this gets interesting: since we sort in index-order, we can directly
// skip to the desired doc and stop collecting after ${size} matches
if (scrollContext.lastEmittedDoc != null) {
BooleanQuery bq = new BooleanQuery();
bq.add(query, Occur.MUST);
bq.add(new MinDocQuery(lastEmittedDoc.doc + 1), Occur.FILTER);
query = bq;
}
searchContext.terminateAfter(numDocs);
}
}
} else {
lastEmittedDoc = null;
}
@ -177,7 +202,31 @@ public class QueryPhase implements SearchPhase {
topDocsCallable = new Callable<TopDocs>() {
@Override
public TopDocs call() throws Exception {
return topDocsCollector.topDocs();
TopDocs topDocs = topDocsCollector.topDocs();
if (scrollContext != null) {
if (scrollContext.totalHits == -1) {
// first round
scrollContext.totalHits = topDocs.totalHits;
scrollContext.maxScore = topDocs.getMaxScore();
} else {
// subsequent round: the total number of hits and
// the maximum score were computed on the first round
topDocs.totalHits = scrollContext.totalHits;
topDocs.setMaxScore(scrollContext.maxScore);
}
switch (searchType) {
case QUERY_AND_FETCH:
case DFS_QUERY_AND_FETCH:
// for (DFS_)QUERY_AND_FETCH, we already know the last emitted doc
if (topDocs.scoreDocs.length > 0) {
// set the last emitted doc
scrollContext.lastEmittedDoc = topDocs.scoreDocs[topDocs.scoreDocs.length - 1];
}
default:
break;
}
}
return topDocs;
}
};
}
@ -227,19 +276,7 @@ public class QueryPhase implements SearchPhase {
searchContext.queryResult().terminatedEarly(false);
}
final TopDocs topDocs = topDocsCallable.call();
if (searchContext.request().scroll() != null) {
int size = topDocs.scoreDocs.length;
if (size > 0) {
// In the case of *QUERY_AND_FETCH we don't get back to shards telling them which least
// relevant docs got emitted as hit, we can simply mark the last doc as last emitted
if (searchContext.searchType() == SearchType.QUERY_AND_FETCH ||
searchContext.searchType() == SearchType.DFS_QUERY_AND_FETCH) {
searchContext.lastEmittedDoc(topDocs.scoreDocs[size - 1]);
}
}
}
searchContext.queryResult().topDocs(topDocs);
searchContext.queryResult().topDocs(topDocsCallable.call());
} catch (Throwable e) {
throw new QueryPhaseExecutionException(searchContext, "Failed to execute main query", e);
}
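The index-order scroll optimization above composes the original query with a doc-id lower bound so later pages skip straight past everything already emitted. A minimal sketch of the composition; searcher, query, lastEmittedDoc, and pageSize are assumed from context:

// Sketch: resume an index-order scroll after the last emitted doc.
BooleanQuery bq = new BooleanQuery();
bq.add(query, Occur.MUST);                                     // original query still scores
bq.add(new MinDocQuery(lastEmittedDoc.doc + 1), Occur.FILTER); // doc-id lower bound, unscored
TopDocs page = searcher.search(bq, pageSize);                  // at most one page of hits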

View File

@ -20,18 +20,13 @@
package org.elasticsearch.search.scan;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.MinDocQuery;
import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.search.internal.SearchContext;
@ -118,93 +113,4 @@ public class ScanContext {
}
}
/**
* A filtering query that matches all doc IDs that are not deleted and
* greater than or equal to the configured doc ID.
*/
// pkg-private for testing
static class MinDocQuery extends Query {
private final int minDoc;
MinDocQuery(int minDoc) {
this.minDoc = minDoc;
}
@Override
public int hashCode() {
return 31 * super.hashCode() + minDoc;
}
@Override
public boolean equals(Object obj) {
if (super.equals(obj) == false) {
return false;
}
MinDocQuery that = (MinDocQuery) obj;
return minDoc == that.minDoc;
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new ConstantScoreWeight(this) {
@Override
public Scorer scorer(LeafReaderContext context, final Bits acceptDocs) throws IOException {
final int maxDoc = context.reader().maxDoc();
if (context.docBase + maxDoc <= minDoc) {
return null;
}
final int segmentMinDoc = Math.max(0, minDoc - context.docBase);
final DocIdSetIterator disi = new DocIdSetIterator() {
int doc = -1;
@Override
public int docID() {
return doc;
}
@Override
public int nextDoc() throws IOException {
return advance(doc + 1);
}
@Override
public int advance(int target) throws IOException {
assert target > doc;
if (doc == -1) {
// skip directly to minDoc
doc = Math.max(target, segmentMinDoc);
} else {
doc = target;
}
while (doc < maxDoc) {
if (acceptDocs == null || acceptDocs.get(doc)) {
break;
}
doc += 1;
}
if (doc >= maxDoc) {
doc = NO_MORE_DOCS;
}
return doc;
}
@Override
public long cost() {
return maxDoc - minDoc;
}
};
return new ConstantScoreScorer(this, score(), disi);
}
};
}
@Override
public String toString(String field) {
return "MinDocQuery(minDoc=" + minDoc + ")";
}
}
}

View File

@ -727,7 +727,7 @@ public class ThreadPool extends AbstractComponent {
if (queueSize == null) {
builder.field(Fields.QUEUE_SIZE, -1);
} else {
builder.field(Fields.QUEUE_SIZE, queueSize.toString());
builder.field(Fields.QUEUE_SIZE, queueSize.singles());
}
builder.endObject();
return builder;
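Assuming a pool configured with a fixed queue of 1000, the serialized info would now carry the bare count instead of the human-readable size string (illustrative output only):
// before: "queue_size" : "1k"
// after:  "queue_size" : 1000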

View File

@ -25,6 +25,7 @@ import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import java.io.IOException;
import java.util.List;
import java.util.Map;
/**
@ -32,6 +33,7 @@ import java.util.Map;
*/
public interface Transport extends LifecycleComponent<Transport> {
public static class TransportSettings {
public static final String TRANSPORT_TCP_COMPRESS = "transport.tcp.compress";
}
@ -52,7 +54,7 @@ public interface Transport extends LifecycleComponent<Transport> {
/**
* Returns an address from its string representation.
*/
TransportAddress[] addressesFromString(String address) throws Exception;
TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception;
/**
* Is the address type supported.
@ -89,4 +91,6 @@ public interface Transport extends LifecycleComponent<Transport> {
* Returns count of currently open connections
*/
long serverOpen();
List<String> getLocalAddresses();
}

View File

@ -92,7 +92,6 @@ public class TransportModule extends AbstractModule {
}
bind(NamedWriteableRegistry.class).asEagerSingleton();
if (configuredTransport != null) {
logger.info("Using [{}] as transport, overridden by [{}]", configuredTransport.getName(), configuredTransportSource);
bind(Transport.class).to(configuredTransport).asEagerSingleton();

View File

@ -40,10 +40,7 @@ import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.*;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
@ -221,6 +218,10 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
return transport.boundAddress();
}
public List<String> getLocalAddresses() {
return transport.getLocalAddresses();
}
public boolean nodeConnected(DiscoveryNode node) {
return node.equals(localNode) || transport.nodeConnected(node);
}
@ -383,8 +384,8 @@ public class TransportService extends AbstractLifecycleComponent<TransportServic
return requestIds.getAndIncrement();
}
public TransportAddress[] addressesFromString(String address) throws Exception {
return transport.addressesFromString(address);
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception {
return transport.addressesFromString(address, perAddressLimit);
}
/**

View File

@ -41,8 +41,7 @@ import org.elasticsearch.transport.*;
import org.elasticsearch.transport.support.TransportStatus;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import java.util.*;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
@ -57,14 +56,13 @@ import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.new
public class LocalTransport extends AbstractLifecycleComponent<Transport> implements Transport {
public static final String LOCAL_TRANSPORT_THREAD_NAME_PREFIX = "local_transport";
private final ThreadPool threadPool;
private final ThreadPoolExecutor workers;
private final Version version;
private volatile TransportServiceAdapter transportServiceAdapter;
private volatile BoundTransportAddress boundAddress;
private volatile LocalTransportAddress localAddress;
private final static ConcurrentMap<TransportAddress, LocalTransport> transports = newConcurrentMap();
private final static ConcurrentMap<LocalTransportAddress, LocalTransport> transports = newConcurrentMap();
private static final AtomicLong transportAddressIdGenerator = new AtomicLong();
private final ConcurrentMap<DiscoveryNode, LocalTransport> connectedNodes = newConcurrentMap();
private final NamedWriteableRegistry namedWriteableRegistry;
@ -78,7 +76,6 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
super(settings);
this.threadPool = threadPool;
this.version = version;
int workerCount = this.settings.getAsInt(TRANSPORT_LOCAL_WORKERS, EsExecutors.boundedNumberOfProcessors(settings));
int queueSize = this.settings.getAsInt(TRANSPORT_LOCAL_QUEUE, -1);
logger.debug("creating [{}] workers, queue_size [{}]", workerCount, queueSize);
@ -88,7 +85,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
}
@Override
public TransportAddress[] addressesFromString(String address) {
public TransportAddress[] addressesFromString(String address, int perAddressLimit) {
return new TransportAddress[]{new LocalTransportAddress(address)};
}
@ -359,4 +356,9 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
logger.error("failed to handle exception response [{}]", t, handler);
}
}
@Override
public List<String> getLocalAddresses() {
return Collections.singletonList("0.0.0.0");
}
}

View File

@ -21,8 +21,8 @@ package org.elasticsearch.transport.netty;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
@ -41,6 +41,7 @@ import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.netty.NettyUtils;
import org.elasticsearch.common.netty.OpenChannelsHandler;
import org.elasticsearch.common.netty.ReleaseChannelFutureListener;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.network.NetworkUtils;
import org.elasticsearch.common.settings.Settings;
@ -74,6 +75,7 @@ import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.net.UnknownHostException;
import java.nio.channels.CancelledKeyException;
import java.util.*;
import java.util.concurrent.*;
@ -81,6 +83,8 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.*;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
@ -403,6 +407,13 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
} catch (IOException e) {
throw new BindTransportException("Failed to resolve host [" + bindHost + "]", e);
}
if (logger.isDebugEnabled()) {
String[] addresses = new String[hostAddresses.length];
for (int i = 0; i < hostAddresses.length; i++) {
addresses[i] = NetworkAddress.format(hostAddresses[i]);
}
logger.debug("binding server bootstrap to: {}", addresses);
}
for (InetAddress hostAddress : hostAddresses) {
bindServerBootstrap(name, hostAddress, settings);
}
@ -413,7 +424,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
String port = settings.get("port");
PortsRange portsRange = new PortsRange(port);
final AtomicReference<Exception> lastException = new AtomicReference<>();
final AtomicReference<SocketAddress> boundSocket = new AtomicReference<>();
final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
boolean success = portsRange.iterate(new PortsRange.PortCallback() {
@Override
public boolean onPortNumber(int portNumber) {
@ -426,7 +437,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
serverChannels.put(name, list);
}
list.add(channel);
boundSocket.set(channel.getLocalAddress());
boundSocket.set((InetSocketAddress)channel.getLocalAddress());
}
} catch (Exception e) {
lastException.set(e);
@ -440,7 +451,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
}
if (!DEFAULT_PROFILE.equals(name)) {
InetSocketAddress boundAddress = (InetSocketAddress) boundSocket.get();
InetSocketAddress boundAddress = boundSocket.get();
int publishPort = settings.getAsInt("publish_port", boundAddress.getPort());
String publishHost = settings.get("publish_host", boundAddress.getHostString());
InetSocketAddress publishAddress = createPublishAddress(publishHost, publishPort);
@ -448,7 +459,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
profileBoundAddresses.putIfAbsent(name, new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress)));
}
logger.info("Bound profile [{}] to address [{}]", name, boundSocket.get());
logger.info("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get()));
}
private void createServerBootstrap(String name, Settings settings) {
@ -496,7 +507,6 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
serverBootstrap.setOption("reuseAddress", reuseAddress);
serverBootstrap.setOption("child.reuseAddress", reuseAddress);
serverBootstraps.put(name, serverBootstrap);
}
@ -579,35 +589,65 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
}
@Override
public TransportAddress[] addressesFromString(String address) throws Exception {
int index = address.indexOf('[');
if (index != -1) {
String host = address.substring(0, index);
Set<String> ports = Strings.commaDelimitedListToSet(address.substring(index + 1, address.indexOf(']')));
List<TransportAddress> addresses = Lists.newArrayList();
for (String port : ports) {
int[] iPorts = new PortsRange(port).ports();
for (int iPort : iPorts) {
addresses.add(new InetSocketTransportAddress(host, iPort));
}
}
return addresses.toArray(new TransportAddress[addresses.size()]);
public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception {
return parse(address, settings.get("transport.profiles.default.port",
settings.get("transport.netty.port",
settings.get("transport.tcp.port",
DEFAULT_PORT_RANGE))), perAddressLimit);
}
// this code is a take on guava's HostAndPort, extended to behave like a HostAndPortRange
// pattern for validating bracketed IPv6 addresses.
// not perfect, but PortsRange takes care of any port range validation, so the regex doesn't have to
private static final Pattern BRACKET_PATTERN = Pattern.compile("^\\[(.*:.*)\\](?::([\\d\\-]*))?$");
/** parse a hostname+port range spec into its equivalent addresses */
static TransportAddress[] parse(String hostPortString, String defaultPortRange, int perAddressLimit) throws UnknownHostException {
Objects.requireNonNull(hostPortString);
String host;
String portString = null;
if (hostPortString.startsWith("[")) {
// Parse a bracketed host, typically an IPv6 literal.
Matcher matcher = BRACKET_PATTERN.matcher(hostPortString);
if (!matcher.matches()) {
throw new IllegalArgumentException("Invalid bracketed host/port range: " + hostPortString);
}
host = matcher.group(1);
portString = matcher.group(2); // could be null
} else {
index = address.lastIndexOf(':');
if (index == -1) {
List<TransportAddress> addresses = Lists.newArrayList();
String defaultPort = settings.get("transport.profiles.default.port", settings.get("transport.netty.port", this.settings.get("transport.tcp.port", DEFAULT_PORT_RANGE)));
int[] iPorts = new PortsRange(defaultPort).ports();
for (int iPort : iPorts) {
addresses.add(new InetSocketTransportAddress(address, iPort));
}
return addresses.toArray(new TransportAddress[addresses.size()]);
} else {
String host = address.substring(0, index);
int port = Integer.parseInt(address.substring(index + 1));
return new TransportAddress[]{new InetSocketTransportAddress(host, port)};
int colonPos = hostPortString.indexOf(':');
if (colonPos >= 0 && hostPortString.indexOf(':', colonPos + 1) == -1) {
// Exactly 1 colon. Split into host:port.
host = hostPortString.substring(0, colonPos);
portString = hostPortString.substring(colonPos + 1);
} else {
// 0 or 2+ colons. Bare hostname or IPv6 literal.
host = hostPortString;
// 2+ colons and not bracketed: exception
if (colonPos >= 0) {
throw new IllegalArgumentException("IPv6 addresses must be bracketed: " + hostPortString);
}
}
}
// if port isn't specified, fill with the default
if (portString == null || portString.isEmpty()) {
portString = defaultPortRange;
}
// generate address for each port in the range
Set<InetAddress> addresses = new HashSet<>(Arrays.asList(InetAddress.getAllByName(host)));
List<TransportAddress> transportAddresses = new ArrayList<>();
int[] ports = new PortsRange(portString).ports();
int limit = Math.min(ports.length, perAddressLimit);
for (int i = 0; i < limit; i++) {
for (InetAddress address : addresses) {
transportAddresses.add(new InetSocketTransportAddress(address, ports[i]));
}
}
return transportAddresses.toArray(new TransportAddress[transportAddresses.size()]);
}
@Override
@ -670,6 +710,17 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
return channels == null ? 0 : channels.numberOfOpenChannels();
}
@Override
public List<String> getLocalAddresses() {
List<String> local = new ArrayList<>();
local.add("127.0.0.1");
// check if v6 is supported, if so, v4 will also work via mapped addresses.
if (NetworkUtils.SUPPORTS_V6) {
local.add("[::1]"); // may get ports appended!
}
return local;
}
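The brackets matter as soon as a caller appends a port to these strings; for example (illustrative):
// "127.0.0.1" + ":9300" -> "127.0.0.1:9300"
// "[::1]"     + ":9300" -> "[::1]:9300" (an unbracketed "::1:9300" would be ambiguous)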
@Override
public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {

Binary file not shown.


View File

@ -4,29 +4,30 @@ NAME
SYNOPSIS
plugin install <name>
plugin install <name or url>
DESCRIPTION
This command installs an elasticsearch plugin
This command installs an elasticsearch plugin. It can be used as follows:
<name> can be one of the official plugins, or refer to a github repository
Officially supported or commercial plugins require just the plugin name:
The notation of just specifying a plugin name downloads an officially supported plugin.
plugin install analysis-icu
plugin install shield
The notation of 'elasticsearch/plugin/version' makes it easy to download a commercial elastic plugin.
Plugins from GitHub require 'username/repository' or 'username/repository/version':
The notation of 'groupId/artifactId/version' refers to community plugins hosted on Maven Central or Sonatype.
plugin install lmenezes/elasticsearch-kopf
plugin install lmenezes/elasticsearch-kopf/1.5.7
The notation of 'username/repository' refers to a github repository.
Plugins from Maven Central or Sonatype require 'groupId/artifactId/version':
EXAMPLES
plugin install org.elasticsearch/elasticsearch-mapper-attachments/2.6.0
plugin install analysis-kuromoji
Plugins can be installed from a custom URL or file location as follows:
plugin install elasticsearch/shield/latest
plugin install lmenezes/elasticsearch-kopf
plugin install http://some.domain.name//my-plugin-1.0.0.zip
plugin install file:/path/to/my-plugin-1.0.0.zip
OFFICIAL PLUGINS
@ -41,6 +42,7 @@ OFFICIAL PLUGINS
- cloud-azure
- cloud-gce
- delete-by-query
- discovery-multicast
- lang-javascript
- lang-python
- mapper-murmur3
@ -49,8 +51,6 @@ OFFICIAL PLUGINS
OPTIONS
-u,--url URL to retrieve the plugin from
-t,--timeout Timeout until the plugin download is aborted
-v,--verbose Verbose output

View File

@ -0,0 +1,61 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.lucene.queries;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.store.Directory;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
public class MinDocQueryTests extends ESTestCase {
public void testBasics() {
MinDocQuery query1 = new MinDocQuery(42);
MinDocQuery query2 = new MinDocQuery(42);
MinDocQuery query3 = new MinDocQuery(43);
QueryUtils.check(query1);
QueryUtils.checkEqual(query1, query2);
QueryUtils.checkUnequal(query1, query3);
}
public void testRandom() throws IOException {
final int numDocs = randomIntBetween(10, 200);
final Document doc = new Document();
final Directory dir = newDirectory();
final RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir);
for (int i = 0; i < numDocs; ++i) {
w.addDocument(doc);
}
final IndexReader reader = w.getReader();
final IndexSearcher searcher = newSearcher(reader);
for (int i = 0; i <= numDocs; ++i) {
assertEquals(numDocs - i, searcher.count(new MinDocQuery(i)));
}
w.close();
reader.close();
dir.close();
}
}

View File

@ -57,7 +57,7 @@ import java.util.Collections;
import static org.hamcrest.Matchers.equalTo;
public class ESExceptionTests extends ESTestCase {
private static final ToXContent.Params PARAMS = new ToXContent.MapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "true"));
private static final ToXContent.Params PARAMS = ToXContent.EMPTY_PARAMS;
@Test
public void testStatus() {

View File

@ -526,7 +526,7 @@ public class ExceptionSerializationTests extends ESTestCase {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject();
x.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "true")));
x.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {
@ -607,4 +607,20 @@ public class ExceptionSerializationTests extends ESTestCase {
assertEquals(ex.status(), e.status());
assertEquals(RestStatus.UNAUTHORIZED, e.status());
}
public void testInterruptedException() throws IOException {
InterruptedException orig = randomBoolean() ? new InterruptedException("boom") : new InterruptedException();
InterruptedException ex = serialize(orig);
assertEquals(orig.getMessage(), ex.getMessage());
}
public static class UnknownException extends Exception {
public UnknownException(String message) {
super(message);
}
public UnknownException(String message, Throwable cause) {
super(message, cause);
}
}
}

Some files were not shown because too many files have changed in this diff.