Merge branch 'master' into lists_are_simple

Ryan Ernst 2015-08-24 19:06:58 -07:00
commit 311faa822a
245 changed files with 4362 additions and 2755 deletions

View File

@@ -0,0 +1,119 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.apache.lucene.queries;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;

import java.io.IOException;

/** A {@link Query} that only matches documents that are greater than or equal
 *  to a configured doc ID. */
public final class MinDocQuery extends Query {

    private final int minDoc;

    /** Sole constructor. */
    public MinDocQuery(int minDoc) {
        this.minDoc = minDoc;
    }

    @Override
    public int hashCode() {
        return 31 * super.hashCode() + minDoc;
    }

    @Override
    public boolean equals(Object obj) {
        if (super.equals(obj) == false) {
            return false;
        }
        MinDocQuery that = (MinDocQuery) obj;
        return minDoc == that.minDoc;
    }

    @Override
    public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
        return new ConstantScoreWeight(this) {
            @Override
            public Scorer scorer(LeafReaderContext context, final Bits acceptDocs) throws IOException {
                final int maxDoc = context.reader().maxDoc();
                if (context.docBase + maxDoc <= minDoc) {
                    return null;
                }
                final int segmentMinDoc = Math.max(0, minDoc - context.docBase);
                final DocIdSetIterator disi = new DocIdSetIterator() {

                    int doc = -1;

                    @Override
                    public int docID() {
                        return doc;
                    }

                    @Override
                    public int nextDoc() throws IOException {
                        return advance(doc + 1);
                    }

                    @Override
                    public int advance(int target) throws IOException {
                        assert target > doc;
                        if (doc == -1) {
                            // skip directly to minDoc
                            doc = Math.max(target, segmentMinDoc);
                        } else {
                            doc = target;
                        }
                        while (doc < maxDoc) {
                            if (acceptDocs == null || acceptDocs.get(doc)) {
                                break;
                            }
                            doc += 1;
                        }
                        if (doc >= maxDoc) {
                            doc = NO_MORE_DOCS;
                        }
                        return doc;
                    }

                    @Override
                    public long cost() {
                        return maxDoc - segmentMinDoc;
                    }

                };
                return new ConstantScoreScorer(this, score(), disi);
            }
        };
    }

    @Override
    public String toString(String field) {
        return "MinDocQuery(minDoc=" + minDoc + ")";
    }
}
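On its own, a MinDocQuery matches every document from the configured doc ID onward, so one plausible use is as a non-scoring conjunct next to a real query to skip already-seen results. A minimal sketch under that assumption, for the Lucene 5.x API of this era; `searcher`, `query`, and `lastSeenDocId` are hypothetical names, not part of this commit:

import java.io.IOException;
import org.apache.lucene.queries.MinDocQuery;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;

// Fetch the next page of hits whose Lucene doc IDs are strictly greater than
// lastSeenDocId; the FILTER clause makes MinDocQuery non-scoring, and its
// iterator jumps straight to the first candidate doc.
static TopDocs nextPage(IndexSearcher searcher, Query query, int lastSeenDocId) throws IOException {
    BooleanQuery bq = new BooleanQuery();
    bq.add(query, BooleanClause.Occur.MUST);                                // scoring clause
    bq.add(new MinDocQuery(lastSeenDocId + 1), BooleanClause.Occur.FILTER); // non-scoring skip
    return searcher.search(bq, 10);
}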

View File

@@ -45,8 +45,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
 
     public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip";
     public static final String REST_EXCEPTION_SKIP_STACK_TRACE = "rest.exception.stacktrace.skip";
-    private static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = false;
-    private static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
+    public static final boolean REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT = true;
+    public static final boolean REST_EXCEPTION_SKIP_CAUSE_DEFAULT = false;
     private static final String INDEX_HEADER_KEY = "es.index";
     private static final String SHARD_HEADER_KEY = "es.shard";
     private static final String RESOURCE_HEADER_TYPE_KEY = "es.resource.type";

View File

@@ -19,45 +19,7 @@
 package org.elasticsearch.action;
 
-import org.elasticsearch.ElasticsearchException;
-
-import java.util.ArrayList;
-import java.util.List;
-
-/**
- *
- */
-public class ActionRequestValidationException extends IllegalArgumentException {
-
-    private final List<String> validationErrors = new ArrayList<>();
-
-    public ActionRequestValidationException() {
-        super("validation failed");
-    }
-
-    public void addValidationError(String error) {
-        validationErrors.add(error);
-    }
-
-    public void addValidationErrors(Iterable<String> errors) {
-        for (String error : errors) {
-            validationErrors.add(error);
-        }
-    }
-
-    public List<String> validationErrors() {
-        return validationErrors;
-    }
-
-    @Override
-    public String getMessage() {
-        StringBuilder sb = new StringBuilder();
-        sb.append("Validation Failed: ");
-        int index = 0;
-        for (String error : validationErrors) {
-            sb.append(++index).append(": ").append(error).append(";");
-        }
-        return sb.toString();
-    }
+import org.elasticsearch.common.ValidationException;
+
+public class ActionRequestValidationException extends ValidationException {
 }
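For reference, the accumulation logic deleted above now lives in the new org.elasticsearch.common.ValidationException base class. A sketch of that base class, reconstructed from the removed code; the real class may differ in small details:

package org.elasticsearch.common;

import java.util.ArrayList;
import java.util.List;

// Reconstructed sketch: collects errors, then formats them as one numbered message.
public class ValidationException extends IllegalArgumentException {

    private final List<String> validationErrors = new ArrayList<>();

    public ValidationException() {
        super("validation failed");
    }

    public void addValidationError(String error) {
        validationErrors.add(error);
    }

    public void addValidationErrors(Iterable<String> errors) {
        for (String error : errors) {
            validationErrors.add(error);
        }
    }

    public List<String> validationErrors() {
        return validationErrors;
    }

    @Override
    public String getMessage() {
        StringBuilder sb = new StringBuilder("Validation Failed: ");
        int index = 0;
        for (String error : validationErrors) {
            sb.append(++index).append(": ").append(error).append(";");
        }
        return sb.toString();
    }
}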

View File

@@ -54,7 +54,9 @@ public enum SearchType {
     /**
      * Performs scanning of the results which executes the search without any sorting.
      * It will automatically start scrolling the result set.
+     * @deprecated will be removed in 3.0, you should do a regular scroll instead, ordered by `_doc`
      */
+    @Deprecated
     SCAN((byte) 4),
     /**
      * Only counts the results, will still execute aggregations and the like.
@@ -69,6 +71,7 @@ public enum SearchType {
     public static final SearchType DEFAULT = QUERY_THEN_FETCH;
 
     private static final ParseField COUNT_VALUE = new ParseField("count").withAllDeprecated("query_then_fetch");
+    private static final ParseField SCAN_VALUE = new ParseField("scan").withAllDeprecated("query_then_fetch sorting on `_doc`");
 
     private byte id;
@@ -121,7 +124,7 @@ public enum SearchType {
             return SearchType.QUERY_THEN_FETCH;
         } else if ("query_and_fetch".equals(searchType)) {
             return SearchType.QUERY_AND_FETCH;
-        } else if ("scan".equals(searchType)) {
+        } else if (parseFieldMatcher.match(searchType, SCAN_VALUE)) {
             return SearchType.SCAN;
         } else if (parseFieldMatcher.match(searchType, COUNT_VALUE)) {
             return SearchType.COUNT;
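The deprecation message points at a regular scroll ordered by `_doc` as the replacement for SCAN. A hedged sketch of that replacement with the Java client of this era; the `client` instance and index name are assumptions for illustration:

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.sort.SortOrder;

// Instead of SearchType.SCAN: an ordinary scroll sorted by _doc, the cheapest
// possible ordering, exactly as the deprecation message suggests.
SearchResponse response = client.prepareSearch("my_index")
        .addSort("_doc", SortOrder.ASC)
        .setScroll(TimeValue.timeValueMinutes(1))
        .setSize(100)
        .get();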

View File

@@ -40,6 +40,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 import static org.elasticsearch.action.search.type.TransportSearchHelper.buildScrollId;
 
+@Deprecated // remove in 3.0
 public class TransportSearchScanAction extends TransportSearchTypeAction {
 
     @Inject

View File

@@ -21,15 +21,12 @@ package org.elasticsearch.bootstrap;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.StringHelper;
-import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.PidFile;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.cli.CliTool;
 import org.elasticsearch.common.cli.Terminal;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.inject.CreationException;
-import org.elasticsearch.common.inject.spi.Message;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
@@ -44,16 +41,14 @@ import org.elasticsearch.node.NodeBuilder;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;
 
 import java.util.Locale;
-import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 
-import static com.google.common.collect.Sets.newHashSet;
 import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
 
 /**
- * A main entry point when starting from the command line.
+ * Internal startup code.
  */
-public class Bootstrap {
+final class Bootstrap {
 
     private static volatile Bootstrap INSTANCE;
@@ -137,10 +132,6 @@ public class Bootstrap {
         OsProbe.getInstance();
     }
 
-    public static boolean isMemoryLocked() {
-        return Natives.isMemoryLocked();
-    }
-
     private void setup(boolean addShutdownHook, Settings settings, Environment environment) throws Exception {
         initializeNatives(settings.getAsBoolean("bootstrap.mlockall", false),
                           settings.getAsBoolean("bootstrap.ctrlhandler", true));
@@ -222,7 +213,11 @@
         }
     }
 
-    public static void main(String[] args) throws Throwable {
+    /**
+     * This method is invoked by {@link Elasticsearch#main(String[])}
+     * to startup elasticsearch.
+     */
+    static void init(String[] args) throws Throwable {
         BootstrapCLIParser bootstrapCLIParser = new BootstrapCLIParser();
         CliTool.ExitStatus status = bootstrapCLIParser.execute(args);
@@ -277,11 +272,19 @@
                 closeSysError();
             }
         } catch (Throwable e) {
+            // disable console logging, so user does not see the exception twice (jvm will show it already)
+            if (foreground) {
+                Loggers.disableConsoleLogging();
+            }
             ESLogger logger = Loggers.getLogger(Bootstrap.class);
             if (INSTANCE.node != null) {
                 logger = Loggers.getLogger(Bootstrap.class, INSTANCE.node.settings().get("name"));
             }
             logger.error("Exception", e);
+            // re-enable it if appropriate, so they can see any logging during the shutdown process
+            if (foreground) {
+                Loggers.enableConsoleLogging();
+            }
             throw e;
         }

View File

@@ -38,7 +38,7 @@ import java.util.Properties;
 import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
 import static org.elasticsearch.common.cli.CliToolConfig.Builder.optionBuilder;
 
-public class BootstrapCLIParser extends CliTool {
+final class BootstrapCLIParser extends CliTool {
 
     private static final CliToolConfig CONFIG = CliToolConfig.config("elasticsearch", BootstrapCLIParser.class)
             .cmds(Start.CMD, Version.CMD)

View File

@@ -0,0 +1,46 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.bootstrap;

/**
 * Exposes system startup information
 */
public final class BootstrapInfo {

    /** no instantiation */
    private BootstrapInfo() {}

    /**
     * Returns true if we successfully loaded native libraries.
     * <p>
     * If this returns false, then native operations such as locking
     * memory did not work.
     */
    public static boolean isNativesAvailable() {
        return Natives.JNA_AVAILABLE;
    }

    /**
     * Returns true if we were able to lock the process's address space.
     */
    public static boolean isMemoryLocked() {
        return Natives.isMemoryLocked();
    }
}
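A sketch of the kind of caller-side check this class enables; the `settings` and `logger` objects here are hypothetical and not part of this commit:

// Warn when memory locking was requested but did not take effect,
// e.g. because JNA is missing or RLIMIT_MEMLOCK is too low.
if (settings.getAsBoolean("bootstrap.mlockall", false)
        && BootstrapInfo.isMemoryLocked() == false) {
    logger.warn("bootstrap.mlockall was requested, but memory is not locked");
}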

View File

@@ -20,11 +20,23 @@
 package org.elasticsearch.bootstrap;
 
 /**
- * A wrapper around {@link Bootstrap} just so the process will look nicely on things like jps.
+ * This class starts elasticsearch.
  */
-public class Elasticsearch extends Bootstrap {
+public final class Elasticsearch {
 
-    public static void main(String[] args) throws Throwable {
-        Bootstrap.main(args);
+    /** no instantiation */
+    private Elasticsearch() {}
+
+    /**
+     * Main entry point for starting elasticsearch
+     */
+    public static void main(String[] args) throws StartupError {
+        try {
+            Bootstrap.init(args);
+        } catch (Throwable t) {
+            // format exceptions to the console in a special way
+            // to avoid 2MB stacktraces from guice, etc.
+            throw new StartupError(t);
+        }
     }
 }

View File

@@ -59,7 +59,7 @@ final class JNACLibrary {
         public long rlim_max = 0;
 
         @Override
-        protected List getFieldOrder() {
+        protected List<String> getFieldOrder() {
             return Arrays.asList(new String[] { "rlim_cur", "rlim_max" });
         }
     }

View File

@@ -35,7 +35,7 @@ import java.util.List;
 /**
  * Library for Windows/Kernel32
  */
-class JNAKernel32Library {
+final class JNAKernel32Library {
 
     private static final ESLogger logger = Loggers.getLogger(JNAKernel32Library.class);
@@ -148,7 +148,7 @@ class JNAKernel32Library {
         public NativeLong Type;
 
         @Override
-        protected List getFieldOrder() {
+        protected List<String> getFieldOrder() {
             return Arrays.asList(new String[]{"BaseAddress", "AllocationBase", "AllocationProtect", "RegionSize", "State", "Protect", "Type"});
         }
     }

View File

@@ -34,10 +34,13 @@ import static org.elasticsearch.bootstrap.JNAKernel32Library.SizeT;
  */
 class JNANatives {
 
+    /** no instantiation */
+    private JNANatives() {}
+
     private static final ESLogger logger = Loggers.getLogger(JNANatives.class);
 
     // Set to true, in case native mlockall call was successful
-    public static boolean LOCAL_MLOCKALL = false;
+    static boolean LOCAL_MLOCKALL = false;
 
     static void tryMlockall() {
         int errno = Integer.MIN_VALUE;
@@ -72,16 +75,18 @@
             }
 
             // mlockall failed for some reason
-            logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg + ". This can result in part of the JVM being swapped out.");
+            logger.warn("Unable to lock JVM Memory: error=" + errno + ",reason=" + errMsg);
+            logger.warn("This can result in part of the JVM being swapped out.");
             if (errno == JNACLibrary.ENOMEM) {
                 if (rlimitSuccess) {
                     logger.warn("Increase RLIMIT_MEMLOCK, soft limit: " + rlimitToString(softLimit) + ", hard limit: " + rlimitToString(hardLimit));
                     if (Constants.LINUX) {
                         // give specific instructions for the linux case to make it easy
+                        String user = System.getProperty("user.name");
                         logger.warn("These can be adjusted by modifying /etc/security/limits.conf, for example: \n" +
-                                    "\t# allow user 'esuser' mlockall\n" +
-                                    "\tesuser soft memlock unlimited\n" +
-                                    "\tesuser hard memlock unlimited"
+                                    "\t# allow user '" + user + "' mlockall\n" +
+                                    "\t" + user + " soft memlock unlimited\n" +
+                                    "\t" + user + " hard memlock unlimited"
                                     );
                         logger.warn("If you are logged in interactively, you will have to re-login for the new limits to take effect.");
                     }

View File

@@ -29,6 +29,8 @@ import java.util.Map;
 /** Checks that the JVM is ok and won't cause index corruption */
 final class JVMCheck {
+    /** no instantiation */
+    private JVMCheck() {}
 
     /**
      * URL with latest JVM recommendations

View File

@@ -37,15 +37,30 @@ import java.util.Arrays;
 import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
 import java.util.jar.JarEntry;
 import java.util.jar.JarFile;
 import java.util.jar.Manifest;
 
-/** Simple check for duplicate class files across the classpath */
+/**
+ * Simple check for duplicate class files across the classpath.
+ * <p>
+ * This class checks for incompatibilities in the following ways:
+ * <ul>
+ *   <li>Checks that class files are not duplicated across jars.</li>
+ *   <li>Checks any {@code X-Compile-Target-JDK} value in the jar
+ *       manifest is compatible with current JRE</li>
+ *   <li>Checks any {@code X-Compile-Elasticsearch-Version} value in
+ *       the jar manifest is compatible with the current ES</li>
+ * </ul>
+ */
 public class JarHell {
+
+    /** no instantiation */
+    private JarHell() {}
+
     /** Simple driver class, can be used eg. from builds. Returns non-zero on jar-hell */
     @SuppressForbidden(reason = "command line tool")
     public static void main(String args[]) throws Exception {
@@ -69,7 +84,7 @@ public class JarHell {
             logger.debug("sun.boot.class.path: {}", System.getProperty("sun.boot.class.path"));
             logger.debug("classloader urls: {}", Arrays.toString(((URLClassLoader)loader).getURLs()));
         }
-        checkJarHell(((URLClassLoader)loader).getURLs());
+        checkJarHell(((URLClassLoader) loader).getURLs());
     }
 
     /**
@@ -141,6 +156,7 @@ public class JarHell {
                 // give a nice error if jar requires a newer java version
                 String targetVersion = manifest.getMainAttributes().getValue("X-Compile-Target-JDK");
                 if (targetVersion != null) {
+                    checkVersionFormat(targetVersion);
                    checkJavaVersion(jar.toString(), targetVersion);
                 }
@@ -153,23 +169,34 @@
         }
     }
 
+    public static void checkVersionFormat(String targetVersion) {
+        if (!JavaVersion.isValid(targetVersion)) {
+            throw new IllegalStateException(
+                    String.format(
+                            Locale.ROOT,
+                            "version string must be a sequence of nonnegative decimal integers separated by \".\"'s and may have leading zeros but was %s",
+                            targetVersion
+                    )
+            );
+        }
+    }
+
     /**
      * Checks that the java specification version {@code targetVersion}
      * required by {@code resource} is compatible with the current installation.
      */
     public static void checkJavaVersion(String resource, String targetVersion) {
-        String systemVersion = System.getProperty("java.specification.version");
-        float current = Float.POSITIVE_INFINITY;
-        float target = Float.NEGATIVE_INFINITY;
-        try {
-            current = Float.parseFloat(systemVersion);
-            target = Float.parseFloat(targetVersion);
-        } catch (NumberFormatException e) {
-            // some spec changed, time for a more complex parser
-        }
-        if (current < target) {
-            throw new IllegalStateException(resource + " requires Java " + targetVersion
-                    + ", your system: " + systemVersion);
+        JavaVersion version = JavaVersion.parse(targetVersion);
+        if (JavaVersion.current().compareTo(version) < 0) {
+            throw new IllegalStateException(
+                    String.format(
+                            Locale.ROOT,
+                            "%s requires Java %s:, your system: %s",
+                            resource,
+                            targetVersion,
+                            JavaVersion.current().toString()
+                    )
+            );
         }
     }

View File

@@ -0,0 +1,84 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.bootstrap;

import org.elasticsearch.common.Strings;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

class JavaVersion implements Comparable<JavaVersion> {

    private final List<Integer> version;

    public List<Integer> getVersion() {
        return Collections.unmodifiableList(version);
    }

    private JavaVersion(List<Integer> version) {
        this.version = version;
    }

    public static JavaVersion parse(String value) {
        if (value == null) {
            throw new NullPointerException("value");
        }
        if ("".equals(value)) {
            throw new IllegalArgumentException("value");
        }

        List<Integer> version = new ArrayList<>();
        String[] components = value.split("\\.");
        for (String component : components) {
            version.add(Integer.valueOf(component));
        }

        return new JavaVersion(version);
    }

    public static boolean isValid(String value) {
        return value.matches("^0*[0-9]+(\\.[0-9]+)*$");
    }

    private final static JavaVersion CURRENT = parse(System.getProperty("java.specification.version"));

    public static JavaVersion current() {
        return CURRENT;
    }

    @Override
    public int compareTo(JavaVersion o) {
        int len = Math.max(version.size(), o.version.size());
        for (int i = 0; i < len; i++) {
            int d = (i < version.size() ? version.get(i) : 0);
            int s = (i < o.version.size() ? o.version.get(i) : 0);
            if (s < d)
                return 1;
            if (s > d)
                return -1;
        }
        return 0;
    }

    @Override
    public String toString() {
        return Strings.collectionToDelimitedString(version, ".");
    }
}
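Note that compareTo pads the shorter version with zeros, so trailing ".0" segments do not affect ordering. A few worked examples of the semantics above (from within org.elasticsearch.bootstrap, since the class is package-private):

JavaVersion a = JavaVersion.parse("1.7");
JavaVersion b = JavaVersion.parse("1.7.0");
JavaVersion c = JavaVersion.parse("1.8");
assert a.compareTo(b) == 0;                    // "1.7" equals "1.7.0" after zero-padding
assert a.compareTo(c) < 0;                     // "1.7" sorts before "1.8"
assert JavaVersion.isValid("01.8");            // leading zeros are accepted by the regex
assert JavaVersion.isValid("1.8u40") == false; // only dot-separated decimal integers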

View File

@@ -26,27 +26,32 @@ import org.elasticsearch.common.logging.Loggers;
  * The Natives class is a wrapper class that checks if the classes necessary for calling native methods are available on
  * startup. If they are not available, this class will avoid calling code that loads these classes.
  */
-class Natives {
+final class Natives {
+    /** no instantiation */
+    private Natives() {}
+
     private static final ESLogger logger = Loggers.getLogger(Natives.class);
 
     // marker to determine if the JNA class files are available to the JVM
-    private static boolean jnaAvailable = false;
+    static final boolean JNA_AVAILABLE;
 
     static {
+        boolean v = false;
         try {
             // load one of the main JNA classes to see if the classes are available. this does not ensure that all native
             // libraries are available, only the ones necessary by JNA to function
             Class.forName("com.sun.jna.Native");
-            jnaAvailable = true;
+            v = true;
         } catch (ClassNotFoundException e) {
             logger.warn("JNA not found. native methods will be disabled.", e);
         } catch (UnsatisfiedLinkError e) {
             logger.warn("unable to load JNA native support library, native methods will be disabled.", e);
         }
+        JNA_AVAILABLE = v;
     }
 
     static void tryMlockall() {
-        if (!jnaAvailable) {
+        if (!JNA_AVAILABLE) {
             logger.warn("cannot mlockall because JNA is not available");
             return;
         }
@@ -54,7 +59,7 @@
     }
 
     static boolean definitelyRunningAsRoot() {
-        if (!jnaAvailable) {
+        if (!JNA_AVAILABLE) {
             logger.warn("cannot check if running as root because JNA is not available");
             return false;
         }
@@ -62,7 +67,7 @@
     }
 
     static void tryVirtualLock() {
-        if (!jnaAvailable) {
+        if (!JNA_AVAILABLE) {
             logger.warn("cannot mlockall because JNA is not available");
             return;
         }
@@ -70,7 +75,7 @@
     }
 
     static void addConsoleCtrlHandler(ConsoleCtrlHandler handler) {
-        if (!jnaAvailable) {
+        if (!JNA_AVAILABLE) {
             logger.warn("cannot register console handler because JNA is not available");
             return;
         }
@@ -78,7 +83,7 @@
     }
 
     static boolean isMemoryLocked() {
-        if (!jnaAvailable) {
+        if (!JNA_AVAILABLE) {
             return false;
         }
         return JNANatives.LOCAL_MLOCKALL;
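The temporary `v` in the static initializer above is not incidental: a static final field must be definitely assigned exactly once on every path, and assigning it both inside the try and in a catch is rejected by the compiler ("might already have been assigned"). The pattern in isolation, with hypothetical class and dependency names:

final class OptionalFeature {
    static final boolean FEATURE_AVAILABLE;
    static {
        boolean v = false; // stage the result in a local
        try {
            Class.forName("some.optional.Dependency"); // hypothetical optional class
            v = true;
        } catch (ClassNotFoundException e) {
            // dependency absent: leave v == false
        }
        FEATURE_AVAILABLE = v; // single definite assignment of the final field
    }
}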

View File

@@ -38,15 +38,59 @@ import java.util.Map;
 import java.util.regex.Pattern;
 
 /**
- * Initializes securitymanager with necessary permissions.
+ * Initializes SecurityManager with necessary permissions.
  * <p>
- * We use a template file (the one we test with), and add additional
- * permissions based on the environment (data paths, etc)
+ * <h1>Initialization</h1>
+ * The JVM is not initially started with security manager enabled,
+ * instead we turn it on early in the startup process. This is a tradeoff
+ * between security and ease of use:
+ * <ul>
+ *   <li>Assigns file permissions to user-configurable paths that can
+ *       be specified from the command-line or {@code elasticsearch.yml}.</li>
+ *   <li>Allows for some contained usage of native code that would not
+ *       otherwise be permitted.</li>
+ * </ul>
+ * <p>
+ * <h1>Permissions</h1>
+ * Permissions use a policy file packaged as a resource, this file is
+ * also used in tests. File permissions are generated dynamically and
+ * combined with this policy file.
+ * <p>
+ * For each configured path, we ensure it exists and is accessible before
+ * granting permissions, otherwise directory creation would require
+ * permissions to parent directories.
+ * <p>
+ * In some exceptional cases, permissions are assigned to specific jars only,
+ * when they are so dangerous that general code should not be granted the
+ * permission, but there are extenuating circumstances.
+ * <p>
+ * Groovy scripts are assigned no permissions. This does not provide adequate
+ * sandboxing, as these scripts still have access to ES classes, and could
+ * modify members, etc that would cause bad things to happen later on their
+ * behalf (no package protections are yet in place, this would need some
+ * cleanups to the scripting apis). But still it can provide some defense for users
+ * that enable dynamic scripting without being fully aware of the consequences.
+ * <p>
+ * <h1>Disabling Security</h1>
+ * SecurityManager can be disabled completely with this setting:
+ * <pre>
+ * es.security.manager.enabled = false
+ * </pre>
+ * <p>
+ * <h1>Debugging Security</h1>
+ * A good place to start when there is a problem is to turn on security debugging:
+ * <pre>
+ * JAVA_OPTS="-Djava.security.debug=access:failure" bin/elasticsearch
+ * </pre>
+ * See <a href="https://docs.oracle.com/javase/7/docs/technotes/guides/security/troubleshooting-security.html">
+ * Troubleshooting Security</a> for information.
  */
 final class Security {
+    /** no instantiation */
+    private Security() {}
+
     /**
-     * Initializes securitymanager for the environment
+     * Initializes SecurityManager for the environment
      * Can only happen once!
      */
     static void configure(Environment environment) throws Exception {
@@ -118,25 +162,25 @@ final class Security {
     static Permissions createPermissions(Environment environment) throws IOException {
         Permissions policy = new Permissions();
         // read-only dirs
-        addPath(policy, environment.binFile(), "read,readlink");
-        addPath(policy, environment.libFile(), "read,readlink");
-        addPath(policy, environment.pluginsFile(), "read,readlink");
-        addPath(policy, environment.configFile(), "read,readlink");
-        addPath(policy, environment.scriptsFile(), "read,readlink");
+        addPath(policy, "path.home", environment.binFile(), "read,readlink");
+        addPath(policy, "path.home", environment.libFile(), "read,readlink");
+        addPath(policy, "path.plugins", environment.pluginsFile(), "read,readlink");
+        addPath(policy, "path.conf", environment.configFile(), "read,readlink");
+        addPath(policy, "path.scripts", environment.scriptsFile(), "read,readlink");
         // read-write dirs
-        addPath(policy, environment.tmpFile(), "read,readlink,write,delete");
-        addPath(policy, environment.logsFile(), "read,readlink,write,delete");
+        addPath(policy, "java.io.tmpdir", environment.tmpFile(), "read,readlink,write,delete");
+        addPath(policy, "path.logs", environment.logsFile(), "read,readlink,write,delete");
         if (environment.sharedDataFile() != null) {
-            addPath(policy, environment.sharedDataFile(), "read,readlink,write,delete");
+            addPath(policy, "path.shared_data", environment.sharedDataFile(), "read,readlink,write,delete");
         }
         for (Path path : environment.dataFiles()) {
-            addPath(policy, path, "read,readlink,write,delete");
+            addPath(policy, "path.data", path, "read,readlink,write,delete");
         }
         for (Path path : environment.dataWithClusterFiles()) {
-            addPath(policy, path, "read,readlink,write,delete");
+            addPath(policy, "path.data", path, "read,readlink,write,delete");
         }
         for (Path path : environment.repoFiles()) {
-            addPath(policy, path, "read,readlink,write,delete");
+            addPath(policy, "path.repo", path, "read,readlink,write,delete");
         }
         if (environment.pidFile() != null) {
             // we just need permission to remove the file if its elsewhere.
@@ -145,10 +189,20 @@
         return policy;
     }
 
-    /** Add access to path (and all files underneath it */
-    static void addPath(Permissions policy, Path path, String permissions) throws IOException {
-        // paths may not exist yet
+    /**
+     * Add access to path (and all files underneath it)
+     * @param policy current policy to add permissions to
+     * @param configurationName the configuration name associated with the path (for error messages only)
+     * @param path the path itself
+     * @param permissions set of filepermissions to grant to the path
+     */
+    static void addPath(Permissions policy, String configurationName, Path path, String permissions) {
+        // paths may not exist yet, this also checks accessibility
+        try {
             ensureDirectoryExists(path);
+        } catch (IOException e) {
+            throw new IllegalStateException("Unable to access '" + configurationName + "' (" + path + ")", e);
+        }
+
         // add each path twice: once for itself, again for files underneath it
         policy.add(new FilePermission(path.toString(), permissions));
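The hunk is cut off right after the first grant, but the comment indicates a second, recursive grant follows. In java.io.FilePermission syntax a path ending in the separator plus "-" covers the whole tree below it, so a sketch of the likely pair (the second line is an assumption, not shown in this diff):

policy.add(new FilePermission(path.toString(), permissions));                   // the directory itself
policy.add(new FilePermission(path.toString()
        + path.getFileSystem().getSeparator() + "-", permissions));             // everything underneath it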

View File

@@ -0,0 +1,115 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.bootstrap;

import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.inject.spi.Message;

import java.io.PrintStream;

/**
 * Wraps an exception in a special way that it gets formatted
 * "reasonably". This means limits on stacktrace frames and
 * cleanup for guice, and some guidance about consulting full
 * logs for the whole exception.
 */
// TODO: remove this when guice is removed, and exceptions are cleaned up
// this is horrible, but its what we must do
final class StartupError extends RuntimeException {

    /** maximum length of a stacktrace, before we truncate it */
    static final int STACKTRACE_LIMIT = 30;
    /** all lines from this package are RLE-compressed */
    static final String GUICE_PACKAGE = "org.elasticsearch.common.inject";

    /**
     * Create a new StartupError that will format {@code cause}
     * to the console on failure.
     */
    StartupError(Throwable cause) {
        super(cause);
    }

    /*
     * This logic actually prints the exception to the console, its
     * what is invoked by the JVM when we throw the exception from main()
     */
    @Override
    public void printStackTrace(PrintStream s) {
        Throwable originalCause = getCause();
        Throwable cause = originalCause;
        if (cause instanceof CreationException) {
            cause = getFirstGuiceCause((CreationException) cause);
        }

        String message = cause.toString();
        s.println(message);

        if (cause != null) {
            // walk to the root cause
            while (cause.getCause() != null) {
                cause = cause.getCause();
            }

            // print the root cause message, only if it differs!
            if (cause != originalCause && (message.equals(cause.toString()) == false)) {
                s.println("Likely root cause: " + cause);
            }

            // print stacktrace of cause
            StackTraceElement stack[] = cause.getStackTrace();
            int linesWritten = 0;
            for (int i = 0; i < stack.length; i++) {
                if (linesWritten == STACKTRACE_LIMIT) {
                    s.println("\t<<<truncated>>>");
                    break;
                }
                String line = stack[i].toString();

                // skip past contiguous runs of this garbage:
                if (line.startsWith(GUICE_PACKAGE)) {
                    while (i + 1 < stack.length && stack[i + 1].toString().startsWith(GUICE_PACKAGE)) {
                        i++;
                    }
                    s.println("\tat <<<guice>>>");
                    linesWritten++;
                    continue;
                }

                s.println("\tat " + line.toString());
                linesWritten++;
            }
        }
        s.println("Refer to the log for complete error details.");
    }

    /**
     * Returns first cause from a guice error (it can have multiple).
     */
    static Throwable getFirstGuiceCause(CreationException guice) {
        for (Message message : guice.getErrorMessages()) {
            Throwable cause = message.getCause();
            if (cause != null) {
                return cause;
            }
        }
        return guice; // we tried
    }
}
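A small hypothetical demonstration of the behavior; since StartupError is package-private, this would have to live in the same package:

public static void main(String[] args) {
    try {
        throw new IllegalStateException("boom");
    } catch (Throwable t) {
        // prints the exception's toString(), at most STACKTRACE_LIMIT frames
        // (any guice run collapsed to "at <<<guice>>>"), then a pointer to the log
        new StartupError(t).printStackTrace(System.err);
    }
}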

View File

@@ -35,6 +35,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Module;
 import org.elasticsearch.common.inject.ModulesBuilder;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
@@ -132,7 +133,11 @@ public class TransportClient extends AbstractClient {
             try {
                 ModulesBuilder modules = new ModulesBuilder();
                 modules.add(new Version.Module(version));
-                modules.add(new PluginsModule(this.settings, pluginsService));
+                // plugin modules must be added here, before others or we can get crazy injection errors...
+                for (Module pluginModule : pluginsService.nodeModules()) {
+                    modules.add(pluginModule);
+                }
+                modules.add(new PluginsModule(pluginsService));
                 modules.add(new EnvironmentModule(environment));
                 modules.add(new SettingsModule(this.settings));
                 modules.add(new NetworkModule());
@@ -149,6 +154,8 @@ public class TransportClient extends AbstractClient {
                 modules.add(new ClientTransportModule());
                 modules.add(new CircuitBreakerModule(this.settings));
 
+                pluginsService.processModules(modules);
+
                 Injector injector = modules.createInjector();
                 injector.getInstance(TransportService.class).start();
                 TransportClient transportClient = new TransportClient(injector);

View File

@@ -30,7 +30,7 @@ import java.util.Map;
 * <code>InternalClusterInfoService.shardIdentifierFromRouting(String)</code>
 * for the key used in the shardSizes map
 */
-public final class ClusterInfo {
+public class ClusterInfo {
 
     private final Map<String, DiskUsage> usages;
     final Map<String, Long> shardSizes;
@@ -54,6 +54,11 @@ public final class ClusterInfo {
         return shardSizes.get(shardIdentifierFromRouting(shardRouting));
     }
 
+    public long getShardSize(ShardRouting shardRouting, long defaultValue) {
+        Long shardSize = getShardSize(shardRouting);
+        return shardSize == null ? defaultValue : shardSize;
+    }
+
     /**
      * Method that incorporates the ShardId for the shard into a string that
     * includes a 'p' or 'r' depending on whether the shard is a primary.
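The new overload lets callers substitute an explicit default instead of null-checking the boxed Long at every call site. A hypothetical call site:

// Treat an unknown shard size as zero bytes for accounting purposes.
long size = clusterInfo.getShardSize(shardRouting, 0L);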

View File

@@ -19,6 +19,7 @@
 package org.elasticsearch.cluster;
 
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.LatchedActionListener;
 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
@@ -36,6 +37,7 @@ import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.monitor.fs.FsInfo;
 import org.elasticsearch.node.settings.NodeSettingsService;
@@ -45,6 +47,7 @@ import org.elasticsearch.transport.ReceiveTimeoutTransportException;
 import java.util.*;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
 
 /**
  * InternalClusterInfoService provides the ClusterInfoService interface,

View File

@@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import com.google.common.base.Charsets;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
@@ -46,6 +45,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.ValidationException;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.inject.Inject;
@@ -60,12 +60,15 @@ import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.query.IndexQueryParserService;
-import org.elasticsearch.index.IndexService;
-import org.elasticsearch.indices.*;
+import org.elasticsearch.indices.IndexAlreadyExistsException;
+import org.elasticsearch.indices.IndexCreationException;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.indices.InvalidIndexNameException;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.joda.time.DateTime;
@@ -514,6 +517,15 @@ public class MetaDataCreateIndexService extends AbstractComponent {
     }
 
     public void validateIndexSettings(String indexName, Settings settings) throws IndexCreationException {
+        List<String> validationErrors = getIndexSettingsValidationErrors(settings);
+        if (validationErrors.isEmpty() == false) {
+            ValidationException validationException = new ValidationException();
+            validationException.addValidationErrors(validationErrors);
+            throw new IndexCreationException(new Index(indexName), validationException);
+        }
+    }
+
+    List<String> getIndexSettingsValidationErrors(Settings settings) {
         String customPath = settings.get(IndexMetaData.SETTING_DATA_PATH, null);
         List<String> validationErrors = Lists.newArrayList();
         if (customPath != null && env.sharedDataFile() == null) {
@@ -532,20 +544,7 @@
         if (number_of_replicas != null && number_of_replicas < 0) {
             validationErrors.add("index must have 0 or more replica shards");
         }
-        if (validationErrors.isEmpty() == false) {
-            throw new IndexCreationException(new Index(indexName),
-                    new IllegalArgumentException(getMessage(validationErrors)));
-        }
-    }
-
-    private String getMessage(List<String> validationErrors) {
-        StringBuilder sb = new StringBuilder();
-        sb.append("Validation Failed: ");
-        int index = 0;
-        for (String error : validationErrors) {
-            sb.append(++index).append(": ").append(error).append(";");
-        }
-        return sb.toString();
+        return validationErrors;
     }
 
     private static class DefaultIndexTemplateFilter implements IndexTemplateFilter {
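The net effect of the split is that error collection is now side-effect free and reusable, while the numbered message formatting comes from ValidationException. What a caller sees, using error strings from this diff and the getMessage logic sketched earlier:

ValidationException e = new ValidationException();
e.addValidationError("index must have 0 or more replica shards");
e.addValidationError("name must not start with '_'");
System.out.println(e.getMessage());
// -> Validation Failed: 1: index must have 0 or more replica shards;2: name must not start with '_';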

View File

@@ -29,12 +29,12 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.TimeoutClusterStateUpdateTask;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.ValidationException;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.indices.IndexCreationException;
 import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
 import org.elasticsearch.indices.IndexTemplateMissingException;
 import org.elasticsearch.indices.InvalidIndexTemplateException;
@@ -179,41 +179,44 @@
     }
 
     private void validate(PutRequest request) {
+        List<String> validationErrors = Lists.newArrayList();
         if (request.name.contains(" ")) {
-            throw new InvalidIndexTemplateException(request.name, "name must not contain a space");
+            validationErrors.add("name must not contain a space");
         }
         if (request.name.contains(",")) {
-            throw new InvalidIndexTemplateException(request.name, "name must not contain a ','");
+            validationErrors.add("name must not contain a ','");
         }
         if (request.name.contains("#")) {
-            throw new InvalidIndexTemplateException(request.name, "name must not contain a '#'");
+            validationErrors.add("name must not contain a '#'");
        }
         if (request.name.startsWith("_")) {
-            throw new InvalidIndexTemplateException(request.name, "name must not start with '_'");
+            validationErrors.add("name must not start with '_'");
         }
         if (!request.name.toLowerCase(Locale.ROOT).equals(request.name)) {
-            throw new InvalidIndexTemplateException(request.name, "name must be lower cased");
+            validationErrors.add("name must be lower cased");
         }
         if (request.template.contains(" ")) {
-            throw new InvalidIndexTemplateException(request.name, "template must not contain a space");
+            validationErrors.add("template must not contain a space");
         }
         if (request.template.contains(",")) {
-            throw new InvalidIndexTemplateException(request.name, "template must not contain a ','");
+            validationErrors.add("template must not contain a ','");
         }
         if (request.template.contains("#")) {
-            throw new InvalidIndexTemplateException(request.name, "template must not contain a '#'");
+            validationErrors.add("template must not contain a '#'");
         }
         if (request.template.startsWith("_")) {
-            throw new InvalidIndexTemplateException(request.name, "template must not start with '_'");
+            validationErrors.add("template must not start with '_'");
         }
         if (!Strings.validFileNameExcludingAstrix(request.template)) {
-            throw new InvalidIndexTemplateException(request.name, "template must not container the following characters " + Strings.INVALID_FILENAME_CHARS);
+            validationErrors.add("template must not container the following characters " + Strings.INVALID_FILENAME_CHARS);
         }
-        try {
-            metaDataCreateIndexService.validateIndexSettings(request.name, request.settings);
-        } catch (IndexCreationException exception) {
-            throw new InvalidIndexTemplateException(request.name, exception.getDetailedMessage());
+
+        List<String> indexSettingsValidation = metaDataCreateIndexService.getIndexSettingsValidationErrors(request.settings);
+        validationErrors.addAll(indexSettingsValidation);
+        if (!validationErrors.isEmpty()) {
+            ValidationException validationException = new ValidationException();
+            validationException.addValidationErrors(validationErrors);
+            throw new InvalidIndexTemplateException(request.name, validationException.getMessage());
         }
 
         for (Alias alias : request.aliases) {

View File

@@ -361,16 +361,16 @@ public class DiscoveryNode implements Streamable, ToXContent {
     public String toString() {
         StringBuilder sb = new StringBuilder();
         if (nodeName.length() > 0) {
-            sb.append('[').append(nodeName).append(']');
+            sb.append('{').append(nodeName).append('}');
         }
         if (nodeId != null) {
-            sb.append('[').append(nodeId).append(']');
+            sb.append('{').append(nodeId).append('}');
         }
         if (Strings.hasLength(hostName)) {
-            sb.append('[').append(hostName).append(']');
+            sb.append('{').append(hostName).append('}');
         }
         if (address != null) {
-            sb.append('[').append(address).append(']');
+            sb.append('{').append(address).append('}');
         }
         if (!attributes.isEmpty()) {
             sb.append(attributes);

View File

@@ -345,10 +345,10 @@ public class RoutingNodes implements Iterable<RoutingNode> {
     /**
      * Moves a shard from unassigned to initialize state
      */
-    public void initialize(ShardRouting shard, String nodeId) {
+    public void initialize(ShardRouting shard, String nodeId, long expectedSize) {
         ensureMutable();
         assert shard.unassigned() : shard;
-        shard.initialize(nodeId);
+        shard.initialize(nodeId, expectedSize);
         node(nodeId).add(shard);
         inactiveShardCount++;
         if (shard.primary()) {
@@ -362,10 +362,10 @@
      * shard as well as assigning it. And returning the target initializing
      * shard.
      */
-    public ShardRouting relocate(ShardRouting shard, String nodeId) {
+    public ShardRouting relocate(ShardRouting shard, String nodeId, long expectedShardSize) {
         ensureMutable();
         relocatingShards++;
-        shard.relocate(nodeId);
+        shard.relocate(nodeId, expectedShardSize);
         ShardRouting target = shard.buildTargetRelocatingShard();
         node(target.currentNodeId()).add(target);
         assignedShardsAdd(target);
@@ -608,16 +608,9 @@
         /**
          * Initializes the current unassigned shard and moves it from the unassigned list.
         */
-        public void initialize(String nodeId) {
-            initialize(nodeId, current.version());
-        }
-
-        /**
-         * Initializes the current unassigned shard and moves it from the unassigned list.
-         */
-        public void initialize(String nodeId, long version) {
+        public void initialize(String nodeId, long version, long expectedShardSize) {
             innerRemove();
-            nodes.initialize(new ShardRouting(current, version), nodeId);
+            nodes.initialize(new ShardRouting(current, version), nodeId, expectedShardSize);
         }
 
         /**

View File

@ -37,6 +37,11 @@ import java.util.List;
*/ */
public final class ShardRouting implements Streamable, ToXContent { public final class ShardRouting implements Streamable, ToXContent {
/**
* Used if shard size is not available
*/
public static final long UNAVAILABLE_EXPECTED_SHARD_SIZE = -1;
private String index; private String index;
private int shardId; private int shardId;
private String currentNodeId; private String currentNodeId;
@ -50,6 +55,7 @@ public final class ShardRouting implements Streamable, ToXContent {
private final transient List<ShardRouting> asList; private final transient List<ShardRouting> asList;
private transient ShardId shardIdentifier; private transient ShardId shardIdentifier;
private boolean frozen = false; private boolean frozen = false;
private long expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
private ShardRouting() { private ShardRouting() {
this.asList = Collections.singletonList(this); this.asList = Collections.singletonList(this);
@ -60,7 +66,7 @@ public final class ShardRouting implements Streamable, ToXContent {
} }
public ShardRouting(ShardRouting copy, long version) { public ShardRouting(ShardRouting copy, long version) {
this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), version, copy.unassignedInfo(), copy.allocationId(), true); this(copy.index(), copy.id(), copy.currentNodeId(), copy.relocatingNodeId(), copy.restoreSource(), copy.primary(), copy.state(), version, copy.unassignedInfo(), copy.allocationId(), true, copy.getExpectedShardSize());
} }
/** /**
@ -69,7 +75,7 @@ public final class ShardRouting implements Streamable, ToXContent {
*/ */
ShardRouting(String index, int shardId, String currentNodeId, ShardRouting(String index, int shardId, String currentNodeId,
String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version, String relocatingNodeId, RestoreSource restoreSource, boolean primary, ShardRoutingState state, long version,
UnassignedInfo unassignedInfo, AllocationId allocationId, boolean internal) { UnassignedInfo unassignedInfo, AllocationId allocationId, boolean internal, long expectedShardSize) {
this.index = index; this.index = index;
this.shardId = shardId; this.shardId = shardId;
this.currentNodeId = currentNodeId; this.currentNodeId = currentNodeId;
@ -81,6 +87,9 @@ public final class ShardRouting implements Streamable, ToXContent {
this.restoreSource = restoreSource; this.restoreSource = restoreSource;
this.unassignedInfo = unassignedInfo; this.unassignedInfo = unassignedInfo;
this.allocationId = allocationId; this.allocationId = allocationId;
this.expectedShardSize = expectedShardSize;
assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state;
assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || expectedShardSize >= 0 : expectedShardSize + " state: " + state;
assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta"; assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta";
if (!internal) { if (!internal) {
assert state == ShardRoutingState.UNASSIGNED; assert state == ShardRoutingState.UNASSIGNED;
@ -88,13 +97,14 @@ public final class ShardRouting implements Streamable, ToXContent {
assert relocatingNodeId == null; assert relocatingNodeId == null;
assert allocationId == null; assert allocationId == null;
} }
} }
/** /**
* Creates a new unassigned shard. * Creates a new unassigned shard.
*/ */
public static ShardRouting newUnassigned(String index, int shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) { public static ShardRouting newUnassigned(String index, int shardId, RestoreSource restoreSource, boolean primary, UnassignedInfo unassignedInfo) {
return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, 0, unassignedInfo, null, true); return new ShardRouting(index, shardId, null, null, restoreSource, primary, ShardRoutingState.UNASSIGNED, 0, unassignedInfo, null, true, UNAVAILABLE_EXPECTED_SHARD_SIZE);
} }
/** /**
@ -205,7 +215,7 @@ public final class ShardRouting implements Streamable, ToXContent {
public ShardRouting buildTargetRelocatingShard() { public ShardRouting buildTargetRelocatingShard() {
assert relocating(); assert relocating();
return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, version, unassignedInfo, return new ShardRouting(index, shardId, relocatingNodeId, currentNodeId, restoreSource, primary, ShardRoutingState.INITIALIZING, version, unassignedInfo,
AllocationId.newTargetRelocation(allocationId), true); AllocationId.newTargetRelocation(allocationId), true, expectedShardSize);
} }
/** /**
@ -317,6 +327,11 @@ public final class ShardRouting implements Streamable, ToXContent {
if (in.readBoolean()) { if (in.readBoolean()) {
allocationId = new AllocationId(in); allocationId = new AllocationId(in);
} }
if (relocating() || initializing()) {
expectedShardSize = in.readLong();
} else {
expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
}
freeze(); freeze();
} }
@ -368,6 +383,10 @@ public final class ShardRouting implements Streamable, ToXContent {
} else { } else {
out.writeBoolean(false); out.writeBoolean(false);
} }
if (relocating() || initializing()) {
out.writeLong(expectedShardSize);
}
} }
@Override @Override
@ -397,12 +416,13 @@ public final class ShardRouting implements Streamable, ToXContent {
relocatingNodeId = null; relocatingNodeId = null;
this.unassignedInfo = unassignedInfo; this.unassignedInfo = unassignedInfo;
allocationId = null; allocationId = null;
expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
} }
/** /**
* Initializes an unassigned shard on a node. * Initializes an unassigned shard on a node.
*/ */
void initialize(String nodeId) { void initialize(String nodeId, long expectedShardSize) {
ensureNotFrozen(); ensureNotFrozen();
version++; version++;
assert state == ShardRoutingState.UNASSIGNED : this; assert state == ShardRoutingState.UNASSIGNED : this;
@ -410,6 +430,7 @@ public final class ShardRouting implements Streamable, ToXContent {
state = ShardRoutingState.INITIALIZING; state = ShardRoutingState.INITIALIZING;
currentNodeId = nodeId; currentNodeId = nodeId;
allocationId = AllocationId.newInitializing(); allocationId = AllocationId.newInitializing();
this.expectedShardSize = expectedShardSize;
} }
/** /**
@ -417,13 +438,14 @@ public final class ShardRouting implements Streamable, ToXContent {
* *
* @param relocatingNodeId id of the node to relocate the shard * @param relocatingNodeId id of the node to relocate the shard
*/ */
void relocate(String relocatingNodeId) { void relocate(String relocatingNodeId, long expectedShardSize) {
ensureNotFrozen(); ensureNotFrozen();
version++; version++;
assert state == ShardRoutingState.STARTED : "current shard has to be started in order to be relocated " + this; assert state == ShardRoutingState.STARTED : "current shard has to be started in order to be relocated " + this;
state = ShardRoutingState.RELOCATING; state = ShardRoutingState.RELOCATING;
this.relocatingNodeId = relocatingNodeId; this.relocatingNodeId = relocatingNodeId;
this.allocationId = AllocationId.newRelocation(allocationId); this.allocationId = AllocationId.newRelocation(allocationId);
this.expectedShardSize = expectedShardSize;
} }
/** /**
@ -436,7 +458,7 @@ public final class ShardRouting implements Streamable, ToXContent {
assert state == ShardRoutingState.RELOCATING : this; assert state == ShardRoutingState.RELOCATING : this;
assert assignedToNode() : this; assert assignedToNode() : this;
assert relocatingNodeId != null : this; assert relocatingNodeId != null : this;
expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
state = ShardRoutingState.STARTED; state = ShardRoutingState.STARTED;
relocatingNodeId = null; relocatingNodeId = null;
allocationId = AllocationId.cancelRelocation(allocationId); allocationId = AllocationId.cancelRelocation(allocationId);
@ -470,6 +492,7 @@ public final class ShardRouting implements Streamable, ToXContent {
// relocation target // relocation target
allocationId = AllocationId.finishRelocation(allocationId); allocationId = AllocationId.finishRelocation(allocationId);
} }
expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE;
state = ShardRoutingState.STARTED; state = ShardRoutingState.STARTED;
} }
@ -669,6 +692,9 @@ public final class ShardRouting implements Streamable, ToXContent {
if (this.unassignedInfo != null) { if (this.unassignedInfo != null) {
sb.append(", ").append(unassignedInfo.toString()); sb.append(", ").append(unassignedInfo.toString());
} }
if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE) {
sb.append(", expected_shard_size[").append(expectedShardSize).append("]");
}
return sb.toString(); return sb.toString();
} }
@ -682,7 +708,9 @@ public final class ShardRouting implements Streamable, ToXContent {
.field("shard", shardId().id()) .field("shard", shardId().id())
.field("index", shardId().index().name()) .field("index", shardId().index().name())
.field("version", version); .field("version", version);
if (expectedShardSize != UNAVAILABLE_EXPECTED_SHARD_SIZE){
builder.field("expected_shard_size_in_bytes", expectedShardSize);
}
if (restoreSource() != null) { if (restoreSource() != null) {
builder.field("restore_source"); builder.field("restore_source");
restoreSource().toXContent(builder, params); restoreSource().toXContent(builder, params);
@ -709,4 +737,12 @@ public final class ShardRouting implements Streamable, ToXContent {
boolean isFrozen() { boolean isFrozen() {
return frozen; return frozen;
} }
/**
* Returns the expected shard size for {@link ShardRoutingState#RELOCATING} and {@link ShardRoutingState#INITIALIZING}
* shards. If the size is not available, {@value #UNAVAILABLE_EXPECTED_SHARD_SIZE} will be returned.
*/
public long getExpectedShardSize() {
return expectedShardSize;
}
} }
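Note: -1 is a sentinel, not a size, so consumers of the new accessor have to branch on it. A short sketch of defensive use (`shardRouting` and `logger` are assumed from an enclosing class):

long size = shardRouting.getExpectedShardSize();
if (size == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE) {
    logger.trace("no expected size yet for {}", shardRouting); // no ClusterInfo sample
} else {
    logger.trace("recovery of {} expects [{}] bytes", shardRouting, size);
}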

View File

@ -507,7 +507,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
Decision decision = allocation.deciders().canAllocate(shard, target, allocation); Decision decision = allocation.deciders().canAllocate(shard, target, allocation);
if (decision.type() == Type.YES) { // TODO maybe we can respect throttling here too? if (decision.type() == Type.YES) { // TODO maybe we can respect throttling here too?
sourceNode.removeShard(shard); sourceNode.removeShard(shard);
ShardRouting targetRelocatingShard = routingNodes.relocate(shard, target.nodeId()); ShardRouting targetRelocatingShard = routingNodes.relocate(shard, target.nodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
currentNode.addShard(targetRelocatingShard, decision); currentNode.addShard(targetRelocatingShard, decision);
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("Moved shard [{}] to node [{}]", shard, currentNode.getNodeId()); logger.trace("Moved shard [{}] to node [{}]", shard, currentNode.getNodeId());
@ -687,7 +687,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId()); logger.trace("Assigned shard [{}] to [{}]", shard, minNode.getNodeId());
} }
routingNodes.initialize(shard, routingNodes.node(minNode.getNodeId()).nodeId()); routingNodes.initialize(shard, routingNodes.node(minNode.getNodeId()).nodeId(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
changed = true; changed = true;
continue; // don't add to ignoreUnassigned continue; // don't add to ignoreUnassigned
} else { } else {
@ -779,10 +779,10 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
/* now allocate on the cluster - if we are started we need to relocate the shard */ /* now allocate on the cluster - if we are started we need to relocate the shard */
if (candidate.started()) { if (candidate.started()) {
RoutingNode lowRoutingNode = routingNodes.node(minNode.getNodeId()); RoutingNode lowRoutingNode = routingNodes.node(minNode.getNodeId());
routingNodes.relocate(candidate, lowRoutingNode.nodeId()); routingNodes.relocate(candidate, lowRoutingNode.nodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
} else { } else {
routingNodes.initialize(candidate, routingNodes.node(minNode.getNodeId()).nodeId()); routingNodes.initialize(candidate, routingNodes.node(minNode.getNodeId()).nodeId(), allocation.clusterInfo().getShardSize(candidate, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
} }
return true; return true;

View File

@ -231,7 +231,7 @@ public class AllocateAllocationCommand implements AllocationCommand {
unassigned.updateUnassignedInfo(new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, unassigned.updateUnassignedInfo(new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
"force allocation from previous reason " + unassigned.unassignedInfo().getReason() + ", " + unassigned.unassignedInfo().getMessage(), unassigned.unassignedInfo().getFailure())); "force allocation from previous reason " + unassigned.unassignedInfo().getReason() + ", " + unassigned.unassignedInfo().getMessage(), unassigned.unassignedInfo().getFailure()));
} }
it.initialize(routingNode.nodeId()); it.initialize(routingNode.nodeId(), unassigned.version(), allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
break; break;
} }
return new RerouteExplanation(this, decision); return new RerouteExplanation(this, decision);

View File

@ -178,7 +178,7 @@ public class MoveAllocationCommand implements AllocationCommand {
if (decision.type() == Decision.Type.THROTTLE) { if (decision.type() == Decision.Type.THROTTLE) {
// its being throttled, maybe have a flag to take it into account and fail? for now, just do it since the "user" wants it... // its being throttled, maybe have a flag to take it into account and fail? for now, just do it since the "user" wants it...
} }
allocation.routingNodes().relocate(shardRouting, toRoutingNode.nodeId()); allocation.routingNodes().relocate(shardRouting, toRoutingNode.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
} }
if (!found) { if (!found) {

View File

@ -0,0 +1,71 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
import java.util.ArrayList;
import java.util.List;
/**
* Encapsulates an accumulation of validation errors
*/
public class ValidationException extends IllegalArgumentException {
private final List<String> validationErrors = new ArrayList<>();
public ValidationException() {
super("validation failed");
}
/**
* Add a new validation error to the accumulating validation errors
* @param error the error to add
*/
public void addValidationError(String error) {
validationErrors.add(error);
}
/**
* Add a sequence of validation errors to the accumulating validation errors
* @param errors the errors to add
*/
public void addValidationErrors(Iterable<String> errors) {
for (String error : errors) {
validationErrors.add(error);
}
}
/**
* Returns the validation errors accumulated
* @return the accumulated validation errors
*/
public List<String> validationErrors() {
return validationErrors;
}
@Override
public String getMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Validation Failed: ");
int index = 0;
for (String error : validationErrors) {
sb.append(++index).append(": ").append(error).append(";");
}
return sb.toString();
}
}
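Note: a usage sketch for the new class, with hypothetical `name` and `timeout` request fields; getMessage() numbers the errors as shown in the comment:

ValidationException e = new ValidationException();
if (name == null) {
    e.addValidationError("name is missing");
}
if (timeout < 0) {
    e.addValidationError("timeout must be non-negative");
}
if (e.validationErrors().isEmpty() == false) {
    // message: "Validation Failed: 1: name is missing;2: timeout must be non-negative;"
    throw e;
}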

View File

@ -360,10 +360,11 @@ public class HttpDownloadHelper {
if (connection instanceof HttpURLConnection) { if (connection instanceof HttpURLConnection) {
((HttpURLConnection) connection).setInstanceFollowRedirects(false); ((HttpURLConnection) connection).setInstanceFollowRedirects(false);
((HttpURLConnection) connection).setUseCaches(true); connection.setUseCaches(true);
((HttpURLConnection) connection).setConnectTimeout(5000); connection.setConnectTimeout(5000);
} }
connection.setRequestProperty("ES-Version", Version.CURRENT.toString()); connection.setRequestProperty("ES-Version", Version.CURRENT.toString());
connection.setRequestProperty("ES-Build-Hash", Build.CURRENT.hashShort());
connection.setRequestProperty("User-Agent", "elasticsearch-plugin-manager"); connection.setRequestProperty("User-Agent", "elasticsearch-plugin-manager");
// connect to the remote site (may take some time) // connect to the remote site (may take some time)

View File

@ -1,65 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.inject;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import java.lang.reflect.Constructor;
/**
*
*/
public class Modules {
public static Module createModule(Class<? extends Module> moduleClass, @Nullable Settings settings) {
Constructor<? extends Module> constructor;
try {
constructor = moduleClass.getConstructor(Settings.class);
try {
return constructor.newInstance(settings);
} catch (Exception e) {
throw new ElasticsearchException("Failed to create module [" + moduleClass + "]", e);
}
} catch (NoSuchMethodException e) {
try {
constructor = moduleClass.getConstructor();
try {
return constructor.newInstance();
} catch (Exception e1) {
throw new ElasticsearchException("Failed to create module [" + moduleClass + "]", e);
}
} catch (NoSuchMethodException e1) {
throw new ElasticsearchException("No constructor for [" + moduleClass + "]");
}
}
}
public static void processModules(Iterable<Module> modules) {
for (Module module : modules) {
if (module instanceof PreProcessModule) {
for (Module module1 : modules) {
((PreProcessModule) module).processModule(module1);
}
}
}
}
}

View File

@ -31,20 +31,9 @@ public class ModulesBuilder implements Iterable<Module> {
private final List<Module> modules = Lists.newArrayList(); private final List<Module> modules = Lists.newArrayList();
public ModulesBuilder add(Module... modules) { public ModulesBuilder add(Module... newModules) {
for (Module module : modules) { for (Module module : newModules) {
add(module);
}
return this;
}
public ModulesBuilder add(Module module) {
modules.add(module); modules.add(module);
if (module instanceof SpawnModules) {
Iterable<? extends Module> spawned = ((SpawnModules) module).spawnModules();
for (Module spawn : spawned) {
add(spawn);
}
} }
return this; return this;
} }
@ -55,7 +44,6 @@ public class ModulesBuilder implements Iterable<Module> {
} }
public Injector createInjector() { public Injector createInjector() {
Modules.processModules(modules);
Injector injector = Guice.createInjector(modules); Injector injector = Guice.createInjector(modules);
Injectors.cleanCaches(injector); Injectors.cleanCaches(injector);
// in ES, we always create all instances as if they are eager singletons // in ES, we always create all instances as if they are eager singletons
@ -65,7 +53,6 @@ public class ModulesBuilder implements Iterable<Module> {
} }
public Injector createChildInjector(Injector injector) { public Injector createChildInjector(Injector injector) {
Modules.processModules(modules);
Injector childInjector = injector.createChildInjector(modules); Injector childInjector = injector.createChildInjector(modules);
Injectors.cleanCaches(childInjector); Injectors.cleanCaches(childInjector);
// in ES, we always create all instances as if they are eager singletons // in ES, we always create all instances as if they are eager singletons
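Note: with SpawnModules and PreProcessModule gone, callers simply list every module up front. A sketch, where FooModule and BarModule are hypothetical placeholders:

ModulesBuilder modules = new ModulesBuilder();
modules.add(new FooModule(settings), new BarModule()); // hypothetical modules
Injector injector = modules.createInjector();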

View File

@ -18,7 +18,6 @@ package org.elasticsearch.common.inject.spi;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import com.google.common.collect.Sets; import com.google.common.collect.Sets;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.common.inject.*; import org.elasticsearch.common.inject.*;
import org.elasticsearch.common.inject.binder.AnnotatedBindingBuilder; import org.elasticsearch.common.inject.binder.AnnotatedBindingBuilder;
import org.elasticsearch.common.inject.binder.AnnotatedConstantBindingBuilder; import org.elasticsearch.common.inject.binder.AnnotatedConstantBindingBuilder;
@ -342,7 +341,7 @@ public final class Elements {
return builder; return builder;
} }
private static ESLogger logger = Loggers.getLogger(Bootstrap.class); private static ESLogger logger = Loggers.getLogger(Elements.class);
protected Object getSource() { protected Object getSource() {
Object ret; Object ret;

View File

@ -542,6 +542,8 @@ public abstract class StreamInput extends InputStream {
return (T) readStackTrace(new IllegalStateException(readOptionalString(), readThrowable()), this); return (T) readStackTrace(new IllegalStateException(readOptionalString(), readThrowable()), this);
case 17: case 17:
return (T) readStackTrace(new LockObtainFailedException(readOptionalString(), readThrowable()), this); return (T) readStackTrace(new LockObtainFailedException(readOptionalString(), readThrowable()), this);
case 18:
return (T) readStackTrace(new InterruptedException(readOptionalString()), this);
default: default:
assert false : "no such exception for id: " + key; assert false : "no such exception for id: " + key;
} }

View File

@ -590,6 +590,9 @@ public abstract class StreamOutput extends OutputStream {
writeVInt(16); writeVInt(16);
} else if (throwable instanceof LockObtainFailedException) { } else if (throwable instanceof LockObtainFailedException) {
writeVInt(17); writeVInt(17);
} else if (throwable instanceof InterruptedException) {
writeVInt(18);
writeCause = false;
} else { } else {
ElasticsearchException ex; ElasticsearchException ex;
final String name = throwable.getClass().getName(); final String name = throwable.getClass().getName();
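Note: ids 0-17 were already taken, so InterruptedException becomes id 18 on both sides of the wire; writeCause is cleared because InterruptedException has no (message, cause) constructor, matching the read side above, which restores only the message and stack trace. A round-trip sketch, assuming BytesStreamOutput and StreamInput.wrap from this same codebase:

BytesStreamOutput out = new BytesStreamOutput();
out.writeThrowable(new InterruptedException("interrupted while waiting"));
StreamInput in = StreamInput.wrap(out.bytes().toBytes());
Throwable t = in.readThrowable();
assert t instanceof InterruptedException : t;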

View File

@ -275,9 +275,9 @@ public class Joda {
.toFormatter() .toFormatter()
.withZoneUTC(); .withZoneUTC();
DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(longFormatter.withZone(DateTimeZone.UTC).getPrinter(), new DateTimeParser[] {longFormatter.getParser(), shortFormatter.getParser()}); DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder().append(longFormatter.withZone(DateTimeZone.UTC).getPrinter(), new DateTimeParser[]{longFormatter.getParser(), shortFormatter.getParser(), new EpochTimeParser(true)});
return new FormatDateTimeFormatter("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd", builder.toFormatter().withZone(DateTimeZone.UTC), Locale.ROOT); return new FormatDateTimeFormatter("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis", builder.toFormatter().withZone(DateTimeZone.UTC), Locale.ROOT);
} }
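Note: with the extra parser appended, the default date format also accepts epoch milliseconds. A sketch, assuming Joda.forPattern resolves the same combined pattern:

FormatDateTimeFormatter fmt =
        Joda.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis");
long a = fmt.parser().parseMillis("2015/08/24 19:06:58"); // date pattern
long b = fmt.parser().parseMillis("1440468418000");       // epoch millis now parse too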

View File

@ -84,6 +84,7 @@ public class Loggers {
} }
} }
@SuppressForbidden(reason = "do not know what this method does")
public static ESLogger getLogger(String loggerName, Settings settings, String... prefixes) { public static ESLogger getLogger(String loggerName, Settings settings, String... prefixes) {
List<String> prefixesList = newArrayList(); List<String> prefixesList = newArrayList();
if (settings.getAsBoolean("logger.logHostAddress", false)) { if (settings.getAsBoolean("logger.logHostAddress", false)) {

View File

@ -0,0 +1,167 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.network;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.io.IOException;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.InterfaceAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.List;
import java.util.Locale;
/**
* Simple class to log {@code ifconfig}-style output at DEBUG level.
*/
final class IfConfig {
private static final ESLogger logger = Loggers.getLogger(IfConfig.class);
private static final String INDENT = " ";
/** log interface configuration at debug level, if it is enabled */
static void logIfNecessary() {
if (logger.isDebugEnabled()) {
try {
doLogging();
} catch (IOException | SecurityException e) {
logger.warn("unable to gather network information", e);
}
}
}
/** perform actual logging: might throw exception if things go wrong */
private static void doLogging() throws IOException {
StringBuilder msg = new StringBuilder();
for (NetworkInterface nic : NetworkUtils.getInterfaces()) {
msg.append(System.lineSeparator());
// ordinary name
msg.append(nic.getName());
msg.append(System.lineSeparator());
// display name (e.g. on windows)
if (!nic.getName().equals(nic.getDisplayName())) {
msg.append(INDENT);
msg.append(nic.getDisplayName());
msg.append(System.lineSeparator());
}
// addresses: v4 first, then v6
List<InterfaceAddress> addresses = nic.getInterfaceAddresses();
for (InterfaceAddress address : addresses) {
if (address.getAddress() instanceof Inet6Address == false) {
msg.append(INDENT);
msg.append(formatAddress(address));
msg.append(System.lineSeparator());
}
}
for (InterfaceAddress address : addresses) {
if (address.getAddress() instanceof Inet6Address) {
msg.append(INDENT);
msg.append(formatAddress(address));
msg.append(System.lineSeparator());
}
}
// hardware address
byte[] hardware = nic.getHardwareAddress();
if (hardware != null) {
msg.append(INDENT);
msg.append("hardware ");
for (int i = 0; i < hardware.length; i++) {
if (i > 0) {
msg.append(":");
}
msg.append(String.format(Locale.ROOT, "%02X", hardware[i]));
}
msg.append(System.lineSeparator());
}
// attributes
msg.append(INDENT);
msg.append(formatFlags(nic));
msg.append(System.lineSeparator());
}
logger.debug("configuration:" + System.lineSeparator() + "{}", msg.toString());
}
/** format internet address: java's default doesn't include everything useful */
private static String formatAddress(InterfaceAddress interfaceAddress) throws IOException {
StringBuilder sb = new StringBuilder();
InetAddress address = interfaceAddress.getAddress();
if (address instanceof Inet6Address) {
sb.append("inet6 ");
sb.append(NetworkAddress.formatAddress(address));
sb.append(" prefixlen:");
sb.append(interfaceAddress.getNetworkPrefixLength());
} else {
sb.append("inet ");
sb.append(NetworkAddress.formatAddress(address));
int netmask = 0xFFFFFFFF << (32 - interfaceAddress.getNetworkPrefixLength());
sb.append(" netmask:" + NetworkAddress.formatAddress(InetAddress.getByAddress(new byte[] {
(byte)(netmask >>> 24),
(byte)(netmask >>> 16 & 0xFF),
(byte)(netmask >>> 8 & 0xFF),
(byte)(netmask & 0xFF)
})));
InetAddress broadcast = interfaceAddress.getBroadcast();
if (broadcast != null) {
sb.append(" broadcast:" + NetworkAddress.formatAddress(broadcast));
}
}
if (address.isLoopbackAddress()) {
sb.append(" scope:host");
} else if (address.isLinkLocalAddress()) {
sb.append(" scope:link");
} else if (address.isSiteLocalAddress()) {
sb.append(" scope:site");
}
return sb.toString();
}
/** format network interface flags */
private static String formatFlags(NetworkInterface nic) throws SocketException {
StringBuilder flags = new StringBuilder();
if (nic.isUp()) {
flags.append("UP ");
}
if (nic.supportsMulticast()) {
flags.append("MULTICAST ");
}
if (nic.isLoopback()) {
flags.append("LOOPBACK ");
}
if (nic.isPointToPoint()) {
flags.append("POINTOPOINT ");
}
if (nic.isVirtual()) {
flags.append("VIRTUAL ");
}
flags.append("mtu:" + nic.getMTU());
flags.append(" index:" + nic.getIndex());
return flags.toString();
}
}
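Note: the class is package-private with a single entry point; the NetworkService hunk below wires it in at construction time:

// early in node startup, within org.elasticsearch.common.network:
IfConfig.logIfNecessary(); // no-op unless DEBUG is enabled for this logger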

View File

@ -0,0 +1,183 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.network;
import com.google.common.net.InetAddresses;
import org.elasticsearch.common.SuppressForbidden;
import java.net.Inet6Address;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Objects;
/**
* Utility functions for presentation of network addresses.
* <p>
* Java's address formatting is particularly bad: every address
* has an optional host attached if it is resolved, so IPv4 addresses often
* look like this (note the confusing leading slash):
* <pre>
* {@code /127.0.0.1}
* </pre>
* IPv6 addresses are even worse, with no IPv6 address compression,
* and often containing things like numeric scope ids, which are even
* more confusing (e.g. they will not work in any user's browser, and refer
* to an interface on <b>another</b> machine, etc.):
* <pre>
* {@code /0:0:0:0:0:0:0:1%1}
* </pre>
* This class provides sane address formatting instead, e.g.
* {@code 127.0.0.1} and {@code ::1} respectively. No methods do reverse
* lookups.
*/
public final class NetworkAddress {
/** No instantiation */
private NetworkAddress() {}
/**
* Formats a network address (with optional host) for display purposes.
* <p>
* If the host is already resolved (typically because we looked up
* a name to do that), then we include it, otherwise it is
* omitted. See {@link #formatAddress(InetAddress)} if you only
* want the address.
* <p>
* IPv6 addresses are compressed and without scope
* identifiers.
* <p>
* Example output with already-resolved hostnames:
* <ul>
* <li>IPv4: {@code localhost/127.0.0.1}</li>
* <li>IPv6: {@code localhost/::1}</li>
* </ul>
* <p>
* Example output with just an address:
* <ul>
* <li>IPv4: {@code 127.0.0.1}</li>
* <li>IPv6: {@code ::1}</li>
* </ul>
* @param address IPv4 or IPv6 address
* @return formatted string
* @see #formatAddress(InetAddress)
*/
public static String format(InetAddress address) {
return format(address, -1, true);
}
/**
* Formats a network address and port for display purposes.
* <p>
* If the host is already resolved (typically because we looked up
* a name to do that), then we include it, otherwise it is
* omitted. See {@link #formatAddress(InetSocketAddress)} if you only
* want the address.
* <p>
* This formats the address with {@link #format(InetAddress)}
* and appends the port number. IPv6 addresses will be bracketed.
* <p>
* Example output with already-resolved hostnames:
* <ul>
* <li>IPv4: {@code localhost/127.0.0.1:9300}</li>
* <li>IPv6: {@code localhost/[::1]:9300}</li>
* </ul>
* <p>
* Example output with just an address:
* <ul>
* <li>IPv4: {@code 127.0.0.1:9300}</li>
* <li>IPv6: {@code [::1]:9300}</li>
* </ul>
* @param address IPv4 or IPv6 address with port
* @return formatted string
* @see #formatAddress(InetSocketAddress)
*/
public static String format(InetSocketAddress address) {
return format(address.getAddress(), address.getPort(), true);
}
/**
* Formats a network address for display purposes.
* <p>
* This formats only the address, any hostname information,
* if present, is ignored. IPv6 addresses are compressed
* and without scope identifiers.
* <p>
* Example output with just an address:
* <ul>
* <li>IPv4: {@code 127.0.0.1}</li>
* <li>IPv6: {@code ::1}</li>
* </ul>
* @param address IPv4 or IPv6 address
* @return formatted string
*/
public static String formatAddress(InetAddress address) {
return format(address, -1, false);
}
/**
* Formats a network address and port for display purposes.
* <p>
* This formats the address with {@link #formatAddress(InetAddress)}
* and appends the port number. IPv6 addresses will be bracketed.
* Any host information, if present, is ignored.
* <p>
* Example output:
* <ul>
* <li>IPv4: {@code 127.0.0.1:9300}</li>
* <li>IPv6: {@code [::1]:9300}</li>
* </ul>
* @param address IPv4 or IPv6 address with port
* @return formatted string
*/
public static String formatAddress(InetSocketAddress address) {
return format(address.getAddress(), address.getPort(), false);
}
// note, we don't validate port, because we only allow InetSocketAddress
@SuppressForbidden(reason = "we call toString to avoid a DNS lookup")
static String format(InetAddress address, int port, boolean includeHost) {
Objects.requireNonNull(address);
StringBuilder builder = new StringBuilder();
if (includeHost) {
// must use toString to avoid a DNS lookup, but the format is specified in the spec
String toString = address.toString();
int separator = toString.indexOf('/');
if (separator > 0) {
// append hostname, with the slash too
builder.append(toString, 0, separator + 1);
}
}
if (port != -1 && address instanceof Inet6Address) {
builder.append(InetAddresses.toUriString(address));
} else {
builder.append(InetAddresses.toAddrString(address));
}
if (port != -1) {
builder.append(':');
builder.append(port);
}
return builder.toString();
}
}
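Note: a sketch of the resulting output; InetAddress.getByName with an address literal does not perform a DNS lookup:

InetAddress v4 = InetAddress.getByName("127.0.0.1");    // literal, no lookup
InetAddress v6 = InetAddress.getByName("::1");
NetworkAddress.format(v4);                              // "127.0.0.1"
NetworkAddress.formatAddress(v6);                       // "::1"
NetworkAddress.format(new InetSocketAddress(v6, 9300)); // "[::1]:9300"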

View File

@ -82,7 +82,7 @@ public class NetworkService extends AbstractComponent {
@Inject @Inject
public NetworkService(Settings settings) { public NetworkService(Settings settings) {
super(settings); super(settings);
InetSocketTransportAddress.setResolveAddress(settings.getAsBoolean("network.address.serialization.resolve", false)); IfConfig.logIfNecessary();
} }
/** /**

View File

@ -21,8 +21,6 @@ package org.elasticsearch.common.network;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Constants; import org.apache.lucene.util.Constants;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import java.net.Inet4Address; import java.net.Inet4Address;
import java.net.Inet6Address; import java.net.Inet6Address;
@ -34,10 +32,12 @@ import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.Comparator; import java.util.Comparator;
import java.util.HashSet;
import java.util.List; import java.util.List;
/** /**
* Utilities for network interfaces / addresses * Utilities for network interfaces / addresses binding and publishing.
* It is only intended for that purpose, not for general-purpose use.
*/ */
public abstract class NetworkUtils { public abstract class NetworkUtils {
@ -53,6 +53,31 @@ public abstract class NetworkUtils {
@Deprecated @Deprecated
static final boolean PREFER_V6 = Boolean.parseBoolean(System.getProperty("java.net.preferIPv6Addresses", "false")); static final boolean PREFER_V6 = Boolean.parseBoolean(System.getProperty("java.net.preferIPv6Addresses", "false"));
/**
* True if we can bind to a v6 address. It's silly, but for *binding* we need to know
* if the stack works. This can prevent scary noise on IPv4-only hosts.
* @deprecated transition mechanism only, do not use
*/
@Deprecated
public static final boolean SUPPORTS_V6;
static {
boolean v = false;
try {
for (NetworkInterface nic : getInterfaces()) {
for (InetAddress address : Collections.list(nic.getInetAddresses())) {
if (address instanceof Inet6Address) {
v = true;
break;
}
}
}
} catch (SecurityException | SocketException misconfiguration) {
v = true; // be optimistic: if you misconfigure, you get noise on your screen
}
SUPPORTS_V6 = v;
}
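Note: a sketch of the intended use, choosing a wildcard bind address. This usage is an assumption on my part; the flag itself only reports whether any interface carries a v6 address:

InetAddress wildcard = NetworkUtils.SUPPORTS_V6
        ? InetAddress.getByName("::")       // v6 wildcard on a working v6 stack
        : InetAddress.getByName("0.0.0.0"); // avoid scary noise on v4-only hosts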
/** Sorts an address by preference. This way code like publishing can just pick the first one */ /** Sorts an address by preference. This way code like publishing can just pick the first one */
static int sortKey(InetAddress address, boolean prefer_v6) { static int sortKey(InetAddress address, boolean prefer_v6) {
int key = address.getAddress().length; int key = address.getAddress().length;
@ -84,7 +109,7 @@ public abstract class NetworkUtils {
* @deprecated remove this when multihoming is really correct * @deprecated remove this when multihoming is really correct
*/ */
@Deprecated @Deprecated
private static void sortAddresses(List<InetAddress> list) { static void sortAddresses(List<InetAddress> list) {
Collections.sort(list, new Comparator<InetAddress>() { Collections.sort(list, new Comparator<InetAddress>() {
@Override @Override
public int compare(InetAddress left, InetAddress right) { public int compare(InetAddress left, InetAddress right) {
@ -97,8 +122,6 @@ public abstract class NetworkUtils {
}); });
} }
private final static ESLogger logger = Loggers.getLogger(NetworkUtils.class);
/** Return all interfaces (and subinterfaces) on the system */ /** Return all interfaces (and subinterfaces) on the system */
static List<NetworkInterface> getInterfaces() throws SocketException { static List<NetworkInterface> getInterfaces() throws SocketException {
List<NetworkInterface> all = new ArrayList<>(); List<NetworkInterface> all = new ArrayList<>();
@ -128,7 +151,7 @@ public abstract class NetworkUtils {
} }
/** Returns addresses for all loopback interfaces that are up. */ /** Returns addresses for all loopback interfaces that are up. */
public static InetAddress[] getLoopbackAddresses() throws SocketException { static InetAddress[] getLoopbackAddresses() throws SocketException {
List<InetAddress> list = new ArrayList<>(); List<InetAddress> list = new ArrayList<>();
for (NetworkInterface intf : getInterfaces()) { for (NetworkInterface intf : getInterfaces()) {
if (intf.isLoopback() && intf.isUp()) { if (intf.isLoopback() && intf.isUp()) {
@ -143,7 +166,7 @@ public abstract class NetworkUtils {
} }
/** Returns addresses for the first non-loopback interface that is up. */ /** Returns addresses for the first non-loopback interface that is up. */
public static InetAddress[] getFirstNonLoopbackAddresses() throws SocketException { static InetAddress[] getFirstNonLoopbackAddresses() throws SocketException {
List<InetAddress> list = new ArrayList<>(); List<InetAddress> list = new ArrayList<>();
for (NetworkInterface intf : getInterfaces()) { for (NetworkInterface intf : getInterfaces()) {
if (intf.isLoopback() == false && intf.isUp()) { if (intf.isLoopback() == false && intf.isUp()) {
@ -159,7 +182,7 @@ public abstract class NetworkUtils {
} }
/** Returns addresses for the given interface (it must be marked up) */ /** Returns addresses for the given interface (it must be marked up) */
public static InetAddress[] getAddressesForInterface(String name) throws SocketException { static InetAddress[] getAddressesForInterface(String name) throws SocketException {
NetworkInterface intf = NetworkInterface.getByName(name); NetworkInterface intf = NetworkInterface.getByName(name);
if (intf == null) { if (intf == null) {
throw new IllegalArgumentException("No interface named '" + name + "' found, got " + getInterfaces()); throw new IllegalArgumentException("No interface named '" + name + "' found, got " + getInterfaces());
@ -176,14 +199,17 @@ public abstract class NetworkUtils {
} }
/** Returns addresses for the given host, sorted by order of preference */ /** Returns addresses for the given host, sorted by order of preference */
public static InetAddress[] getAllByName(String host) throws UnknownHostException { static InetAddress[] getAllByName(String host) throws UnknownHostException {
InetAddress addresses[] = InetAddress.getAllByName(host); InetAddress addresses[] = InetAddress.getAllByName(host);
sortAddresses(Arrays.asList(addresses)); // deduplicate, in case of resolver misconfiguration
return addresses; // stuff like https://bugzilla.redhat.com/show_bug.cgi?id=496300
List<InetAddress> unique = new ArrayList<>(new HashSet<>(Arrays.asList(addresses)));
sortAddresses(unique);
return unique.toArray(new InetAddress[unique.size()]);
} }
/** Returns only the IPV4 addresses in {@code addresses} */ /** Returns only the IPV4 addresses in {@code addresses} */
public static InetAddress[] filterIPV4(InetAddress addresses[]) { static InetAddress[] filterIPV4(InetAddress addresses[]) {
List<InetAddress> list = new ArrayList<>(); List<InetAddress> list = new ArrayList<>();
for (InetAddress address : addresses) { for (InetAddress address : addresses) {
if (address instanceof Inet4Address) { if (address instanceof Inet4Address) {
@ -197,7 +223,7 @@ public abstract class NetworkUtils {
} }
/** Returns only the IPV6 addresses in {@code addresses} */ /** Returns only the IPV6 addresses in {@code addresses} */
public static InetAddress[] filterIPV6(InetAddress addresses[]) { static InetAddress[] filterIPV6(InetAddress addresses[]) {
List<InetAddress> list = new ArrayList<>(); List<InetAddress> list = new ArrayList<>();
for (InetAddress address : addresses) { for (InetAddress address : addresses) {
if (address instanceof Inet6Address) { if (address instanceof Inet6Address) {

View File

@ -28,7 +28,6 @@ import com.google.common.collect.Lists;
import com.google.common.collect.Maps; import com.google.common.collect.Maps;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Classes;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;

View File

@ -21,9 +21,9 @@ package org.elasticsearch.common.transport;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.network.NetworkAddress;
import java.io.IOException; import java.io.IOException;
import java.net.Inet6Address;
import java.net.InetAddress; import java.net.InetAddress;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
@ -32,52 +32,34 @@ import java.net.InetSocketAddress;
*/ */
public final class InetSocketTransportAddress implements TransportAddress { public final class InetSocketTransportAddress implements TransportAddress {
private static boolean resolveAddress = false;
public static void setResolveAddress(boolean resolveAddress) {
InetSocketTransportAddress.resolveAddress = resolveAddress;
}
public static boolean getResolveAddress() {
return resolveAddress;
}
public static final InetSocketTransportAddress PROTO = new InetSocketTransportAddress(); public static final InetSocketTransportAddress PROTO = new InetSocketTransportAddress();
private final InetSocketAddress address; private final InetSocketAddress address;
public InetSocketTransportAddress(StreamInput in) throws IOException { public InetSocketTransportAddress(StreamInput in) throws IOException {
if (in.readByte() == 0) { final int len = in.readByte();
int len = in.readByte(); final byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6)
byte[] a = new byte[len]; // 4 bytes (IPv4) or 16 bytes (IPv6)
in.readFully(a); in.readFully(a);
InetAddress inetAddress; InetAddress inetAddress = InetAddress.getByAddress(a);
if (len == 16) {
int scope_id = in.readInt();
inetAddress = Inet6Address.getByAddress(null, a, scope_id);
} else {
inetAddress = InetAddress.getByAddress(a);
}
int port = in.readInt(); int port = in.readInt();
this.address = new InetSocketAddress(inetAddress, port); this.address = new InetSocketAddress(inetAddress, port);
} else {
this.address = new InetSocketAddress(in.readString(), in.readInt());
}
} }
private InetSocketTransportAddress() { private InetSocketTransportAddress() {
address = null; address = null;
} }
public InetSocketTransportAddress(String hostname, int port) {
this(new InetSocketAddress(hostname, port));
}
public InetSocketTransportAddress(InetAddress address, int port) { public InetSocketTransportAddress(InetAddress address, int port) {
this(new InetSocketAddress(address, port)); this(new InetSocketAddress(address, port));
} }
public InetSocketTransportAddress(InetSocketAddress address) { public InetSocketTransportAddress(InetSocketAddress address) {
if (address == null) {
throw new IllegalArgumentException("InetSocketAddress must not be null");
}
if (address.getAddress() == null) {
throw new IllegalArgumentException("Address must be resolved but wasn't - InetSocketAddress#getAddress() returned null");
}
this.address = address; this.address = address;
} }
@ -94,12 +76,12 @@ public final class InetSocketTransportAddress implements TransportAddress {
@Override @Override
public String getHost() { public String getHost() {
return address.getHostName(); return getAddress(); // just delegate no resolving
} }
@Override @Override
public String getAddress() { public String getAddress() {
return address.getAddress().getHostAddress(); return NetworkAddress.formatAddress(address.getAddress());
} }
@Override @Override
@ -118,20 +100,16 @@ public final class InetSocketTransportAddress implements TransportAddress {
@Override @Override
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
if (!resolveAddress && address.getAddress() != null) {
out.writeByte((byte) 0);
byte[] bytes = address().getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6) byte[] bytes = address().getAddress().getAddress(); // 4 bytes (IPv4) or 16 bytes (IPv6)
out.writeByte((byte) bytes.length); // 1 byte out.writeByte((byte) bytes.length); // 1 byte
out.write(bytes, 0, bytes.length); out.write(bytes, 0, bytes.length);
if (address().getAddress() instanceof Inet6Address) // don't serialize scope ids over the network!!!!
out.writeInt(((Inet6Address) address.getAddress()).getScopeId()); // these only make sense with respect to the local machine, and will only formulate
} else { // the address incorrectly remotely.
out.writeByte((byte) 1);
out.writeString(address.getHostName());
}
out.writeInt(address.getPort()); out.writeInt(address.getPort());
} }
@Override @Override
public boolean equals(Object o) { public boolean equals(Object o) {
if (this == o) return true; if (this == o) return true;
@ -147,6 +125,6 @@ public final class InetSocketTransportAddress implements TransportAddress {
@Override @Override
public String toString() { public String toString() {
return "inet[" + address + "]"; return NetworkAddress.format(address);
} }
} }
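Note: the wire format is now fixed-length and hostname-free (one length byte, the raw 4- or 16-byte address, then the port), so scope ids can no longer leak across machines. A round-trip sketch under the same BytesStreamOutput/StreamInput assumptions as above:

InetSocketTransportAddress original =
        new InetSocketTransportAddress(InetAddress.getByName("::1"), 9300);
BytesStreamOutput out = new BytesStreamOutput();
original.writeTo(out);
InetSocketTransportAddress copy =
        new InetSocketTransportAddress(StreamInput.wrap(out.bytes().toBytes()));
assert copy.equals(original);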

View File

@ -187,7 +187,7 @@ public abstract class ExtensionPoint {
protected final void bindExtensions(Binder binder) { protected final void bindExtensions(Binder binder) {
Multibinder<T> allocationMultibinder = Multibinder.newSetBinder(binder, extensionClass); Multibinder<T> allocationMultibinder = Multibinder.newSetBinder(binder, extensionClass);
for (Class<? extends T> clazz : extensions) { for (Class<? extends T> clazz : extensions) {
allocationMultibinder.addBinding().to(clazz); allocationMultibinder.addBinding().to(clazz).asEagerSingleton();
} }
} }
} }

View File

@ -19,18 +19,20 @@
package org.elasticsearch.discovery; package org.elasticsearch.discovery;
import com.google.common.collect.Lists;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder; import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.discovery.local.LocalDiscovery; import org.elasticsearch.discovery.local.LocalDiscovery;
import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.discovery.zen.ping.ZenPingService; import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider; import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import java.util.ArrayList;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@ -44,7 +46,8 @@ public class DiscoveryModule extends AbstractModule {
public static final String ZEN_MASTER_SERVICE_TYPE_KEY = "discovery.zen.masterservice.type"; public static final String ZEN_MASTER_SERVICE_TYPE_KEY = "discovery.zen.masterservice.type";
private final Settings settings; private final Settings settings;
private final List<Class<? extends UnicastHostsProvider>> unicastHostProviders = Lists.newArrayList(); private final List<Class<? extends UnicastHostsProvider>> unicastHostProviders = new ArrayList<>();
private final ExtensionPoint.ClassSet<ZenPing> zenPings = new ExtensionPoint.ClassSet<>("zen_ping", ZenPing.class);
private final Map<String, Class<? extends Discovery>> discoveryTypes = new HashMap<>(); private final Map<String, Class<? extends Discovery>> discoveryTypes = new HashMap<>();
private final Map<String, Class<? extends ElectMasterService>> masterServiceType = new HashMap<>(); private final Map<String, Class<? extends ElectMasterService>> masterServiceType = new HashMap<>();
@ -53,6 +56,8 @@ public class DiscoveryModule extends AbstractModule {
addDiscoveryType("local", LocalDiscovery.class); addDiscoveryType("local", LocalDiscovery.class);
addDiscoveryType("zen", ZenDiscovery.class); addDiscoveryType("zen", ZenDiscovery.class);
addElectMasterService("zen", ElectMasterService.class); addElectMasterService("zen", ElectMasterService.class);
// always add the unicast hosts, or things get angry!
addZenPing(UnicastZenPing.class);
} }
/** /**
@ -82,6 +87,10 @@ public class DiscoveryModule extends AbstractModule {
this.masterServiceType.put(type, masterService); this.masterServiceType.put(type, masterService);
} }
public void addZenPing(Class<? extends ZenPing> clazz) {
zenPings.registerExtension(clazz);
}
@Override @Override
protected void configure() { protected void configure() {
String defaultType = DiscoveryNode.localNode(settings) ? "local" : "zen"; String defaultType = DiscoveryNode.localNode(settings) ? "local" : "zen";
@ -107,6 +116,7 @@ public class DiscoveryModule extends AbstractModule {
for (Class<? extends UnicastHostsProvider> unicastHostProvider : unicastHostProviders) { for (Class<? extends UnicastHostsProvider> unicastHostProvider : unicastHostProviders) {
unicastHostsProviderMultibinder.addBinding().to(unicastHostProvider); unicastHostsProviderMultibinder.addBinding().to(unicastHostProvider);
} }
zenPings.bind(binder());
} }
bind(Discovery.class).to(discoveryClass).asEagerSingleton(); bind(Discovery.class).to(discoveryClass).asEagerSingleton();
bind(DiscoveryService.class).asEagerSingleton(); bind(DiscoveryService.class).asEagerSingleton();
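Note: custom pings now register through the extension point rather than being constructed inside ZenPingService. A sketch of how a plugin might hook in; MyZenPing and the onModule hook shown here are assumptions, not part of this diff:

public class MyDiscoveryPlugin extends Plugin {
    @Override public String name() { return "my-discovery"; }
    @Override public String description() { return "registers a custom ZenPing"; }

    public void onModule(DiscoveryModule module) {
        module.addZenPing(MyZenPing.class); // hypothetical ZenPing implementation
    }
}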

View File

@ -19,21 +19,11 @@
package org.elasticsearch.discovery.zen.ping; package org.elasticsearch.discovery.zen.ping;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.multicast.MulticastZenPing;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
@ -43,28 +33,17 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.atomic.AtomicReference;
/**
*
*/
public class ZenPingService extends AbstractLifecycleComponent<ZenPing> implements ZenPing { public class ZenPingService extends AbstractLifecycleComponent<ZenPing> implements ZenPing {
private volatile List<? extends ZenPing> zenPings = Collections.emptyList(); private List<ZenPing> zenPings = Collections.emptyList();
@Inject @Inject
public ZenPingService(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName, NetworkService networkService, public ZenPingService(Settings settings, Set<ZenPing> zenPings) {
Version version, ElectMasterService electMasterService, @Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
super(settings); super(settings);
List<ZenPing> zenPingsBuilder = new ArrayList<>(); this.zenPings = Collections.unmodifiableList(new ArrayList<>(zenPings));
if (this.settings.getAsBoolean("discovery.zen.ping.multicast.enabled", true)) {
zenPingsBuilder.add(new MulticastZenPing(settings, threadPool, transportService, clusterName, networkService, version));
}
// always add the unicast hosts, so it will be able to receive unicast requests even when working in multicast
zenPingsBuilder.add(new UnicastZenPing(settings, threadPool, transportService, clusterName, version, electMasterService, unicastHostsProviders));
this.zenPings = Collections.unmodifiableList(zenPingsBuilder);
} }
public List<? extends ZenPing> zenPings() { public List<ZenPing> zenPings() {
return this.zenPings; return this.zenPings;
} }

View File

@@ -29,6 +29,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
@@ -63,8 +64,12 @@ import static org.elasticsearch.discovery.zen.ping.ZenPing.PingResponse.readPing
 public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implements ZenPing {
     public static final String ACTION_NAME = "internal:discovery/zen/unicast";
+    public static final String DISCOVERY_ZEN_PING_UNICAST_HOSTS = "discovery.zen.ping.unicast.hosts";
+    // these limits are per-address
+    public static final int LIMIT_FOREIGN_PORTS_COUNT = 1;
+    public static final int LIMIT_LOCAL_PORTS_COUNT = 5;
-    public static final int LIMIT_PORTS_COUNT = 1;
     private final ThreadPool threadPool;
     private final TransportService transportService;
@@ -96,6 +101,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
     private volatile boolean closed = false;
+    @Inject
     public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName,
                           Version version, ElectMasterService electMasterService, @Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
         super(settings);
@@ -111,21 +117,30 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
         }
         this.concurrentConnects = this.settings.getAsInt("discovery.zen.ping.unicast.concurrent_connects", 10);
-        String[] hostArr = this.settings.getAsArray("discovery.zen.ping.unicast.hosts");
+        String[] hostArr = this.settings.getAsArray(DISCOVERY_ZEN_PING_UNICAST_HOSTS);
         // trim the hosts
         for (int i = 0; i < hostArr.length; i++) {
             hostArr[i] = hostArr[i].trim();
         }
         List<String> hosts = Lists.newArrayList(hostArr);
+        final int limitPortCounts;
+        if (hosts.isEmpty()) {
+            // if unicast hosts are not specified, fill with simple defaults on the local machine
+            limitPortCounts = LIMIT_LOCAL_PORTS_COUNT;
+            hosts.addAll(transportService.getLocalAddresses());
+        } else {
+            // we only limit to 1 addresses, makes no sense to ping 100 ports
+            limitPortCounts = LIMIT_FOREIGN_PORTS_COUNT;
+        }
         logger.debug("using initial hosts {}, with concurrent_connects [{}]", hosts, concurrentConnects);
         List<DiscoveryNode> configuredTargetNodes = Lists.newArrayList();
         for (String host : hosts) {
             try {
-                TransportAddress[] addresses = transportService.addressesFromString(host);
-                // we only limit to 1 addresses, makes no sense to ping 100 ports
-                for (int i = 0; (i < addresses.length && i < LIMIT_PORTS_COUNT); i++) {
-                    configuredTargetNodes.add(new DiscoveryNode(UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#", addresses[i], version.minimumCompatibilityVersion()));
+                TransportAddress[] addresses = transportService.addressesFromString(host, limitPortCounts);
+                for (TransportAddress address : addresses) {
+                    configuredTargetNodes.add(new DiscoveryNode(UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#", address, version.minimumCompatibilityVersion()));
                 }
             } catch (Exception e) {
                 throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e);
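
The two limits encode a simple policy: with no unicast hosts configured, the node probes several ports on its own local addresses, while an explicitly configured host is probed on one port only. A hypothetical standalone helper mirroring that selection (the 9300 base port and the helper itself are illustrative, not Elasticsearch APIs):

    import java.util.ArrayList;
    import java.util.List;

    public class PortLimitSketch {
        static final int LIMIT_LOCAL_PORTS_COUNT = 5;   // nothing configured: probe a few local ports
        static final int LIMIT_FOREIGN_PORTS_COUNT = 1; // configured host: no sense pinging many ports

        // expands "host" into host:port pairs, capped by the applicable limit
        static List<String> expand(String host, int basePort, int limitPortCounts) {
            List<String> addresses = new ArrayList<>();
            for (int i = 0; i < limitPortCounts; i++) {
                addresses.add(host + ":" + (basePort + i));
            }
            return addresses;
        }

        public static void main(String[] args) {
            System.out.println(expand("127.0.0.1", 9300, LIMIT_LOCAL_PORTS_COUNT));  // 9300..9304
            System.out.println(expand("10.0.0.5", 9300, LIMIT_FOREIGN_PORTS_COUNT)); // 9300 only
        }
    }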
View File
@@ -43,12 +43,10 @@ import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.nio.file.DirectoryStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.StandardCopyOption;
+import java.nio.file.*;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.regex.Matcher;
@@ -253,10 +251,9 @@ public abstract class MetaDataStateFormat<T> {
         if (dataLocations != null) { // select all eligable files first
             for (Path dataLocation : dataLocations) {
                 final Path stateDir = dataLocation.resolve(STATE_DIR_NAME);
-                if (!Files.isDirectory(stateDir)) {
-                    continue;
-                }
                 // now, iterate over the current versions, and find latest one
+                // we don't check if the stateDir is present since it could be deleted
+                // after the check. Also if there is a _state file and it's not a dir something is really wrong
                 try (DirectoryStream<Path> paths = Files.newDirectoryStream(stateDir)) { // we don't pass a glob since we need the group part for parsing
                     for (Path stateFile : paths) {
                         final Matcher matcher = stateFilePattern.matcher(stateFile.getFileName().toString());
@@ -270,6 +267,8 @@ public abstract class MetaDataStateFormat<T> {
                         files.add(pav);
                     }
                 }
+                } catch (NoSuchFileException | FileNotFoundException ex) {
+                    // no _state directory -- move on
                 }
             }
         }
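
The dropped isDirectory() pre-check was racy: the directory could be deleted between the check and the open. A minimal standalone sketch of the replacement pattern, where a missing directory simply yields an empty listing (the path in main is illustrative):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.NoSuchFileException;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.ArrayList;
    import java.util.List;

    public class TolerantListing {
        static List<Path> list(Path dir) throws IOException {
            List<Path> files = new ArrayList<>();
            try (DirectoryStream<Path> paths = Files.newDirectoryStream(dir)) {
                for (Path p : paths) {
                    files.add(p);
                }
            } catch (NoSuchFileException | FileNotFoundException ex) {
                // directory vanished or never existed -- move on, as the diff does
            }
            return files;
        }

        public static void main(String[] args) throws IOException {
            System.out.println(list(Paths.get("/tmp/does-not-exist"))); // prints []
        }
    }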
View File
@@ -94,12 +94,12 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
             DiscoveryNode node = nodesToAllocate.yesNodes.get(0);
             logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node);
             changed = true;
-            unassignedIterator.initialize(node.id(), nodesAndVersions.highestVersion);
+            unassignedIterator.initialize(node.id(), nodesAndVersions.highestVersion, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
         } else if (nodesToAllocate.throttleNodes.isEmpty() == true && nodesToAllocate.noNodes.isEmpty() == false) {
             DiscoveryNode node = nodesToAllocate.noNodes.get(0);
             logger.debug("[{}][{}]: forcing allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node);
             changed = true;
-            unassignedIterator.initialize(node.id(), nodesAndVersions.highestVersion);
+            unassignedIterator.initialize(node.id(), nodesAndVersions.highestVersion, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
         } else {
             // we are throttling this, but we have enough to allocate to this node, ignore it for now
             logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodes);
View File
@@ -169,7 +169,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
                 logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
                 // we found a match
                 changed = true;
-                unassignedIterator.initialize(nodeWithHighestMatch.nodeId());
+                unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), shard.version(), allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE));
             }
         } else if (matchingNodes.hasAnyData() == false) {
             // if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation
View File
@@ -20,6 +20,7 @@
 package org.elasticsearch.http;
 import com.google.common.collect.ImmutableMap;
+import com.google.common.io.ByteStreams;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;
@@ -30,6 +31,7 @@ import org.elasticsearch.node.service.NodeService;
 import org.elasticsearch.rest.*;
 import java.io.IOException;
+import java.io.InputStream;
 import java.nio.file.*;
 import java.nio.file.attribute.BasicFileAttributes;
 import java.util.HashMap;
@@ -114,10 +116,14 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
     }
     public void internalDispatchRequest(final HttpRequest request, final HttpChannel channel) {
-        if (request.rawPath().startsWith("/_plugin/")) {
+        String rawPath = request.rawPath();
+        if (rawPath.startsWith("/_plugin/")) {
             RestFilterChain filterChain = restController.filterChain(pluginSiteFilter);
             filterChain.continueProcessing(request, channel);
             return;
+        } else if (rawPath.equals("/favicon.ico")) {
+            handleFavicon(request, channel);
+            return;
         }
         restController.dispatchRequest(request, channel);
     }
@@ -131,6 +137,22 @@ public class HttpServer extends AbstractLifecycleComponent<HttpServer> {
         }
     }
+    void handleFavicon(HttpRequest request, HttpChannel channel) {
+        if (request.method() == RestRequest.Method.GET) {
+            try {
+                try (InputStream stream = getClass().getResourceAsStream("/config/favicon.ico")) {
+                    byte[] content = ByteStreams.toByteArray(stream);
+                    BytesRestResponse restResponse = new BytesRestResponse(RestStatus.OK, "image/x-icon", content);
+                    channel.sendResponse(restResponse);
+                }
+            } catch (IOException e) {
+                channel.sendResponse(new BytesRestResponse(INTERNAL_SERVER_ERROR));
+            }
+        } else {
+            channel.sendResponse(new BytesRestResponse(FORBIDDEN));
+        }
+    }
     void handlePluginSite(HttpRequest request, HttpChannel channel) throws IOException {
         if (disableSites) {
             channel.sendResponse(new BytesRestResponse(FORBIDDEN));
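
One subtlety in handleFavicon: getResourceAsStream() returns null when the resource is not on the classpath, and a null stream inside the try block would surface as a NullPointerException rather than the caught IOException (harmless here, since core bundles /config/favicon.ico, but worth knowing). A standalone sketch of the same resource-to-bytes pattern with an explicit null check, using plain JDK streams instead of Guava so it stays self-contained:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class ResourceSketch {
        // returns the resource bytes, or null when the resource is absent
        static byte[] readResource(String path) throws IOException {
            try (InputStream stream = ResourceSketch.class.getResourceAsStream(path)) {
                if (stream == null) {
                    return null; // resource not bundled: caller can answer 404 instead of crashing
                }
                ByteArrayOutputStream out = new ByteArrayOutputStream();
                byte[] buf = new byte[8192];
                int n;
                while ((n = stream.read(buf)) != -1) {
                    out.write(buf, 0, n);
                }
                return out.toByteArray();
            }
        }

        public static void main(String[] args) throws IOException {
            byte[] icon = readResource("/config/favicon.ico");
            System.out.println(icon == null ? "missing" : icon.length + " bytes");
        }
    }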
View File
@@ -24,6 +24,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.netty.NettyUtils;
 import org.elasticsearch.common.netty.OpenChannelsHandler;
+import org.elasticsearch.common.network.NetworkAddress;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.network.NetworkUtils;
 import org.elasticsearch.common.settings.Settings;
@@ -274,7 +275,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
     private void bindAddress(final InetAddress hostAddress) {
         PortsRange portsRange = new PortsRange(port);
         final AtomicReference<Exception> lastException = new AtomicReference<>();
-        final AtomicReference<SocketAddress> boundSocket = new AtomicReference<>();
+        final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
         boolean success = portsRange.iterate(new PortsRange.PortCallback() {
             @Override
             public boolean onPortNumber(int portNumber) {
@@ -282,7 +283,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
                 synchronized (serverChannels) {
                     Channel channel = serverBootstrap.bind(new InetSocketAddress(hostAddress, portNumber));
                     serverChannels.add(channel);
-                    boundSocket.set(channel.getLocalAddress());
+                    boundSocket.set((InetSocketAddress) channel.getLocalAddress());
                 }
             } catch (Exception e) {
                 lastException.set(e);
@@ -294,7 +295,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
         if (!success) {
             throw new BindHttpException("Failed to bind to [" + port + "]", lastException.get());
         }
-        logger.info("Bound http to address [{}]", boundSocket.get());
+        logger.info("Bound http to address {{}}", NetworkAddress.format(boundSocket.get()));
     }
     @Override
View File
@@ -26,6 +26,7 @@ import com.google.common.collect.Iterators;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
@@ -55,7 +56,6 @@ import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.InternalIndicesLifecycle;
 import org.elasticsearch.indices.cache.query.IndicesQueryCache;
 import org.elasticsearch.plugins.PluginsService;
-import org.elasticsearch.plugins.ShardsPluginsModule;
 import java.io.Closeable;
 import java.io.IOException;
@@ -270,7 +270,8 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
         }
     }
-    public synchronized IndexShard createShard(int sShardId, boolean primary) {
+    public synchronized IndexShard createShard(int sShardId, ShardRouting routing) {
+        final boolean primary = routing.primary();
         /*
          * TODO: we execute this in parallel but it's a synced method. Yet, we might
          * be able to serialize the execution via the cluster state in the future. for now we just
@@ -299,7 +300,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
             }
         }
         if (path == null) {
-            path = ShardPath.selectNewPathForShard(nodeEnv, shardId, indexSettings, getAvgShardSizeInBytes(), this);
+            path = ShardPath.selectNewPathForShard(nodeEnv, shardId, indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(), this);
             logger.debug("{} creating using a new path [{}]", shardId, path);
         } else {
             logger.debug("{} creating using an existing path [{}]", shardId, path);
@@ -315,7 +316,10 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
         final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
                 (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
         ModulesBuilder modules = new ModulesBuilder();
-        modules.add(new ShardsPluginsModule(indexSettings, pluginsService));
+        // plugin modules must be added here, before others or we can get crazy injection errors...
+        for (Module pluginModule : pluginsService.shardModules(indexSettings)) {
+            modules.add(pluginModule);
+        }
         modules.add(new IndexShardModule(shardId, primary, indexSettings));
         modules.add(new StoreModule(injector.getInstance(IndexStore.class).shardDirectory(), lock,
                 new StoreCloseListener(shardId, canDeleteShardContent, new Closeable() {
@@ -325,6 +329,9 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
             }
         }), path));
         modules.add(new DeletionPolicyModule());
+        pluginsService.processModules(modules);
         try {
             shardInjector = modules.createChildInjector(injector);
         } catch (CreationException e) {
View File
@@ -261,6 +261,9 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         if (mapper.type().length() == 0) {
             throw new InvalidTypeNameException("mapping type name is empty");
         }
+        if (Version.indexCreated(indexSettings).onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) {
+            throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
+        }
         if (mapper.type().charAt(0) == '_') {
             throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
         }
View File
@@ -78,8 +78,6 @@ public class MatchQueryBuilder extends QueryBuilder implements BoostableQueryBui
     private String minimumShouldMatch;
-    private String rewrite = null;
     private String fuzzyRewrite = null;
     private Boolean lenient;
@@ -179,11 +177,6 @@ public class MatchQueryBuilder extends QueryBuilder implements BoostableQueryBui
         return this;
     }
-    public MatchQueryBuilder rewrite(String rewrite) {
-        this.rewrite = rewrite;
-        return this;
-    }
     public MatchQueryBuilder fuzzyRewrite(String fuzzyRewrite) {
         this.fuzzyRewrite = fuzzyRewrite;
         return this;
@@ -249,9 +242,6 @@ public class MatchQueryBuilder extends QueryBuilder implements BoostableQueryBui
         if (minimumShouldMatch != null) {
             builder.field("minimum_should_match", minimumShouldMatch);
         }
-        if (rewrite != null) {
-            builder.field("rewrite", rewrite);
-        }
         if (fuzzyRewrite != null) {
             builder.field("fuzzy_rewrite", fuzzyRewrite);
         }
View File
@@ -61,8 +61,6 @@ public class MultiMatchQueryBuilder extends QueryBuilder implements BoostableQue
     private String minimumShouldMatch;
-    private String rewrite = null;
     private String fuzzyRewrite = null;
     private Boolean useDisMax;
@@ -255,11 +253,6 @@ public class MultiMatchQueryBuilder extends QueryBuilder implements BoostableQue
         return this;
     }
-    public MultiMatchQueryBuilder rewrite(String rewrite) {
-        this.rewrite = rewrite;
-        return this;
-    }
     public MultiMatchQueryBuilder fuzzyRewrite(String fuzzyRewrite) {
         this.fuzzyRewrite = fuzzyRewrite;
         return this;
@@ -367,9 +360,6 @@ public class MultiMatchQueryBuilder extends QueryBuilder implements BoostableQue
         if (minimumShouldMatch != null) {
             builder.field("minimum_should_match", minimumShouldMatch);
         }
-        if (rewrite != null) {
-            builder.field("rewrite", rewrite);
-        }
         if (fuzzyRewrite != null) {
             builder.field("fuzzy_rewrite", fuzzyRewrite);
         }
View File
@@ -68,7 +68,6 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
     private Locale locale;
     private float boost = -1;
     private Fuzziness fuzziness;
@@ -99,6 +98,8 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
     /** To limit effort spent determinizing regexp queries. */
     private Integer maxDeterminizedStates;
+    private Boolean escape;
     public QueryStringQueryBuilder(String queryString) {
         this.queryString = queryString;
     }
@@ -159,11 +160,11 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
     /**
      * Sets the boolean operator of the query parser used to parse the query string.
      * <p/>
-     * <p>In default mode ({@link FieldQueryBuilder.Operator#OR}) terms without any modifiers
+     * <p>In default mode ({@link Operator#OR}) terms without any modifiers
      * are considered optional: for example <code>capital of Hungary</code> is equal to
      * <code>capital OR of OR Hungary</code>.
      * <p/>
-     * <p>In {@link FieldQueryBuilder.Operator#AND} mode terms are considered to be in conjunction: the
+     * <p>In {@link Operator#AND} mode terms are considered to be in conjunction: the
      * above mentioned query is parsed as <code>capital AND of AND Hungary</code>
      */
     public QueryStringQueryBuilder defaultOperator(Operator defaultOperator) {
@@ -342,6 +343,14 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
         return this;
     }
+    /**
+     * Set to <tt>true</tt> to enable escaping of the query string
+     */
+    public QueryStringQueryBuilder escape(boolean escape) {
+        this.escape = escape;
+        return this;
+    }
     @Override
     protected void doXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(QueryStringQueryParser.NAME);
@@ -431,6 +440,9 @@ public class QueryStringQueryBuilder extends QueryBuilder implements BoostableQu
         if (timeZone != null) {
             builder.field("time_zone", timeZone);
         }
+        if (escape != null) {
+            builder.field("escape", escape);
+        }
         builder.endObject();
     }
 }
View File
@@ -21,8 +21,15 @@ package org.elasticsearch.index.shard;
 import com.google.common.base.Charsets;
 import com.google.common.base.Preconditions;
 import org.apache.lucene.codecs.PostingsFormat;
 import org.apache.lucene.index.CheckIndex;
+import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.BooleanQuery;
+import org.apache.lucene.search.DisjunctionMaxQuery;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MatchNoDocsQuery;
+import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryCachingPolicy;
 import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
 import org.apache.lucene.store.AlreadyClosedException;
@@ -162,8 +169,8 @@ public class IndexShard extends AbstractIndexShardComponent {
     private TimeValue refreshInterval;
-    private volatile ScheduledFuture refreshScheduledFuture;
-    private volatile ScheduledFuture mergeScheduleFuture;
+    private volatile ScheduledFuture<?> refreshScheduledFuture;
+    private volatile ScheduledFuture<?> mergeScheduleFuture;
     protected volatile ShardRouting shardRouting;
     protected volatile IndexShardState state;
     protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();
@@ -252,7 +259,42 @@ public class IndexShard extends AbstractIndexShardComponent {
         if (indexSettings.getAsBoolean(IndexCacheModule.QUERY_CACHE_EVERYTHING, false)) {
             cachingPolicy = QueryCachingPolicy.ALWAYS_CACHE;
         } else {
-            cachingPolicy = new UsageTrackingQueryCachingPolicy();
+            assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_5_2_1;
+            // TODO: remove this hack in Lucene 5.4, use UsageTrackingQueryCachingPolicy directly
+            // See https://issues.apache.org/jira/browse/LUCENE-6748
+            // cachingPolicy = new UsageTrackingQueryCachingPolicy();
+            final QueryCachingPolicy wrapped = new UsageTrackingQueryCachingPolicy();
+            cachingPolicy = new QueryCachingPolicy() {
+                @Override
+                public boolean shouldCache(Query query, LeafReaderContext context) throws IOException {
+                    if (query instanceof MatchAllDocsQuery
+                            // MatchNoDocsQuery currently rewrites to a BooleanQuery,
+                            // but who knows, it might get its own Weight one day
+                            || query instanceof MatchNoDocsQuery) {
+                        return false;
+                    }
+                    if (query instanceof BooleanQuery) {
+                        BooleanQuery bq = (BooleanQuery) query;
+                        if (bq.clauses().isEmpty()) {
+                            return false;
+                        }
+                    }
+                    if (query instanceof DisjunctionMaxQuery) {
+                        DisjunctionMaxQuery dmq = (DisjunctionMaxQuery) query;
+                        if (dmq.getDisjuncts().isEmpty()) {
+                            return false;
+                        }
+                    }
+                    return wrapped.shouldCache(query, context);
+                }
+                @Override
+                public void onUse(Query query) {
+                    wrapped.onUse(query);
+                }
+            };
         }
         this.engineConfig = newEngineConfig(translogConfig, cachingPolicy);
         this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);
View File
@@ -20,7 +20,12 @@
 package org.elasticsearch.indices;
 import com.google.common.base.Function;
-import com.google.common.collect.*;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.util.CollectionUtil;
 import org.apache.lucene.util.IOUtils;
@@ -35,7 +40,12 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.inject.*;
+import org.elasticsearch.common.inject.CreationException;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Injectors;
+import org.elasticsearch.common.inject.Module;
+import org.elasticsearch.common.inject.ModulesBuilder;
 import org.elasticsearch.common.io.FileSystemUtils;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
@@ -43,7 +53,12 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.env.ShardLock;
 import org.elasticsearch.gateway.MetaDataStateFormat;
-import org.elasticsearch.index.*;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexModule;
+import org.elasticsearch.index.IndexNameModule;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.LocalNodeIdModule;
 import org.elasticsearch.index.aliases.IndexAliasesServiceModule;
 import org.elasticsearch.index.analysis.AnalysisModule;
 import org.elasticsearch.index.analysis.AnalysisService;
@@ -71,13 +86,16 @@ import org.elasticsearch.index.store.IndexStore;
 import org.elasticsearch.index.store.IndexStoreModule;
 import org.elasticsearch.indices.analysis.IndicesAnalysisService;
 import org.elasticsearch.indices.recovery.RecoverySettings;
-import org.elasticsearch.plugins.IndexPluginsModule;
 import org.elasticsearch.plugins.PluginsService;
 import java.io.Closeable;
 import java.io.IOException;
 import java.nio.file.Files;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -306,7 +324,10 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
         modules.add(new IndexNameModule(index));
         modules.add(new LocalNodeIdModule(localNodeId));
         modules.add(new IndexSettingsModule(index, indexSettings));
-        modules.add(new IndexPluginsModule(indexSettings, pluginsService));
+        // plugin modules must be added here, before others or we can get crazy injection errors...
+        for (Module pluginModule : pluginsService.indexModules(indexSettings)) {
+            modules.add(pluginModule);
+        }
         modules.add(new IndexStoreModule(indexSettings));
         modules.add(new AnalysisModule(indexSettings, indicesAnalysisService));
         modules.add(new SimilarityModule(indexSettings));
@@ -316,6 +337,8 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
         modules.add(new IndexAliasesServiceModule());
         modules.add(new IndexModule(indexSettings));
+        pluginsService.processModules(modules);
         Injector indexInjector;
         try {
             indexInjector = modules.createChildInjector(injector);
View File
@@ -638,7 +638,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
         if (logger.isDebugEnabled()) {
             logger.debug("[{}][{}] creating shard", shardRouting.index(), shardId);
         }
-        IndexShard indexShard = indexService.createShard(shardId, shardRouting.primary());
+        IndexShard indexShard = indexService.createShard(shardId, shardRouting);
         indexShard.updateRoutingEntry(shardRouting, state.blocks().disableStatePersistence() == false);
         indexShard.addFailedEngineListener(failedEngineHandler);
     } catch (IndexShardAlreadyExistsException e) {
View File
@@ -19,7 +19,7 @@
 package org.elasticsearch.monitor.process;
-import org.elasticsearch.bootstrap.Bootstrap;
+import org.elasticsearch.bootstrap.BootstrapInfo;
 import java.lang.management.ManagementFactory;
 import java.lang.management.OperatingSystemMXBean;
@@ -136,7 +136,7 @@ public class ProcessProbe {
     }
     public ProcessInfo processInfo() {
-        return new ProcessInfo(jvmInfo().pid(), Bootstrap.isMemoryLocked());
+        return new ProcessInfo(jvmInfo().pid(), BootstrapInfo.isMemoryLocked());
     }
     public ProcessStats processStats() {
View File
@@ -35,6 +35,7 @@ import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.Lifecycle;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.inject.Injector;
+import org.elasticsearch.common.inject.Module;
 import org.elasticsearch.common.inject.ModulesBuilder;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.lease.Releasables;
@@ -159,7 +160,11 @@ public class Node implements Releasable {
         ModulesBuilder modules = new ModulesBuilder();
         modules.add(new Version.Module(version));
         modules.add(new CircuitBreakerModule(settings));
-        modules.add(new PluginsModule(settings, pluginsService));
+        // plugin modules must be added here, before others or we can get crazy injection errors...
+        for (Module pluginModule : pluginsService.nodeModules()) {
+            modules.add(pluginModule);
+        }
+        modules.add(new PluginsModule(pluginsService));
         modules.add(new SettingsModule(settings));
         modules.add(new NodeModule(this));
         modules.add(new NetworkModule());
@@ -187,6 +192,9 @@ public class Node implements Releasable {
         modules.add(new RepositoriesModule());
         modules.add(new TribeModule());
+        pluginsService.processModules(modules);
         injector = modules.createInjector();
         client = injector.getInstance(Client.class);
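
The same three-step pattern now appears in Node, IndexService and IndicesService: collect plugin-supplied modules first, add the core modules after, then give plugins a final processModules() pass before the injector is built. A self-contained sketch of the ordering (all names are illustrative stand-ins, not Elasticsearch APIs):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class ModuleOrderingSketch {
        interface Module { void configure(); } // stand-in for org.elasticsearch.common.inject.Module

        static class PluginsService { // stand-in, not the real class
            List<Module> nodeModules() {
                return Arrays.asList(() -> System.out.println("configuring plugin module"));
            }
            void processModules(List<Module> modules) {
                System.out.println("plugins post-process " + modules.size() + " modules");
            }
        }

        public static void main(String[] args) {
            PluginsService pluginsService = new PluginsService();
            List<Module> modules = new ArrayList<>();
            // plugin modules go in first, mirroring the diff's comment about
            // "crazy injection errors" when core modules precede them
            modules.addAll(pluginsService.nodeModules());
            modules.add(() -> System.out.println("configuring core module"));
            pluginsService.processModules(modules); // final pass before the injector is built
            modules.forEach(Module::configure);
        }
    }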
View File
@@ -20,6 +20,7 @@
 package org.elasticsearch.node.internal;
 import com.google.common.base.Charsets;
+import com.google.common.collect.Sets;
 import com.google.common.collect.UnmodifiableIterator;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.common.Booleans;
@@ -40,6 +41,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 import static org.elasticsearch.common.Strings.cleanPath;
@@ -113,12 +115,20 @@ public class InternalSettingsPreparer {
             }
         }
         if (loadFromEnv) {
+            boolean settingsFileFound = false;
+            Set<String> foundSuffixes = Sets.newHashSet();
             for (String allowedSuffix : ALLOWED_SUFFIXES) {
-                try {
-                    settingsBuilder.loadFromPath(environment.configFile().resolve("elasticsearch" + allowedSuffix));
-                } catch (SettingsException e) {
-                    // ignore
+                Path path = environment.configFile().resolve("elasticsearch" + allowedSuffix);
+                if (Files.exists(path)) {
+                    if (!settingsFileFound) {
+                        settingsBuilder.loadFromPath(path);
+                    }
+                    settingsFileFound = true;
+                    foundSuffixes.add(allowedSuffix);
                 }
             }
+            if (foundSuffixes.size() > 1) {
+                throw new SettingsException("multiple settings files found with suffixes: " + Strings.collectionToDelimitedString(foundSuffixes, ","));
+            }
         }
     }
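
Instead of blindly attempting every suffix and swallowing SettingsException, the preparer now loads the first config file that actually exists and fails loudly when several are present. A standalone sketch of that resolution rule (the suffix list and directory are illustrative, not the authoritative values):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.ArrayList;
    import java.util.List;

    public class ConfigSuffixSketch {
        static final String[] ALLOWED_SUFFIXES = {".yml", ".yaml", ".json", ".properties"};

        static Path resolveConfig(Path configDir) {
            List<String> foundSuffixes = new ArrayList<>();
            Path chosen = null;
            for (String suffix : ALLOWED_SUFFIXES) {
                Path candidate = configDir.resolve("elasticsearch" + suffix);
                if (Files.exists(candidate)) {
                    if (chosen == null) {
                        chosen = candidate; // first hit wins, as in the diff
                    }
                    foundSuffixes.add(suffix);
                }
            }
            if (foundSuffixes.size() > 1) {
                throw new IllegalStateException("multiple settings files found with suffixes: " + foundSuffixes);
            }
            return chosen; // null when no settings file exists at all
        }

        public static void main(String[] args) {
            System.out.println(resolveConfig(Paths.get("config")));
        }
    }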
View File
@ -26,7 +26,6 @@ import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector; import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort; import org.apache.lucene.search.Sort;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.Counter; import org.apache.lucene.util.Counter;
@ -52,7 +51,6 @@ import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.SearchContextAggregations; import org.elasticsearch.search.aggregations.SearchContextAggregations;
@ -67,6 +65,7 @@ import org.elasticsearch.search.highlight.SearchContextHighlight;
import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHitField; import org.elasticsearch.search.internal.InternalSearchHitField;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.LeafSearchLookup;
@ -347,12 +346,12 @@ public class PercolateContext extends SearchContext {
} }
@Override @Override
public Scroll scroll() { public ScrollContext scrollContext() {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
@Override @Override
public SearchContext scroll(Scroll scroll) { public SearchContext scrollContext(ScrollContext scroll) {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
@ -620,16 +619,6 @@ public class PercolateContext extends SearchContext {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
} }
@Override
public void lastEmittedDoc(ScoreDoc doc) {
throw new UnsupportedOperationException();
}
@Override
public ScoreDoc lastEmittedDoc() {
throw new UnsupportedOperationException();
}
@Override @Override
public DfsSearchResult dfsResult() { public DfsSearchResult dfsResult() {
throw new UnsupportedOperationException(); throw new UnsupportedOperationException();
View File
@@ -86,7 +86,11 @@ public class PluginInfo implements Streamable, ToXContent {
         try (InputStream stream = Files.newInputStream(descriptor)) {
             props.load(stream);
         }
-        String name = dir.getFileName().toString();
+        String name = props.getProperty("name");
+        if (name == null || name.isEmpty()) {
+            throw new IllegalArgumentException("Property [name] is missing in [" + descriptor + "]");
+        }
+        PluginManager.checkForForbiddenName(name);
         String description = props.getProperty("description");
         if (description == null) {
             throw new IllegalArgumentException("Property [description] is missing for plugin [" + name + "]");
@@ -95,6 +99,7 @@ public class PluginInfo implements Streamable, ToXContent {
         if (version == null) {
             throw new IllegalArgumentException("Property [version] is missing for plugin [" + name + "]");
         }
         boolean jvm = Boolean.parseBoolean(props.getProperty("jvm"));
         boolean site = Boolean.parseBoolean(props.getProperty("site"));
         if (jvm == false && site == false) {
@@ -115,6 +120,7 @@ public class PluginInfo implements Streamable, ToXContent {
         if (javaVersionString == null) {
             throw new IllegalArgumentException("Property [java.version] is missing for jvm plugin [" + name + "]");
         }
+        JarHell.checkVersionFormat(javaVersionString);
         JarHell.checkJavaVersion(name, javaVersionString);
         isolated = Boolean.parseBoolean(props.getProperty("isolated", "true"));
         classname = props.getProperty("classname");
View File
@@ -84,6 +84,7 @@ public class PluginManager {
             "cloud-azure",
             "cloud-gce",
             "delete-by-query",
+            "discovery-multicast",
             "lang-javascript",
             "lang-python",
             "mapper-murmur3",
@@ -91,11 +92,11 @@ public class PluginManager {
             ).build();
     private final Environment environment;
-    private String url;
+    private URL url;
     private OutputMode outputMode;
     private TimeValue timeout;
-    public PluginManager(Environment environment, String url, OutputMode outputMode, TimeValue timeout) {
+    public PluginManager(Environment environment, URL url, OutputMode outputMode, TimeValue timeout) {
         this.environment = environment;
         this.url = url;
         this.outputMode = outputMode;
@@ -103,8 +104,8 @@ public class PluginManager {
     }
     public void downloadAndExtract(String name, Terminal terminal) throws IOException {
-        if (name == null) {
-            throw new IllegalArgumentException("plugin name must be supplied with install [name].");
+        if (name == null && url == null) {
+            throw new IllegalArgumentException("plugin name or url must be supplied with install.");
         }
         if (!Files.exists(environment.pluginsFile())) {
@@ -116,8 +117,14 @@
             throw new IOException("plugin directory " + environment.pluginsFile() + " is read only");
         }
-        PluginHandle pluginHandle = PluginHandle.parse(name);
+        PluginHandle pluginHandle;
+        if (name != null) {
+            pluginHandle = PluginHandle.parse(name);
             checkForForbiddenName(pluginHandle.name);
+        } else {
+            // if we have no name but url, use temporary name that will be overwritten later
+            pluginHandle = new PluginHandle("temp_name" + new Random().nextInt(), null, null);
+        }
         Path pluginFile = download(pluginHandle, terminal);
         extract(pluginHandle, terminal, pluginFile);
@@ -138,7 +145,7 @@
         // first, try directly from the URL provided
         if (url != null) {
-            URL pluginUrl = new URL(url);
+            URL pluginUrl = url;
             boolean isSecureProcotol = "https".equalsIgnoreCase(pluginUrl.getProtocol());
             boolean isAuthInfoSet = !Strings.isNullOrEmpty(pluginUrl.getUserInfo());
             if (isAuthInfoSet && !isSecureProcotol) {
@@ -204,10 +211,6 @@
         }
     }
     private void extract(PluginHandle pluginHandle, Terminal terminal, Path pluginFile) throws IOException {
-        final Path extractLocation = pluginHandle.extractedDir(environment);
-        if (Files.exists(extractLocation)) {
-            throw new IOException("plugin directory " + extractLocation.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + pluginHandle.name + "' command");
-        }
         // unzip plugin to a staging temp dir, named for the plugin
         Path tmp = Files.createTempDirectory(environment.tmpFile(), null);
@@ -226,6 +229,13 @@
             jarHellCheck(root, info.isIsolated());
         }
+        // update name in handle based on 'name' property found in descriptor file
+        pluginHandle = new PluginHandle(info.getName(), pluginHandle.version, pluginHandle.user);
+        final Path extractLocation = pluginHandle.extractedDir(environment);
+        if (Files.exists(extractLocation)) {
+            throw new IOException("plugin directory " + extractLocation.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + pluginHandle.name + "' command");
+        }
         // install plugin
         FileSystemUtils.copyDirectoryRecursively(root, extractLocation);
         terminal.println("Installed %s into %s", pluginHandle.name, extractLocation.toAbsolutePath());
@@ -395,7 +405,7 @@
         }
     }
-    private static void checkForForbiddenName(String name) {
+    static void checkForForbiddenName(String name) {
         if (!hasLength(name) || BLACKLIST.contains(name.toLowerCase(Locale.ROOT))) {
             throw new IllegalArgumentException("Illegal plugin name: " + name);
         }
View File
@@ -20,6 +20,7 @@
 package org.elasticsearch.plugins;
+import com.google.common.base.Strings;
 import org.apache.commons.cli.CommandLine;
 import org.elasticsearch.common.cli.CliTool;
 import org.elasticsearch.common.cli.CliToolConfig;
@@ -32,7 +33,8 @@ import org.elasticsearch.env.Environment;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;
 import org.elasticsearch.plugins.PluginManager.OutputMode;
-import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URL;
 import java.util.Locale;
 import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
@@ -166,19 +168,29 @@ public class PluginManagerCliParser extends CliTool {
         private static final String NAME = "install";
         private static final CliToolConfig.Cmd CMD = cmd(NAME, Install.class)
-                .options(option("u", "url").required(false).hasArg(true))
                 .options(option("t", "timeout").required(false).hasArg(false))
                 .build();
         static Command parse(Terminal terminal, CommandLine cli) {
             String[] args = cli.getArgs();
+            // install [plugin-name/url]
             if ((args == null) || (args.length == 0)) {
-                return exitCmd(ExitStatus.USAGE, terminal, "plugin name is missing (type -h for help)");
+                return exitCmd(ExitStatus.USAGE, terminal, "plugin name or url is missing (type -h for help)");
             }
             String name = args[0];
+            URL optionalPluginUrl = null;
+            // try parsing cli argument as URL
+            try {
+                optionalPluginUrl = new URL(name);
+                name = null;
+            } catch (MalformedURLException e) {
+                // we tried to parse the cli argument as url and failed
+                // continue treating it as a symbolic plugin name like `analysis-icu` etc.
+            }
             TimeValue timeout = TimeValue.parseTimeValue(cli.getOptionValue("t"), DEFAULT_TIMEOUT, "cli");
-            String url = cli.getOptionValue("u");
             OutputMode outputMode = OutputMode.DEFAULT;
             if (cli.hasOption("s")) {
@@ -188,15 +200,15 @@ public class PluginManagerCliParser extends CliTool {
                 outputMode = OutputMode.VERBOSE;
             }
-            return new Install(terminal, name, outputMode, url, timeout);
+            return new Install(terminal, name, outputMode, optionalPluginUrl, timeout);
         }
         final String name;
         private OutputMode outputMode;
-        final String url;
+        final URL url;
         final TimeValue timeout;
-        Install(Terminal terminal, String name, OutputMode outputMode, String url, TimeValue timeout) {
+        Install(Terminal terminal, String name, OutputMode outputMode, URL url, TimeValue timeout) {
             super(terminal);
             this.name = name;
             this.outputMode = outputMode;
@@ -207,7 +219,11 @@ public class PluginManagerCliParser extends CliTool {
         @Override
         public ExitStatus execute(Settings settings, Environment env) throws Exception {
             PluginManager pluginManager = new PluginManager(env, url, outputMode, timeout);
+            if (name != null) {
                 terminal.println("-> Installing " + Strings.nullToEmpty(name) + "...");
+            } else {
+                terminal.println("-> Installing from " + url + "...");
+            }
             pluginManager.downloadAndExtract(name, terminal);
             return ExitStatus.OK;
         }
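
With the -u/--url option gone, the single positional argument does double duty: it is first tried as a URL and otherwise treated as a symbolic plugin name. A minimal sketch of that disambiguation rule, using only the JDK (class and argument names are illustrative):

    import java.net.MalformedURLException;
    import java.net.URL;

    public class InstallArgSketch {
        public static void main(String[] args) {
            String name = args.length > 0 ? args[0] : "analysis-icu";
            URL url = null;
            try {
                url = new URL(name); // e.g. https://example.org/my-plugin.zip
                name = null;         // a URL means there is no symbolic name yet
            } catch (MalformedURLException e) {
                // not a URL: keep treating it as a symbolic plugin name
            }
            System.out.println(name != null
                    ? "-> Installing " + name + "..."
                    : "-> Installing from " + url + "...");
        }
    }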
View File
@@ -20,35 +20,15 @@
 package org.elasticsearch.plugins;
 import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.inject.PreProcessModule;
-import org.elasticsearch.common.inject.SpawnModules;
-import org.elasticsearch.common.settings.Settings;
-/**
- *
- */
-public class PluginsModule extends AbstractModule implements SpawnModules, PreProcessModule {
+public class PluginsModule extends AbstractModule {
-    private final Settings settings;
     private final PluginsService pluginsService;
-    public PluginsModule(Settings settings, PluginsService pluginsService) {
-        this.settings = settings;
+    public PluginsModule(PluginsService pluginsService) {
         this.pluginsService = pluginsService;
     }
-    @Override
-    public Iterable<? extends Module> spawnModules() {
-        return pluginsService.nodeModules();
-    }
-    @Override
-    public void processModule(Module module) {
-        pluginsService.processModule(module);
-    }
     @Override
     protected void configure() {
         bind(PluginsService.class).toInstance(pluginsService);
View File
@ -21,9 +21,14 @@ package org.elasticsearch.plugins;
import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableMap;
import org.apache.lucene.analysis.util.CharFilterFactory;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.PostingsFormat;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo; import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.JarHell; import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.MapBuilder;
@ -106,7 +111,7 @@ public class PluginsService extends AbstractComponent {
List<Bundle> bundles = getPluginBundles(environment); List<Bundle> bundles = getPluginBundles(environment);
tupleBuilder.addAll(loadBundles(bundles)); tupleBuilder.addAll(loadBundles(bundles));
} catch (IOException ex) { } catch (IOException ex) {
throw new IllegalStateException(ex); throw new IllegalStateException("Unable to initialize plugins", ex);
} }
plugins = Collections.unmodifiableList(tupleBuilder); plugins = Collections.unmodifiableList(tupleBuilder);
@ -278,9 +283,10 @@ public class PluginsService extends AbstractComponent {
} }
static List<Bundle> getPluginBundles(Environment environment) throws IOException { static List<Bundle> getPluginBundles(Environment environment) throws IOException {
ESLogger logger = Loggers.getLogger(Bootstrap.class); ESLogger logger = Loggers.getLogger(PluginsService.class);
Path pluginsDirectory = environment.pluginsFile(); Path pluginsDirectory = environment.pluginsFile();
// TODO: remove this leniency, but tests bogusly rely on it
if (!isAccessibleDirectory(pluginsDirectory, logger)) { if (!isAccessibleDirectory(pluginsDirectory, logger)) {
return Collections.emptyList(); return Collections.emptyList();
} }
@ -346,6 +352,8 @@ public class PluginsService extends AbstractComponent {
for (PluginInfo pluginInfo : bundle.plugins) { for (PluginInfo pluginInfo : bundle.plugins) {
final Plugin plugin; final Plugin plugin;
if (pluginInfo.isJvm()) { if (pluginInfo.isJvm()) {
// reload lucene SPI with any new services from the plugin
reloadLuceneSPI(loader);
plugin = loadPlugin(pluginInfo.getClassname(), settings, loader); plugin = loadPlugin(pluginInfo.getClassname(), settings, loader);
} else { } else {
plugin = new SitePlugin(pluginInfo.getName(), pluginInfo.getDescription()); plugin = new SitePlugin(pluginInfo.getName(), pluginInfo.getDescription());
@ -357,6 +365,24 @@ public class PluginsService extends AbstractComponent {
return Collections.unmodifiableList(plugins); return Collections.unmodifiableList(plugins);
} }
/**
* Reloads all Lucene SPI implementations using the new classloader.
* This method must be called after the new classloader has been created to
* register the services for use.
*/
static void reloadLuceneSPI(ClassLoader loader) {
// do NOT change the order of these method calls!
// Codecs:
PostingsFormat.reloadPostingsFormats(loader);
DocValuesFormat.reloadDocValuesFormats(loader);
Codec.reloadCodecs(loader);
// Analysis:
CharFilterFactory.reloadCharFilters(loader);
TokenFilterFactory.reloadTokenFilters(loader);
TokenizerFactory.reloadTokenizers(loader);
}
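
reloadLuceneSPI only takes effect for services reachable from the classloader it is given. A minimal sketch, assuming a hypothetical helper (loadAndRegister is not part of this commit), of feeding a plugin jar's classloader through the same reload sequence:

import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.Path;
import org.apache.lucene.analysis.util.CharFilterFactory;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.lucene.analysis.util.TokenizerFactory;
import org.apache.lucene.codecs.Codec;
import org.apache.lucene.codecs.DocValuesFormat;
import org.apache.lucene.codecs.PostingsFormat;

class PluginSpiSketch {
    // hypothetical: build a classloader over one plugin jar and re-run SPI discovery
    static ClassLoader loadAndRegister(Path pluginJar, ClassLoader parent) throws Exception {
        ClassLoader loader = URLClassLoader.newInstance(new URL[] { pluginJar.toUri().toURL() }, parent);
        // same order as reloadLuceneSPI above: codecs first, then analysis factories
        PostingsFormat.reloadPostingsFormats(loader);
        DocValuesFormat.reloadDocValuesFormats(loader);
        Codec.reloadCodecs(loader);
        CharFilterFactory.reloadCharFilters(loader);
        TokenFilterFactory.reloadTokenFilters(loader);
        TokenizerFactory.reloadTokenizers(loader);
        return loader;
    }
}
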
private Plugin loadPlugin(String className, Settings settings, ClassLoader loader) { private Plugin loadPlugin(String className, Settings settings, ClassLoader loader) {
try { try {
Class<? extends Plugin> pluginClass = loader.loadClass(className).asSubclass(Plugin.class); Class<? extends Plugin> pluginClass = loader.loadClass(className).asSubclass(Plugin.class);

View File

@ -1,55 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.PreProcessModule;
import org.elasticsearch.common.inject.SpawnModules;
import org.elasticsearch.common.settings.Settings;
/**
*
*/
public class ShardsPluginsModule extends AbstractModule implements SpawnModules, PreProcessModule {
private final Settings settings;
private final PluginsService pluginsService;
public ShardsPluginsModule(Settings settings, PluginsService pluginsService) {
this.settings = settings;
this.pluginsService = pluginsService;
}
@Override
public Iterable<? extends Module> spawnModules() {
return pluginsService.shardModules(settings);
}
@Override
public void processModule(Module module) {
pluginsService.processModule(module);
}
@Override
protected void configure() {
}
}

View File

@ -20,14 +20,8 @@
package org.elasticsearch.repositories; package org.elasticsearch.repositories;
import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.Modules;
import org.elasticsearch.common.inject.SpawnModules;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import java.util.Arrays;
import java.util.Collections;
/** /**
* Binds repository classes for the specific repository type. * Binds repository classes for the specific repository type.
*/ */

View File

@ -156,7 +156,7 @@ public class URLRepository extends BlobStoreRepository {
logger.warn("cannot parse the specified url [{}]", url); logger.warn("cannot parse the specified url [{}]", url);
throw new RepositoryException(repositoryName, "cannot parse the specified url [" + url + "]"); throw new RepositoryException(repositoryName, "cannot parse the specified url [" + url + "]");
} }
// We didn't match white list - try to resolve against repo.path // We didn't match white list - try to resolve against path.repo
URL normalizedUrl = environment.resolveRepoURL(url); URL normalizedUrl = environment.resolveRepoURL(url);
if (normalizedUrl == null) { if (normalizedUrl == null) {
logger.warn("The specified url [{}] doesn't start with any repository paths specified by the path.repo setting: [{}] or by repositories.url.allowed_urls setting: [{}] ", url, environment.repoFiles()); logger.warn("The specified url [{}] doesn't start with any repository paths specified by the path.repo setting: [{}] or by repositories.url.allowed_urls setting: [{}] ", url, environment.repoFiles());

View File

@ -25,6 +25,8 @@ import org.elasticsearch.bootstrap.Elasticsearch;
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
@ -115,11 +117,20 @@ public class BytesRestResponse extends RestResponse {
return this.status; return this.status;
} }
private static final ESLogger SUPPRESSED_ERROR_LOGGER = ESLoggerFactory.getLogger("rest.suppressed");
private static XContentBuilder convert(RestChannel channel, RestStatus status, Throwable t) throws IOException { private static XContentBuilder convert(RestChannel channel, RestStatus status, Throwable t) throws IOException {
XContentBuilder builder = channel.newErrorBuilder().startObject(); XContentBuilder builder = channel.newErrorBuilder().startObject();
if (t == null) { if (t == null) {
builder.field("error", "unknown"); builder.field("error", "unknown");
} else if (channel.detailedErrorsEnabled()) { } else if (channel.detailedErrorsEnabled()) {
final ToXContent.Params params;
if (channel.request().paramAsBoolean("error_trace", !ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE_DEFAULT)) {
params = new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"), channel.request());
} else {
SUPPRESSED_ERROR_LOGGER.info("{} Params: {}", t, channel.request().path(), channel.request().params());
params = channel.request();
}
builder.field("error"); builder.field("error");
builder.startObject(); builder.startObject();
final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t); final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t);
@ -127,16 +138,13 @@ public class BytesRestResponse extends RestResponse {
builder.startArray(); builder.startArray();
for (ElasticsearchException rootCause : rootCauses){ for (ElasticsearchException rootCause : rootCauses){
builder.startObject(); builder.startObject();
rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), channel.request())); rootCause.toXContent(builder, new ToXContent.DelegatingMapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_CAUSE, "true"), params));
builder.endObject(); builder.endObject();
} }
builder.endArray(); builder.endArray();
ElasticsearchException.toXContent(builder, channel.request(), t); ElasticsearchException.toXContent(builder, params, t);
builder.endObject(); builder.endObject();
if (channel.request().paramAsBoolean("error_trace", false)) {
buildErrorTrace(t, builder);
}
} else { } else {
builder.field("error", simpleMessage(t)); builder.field("error", simpleMessage(t));
} }
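
The error_trace handling above leans on ToXContent.DelegatingMapParams: entries in the map win, everything else falls through to the request parameters. A minimal sketch of that selection logic, with a hypothetical helper name:

import java.util.Collections;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.xcontent.ToXContent;

class ErrorTraceParamsSketch {
    // hypothetical helper: choose the params used to render the exception
    static ToXContent.Params paramsFor(ToXContent.Params request, boolean errorTrace) {
        if (errorTrace) {
            // force rest.exception.stacktrace.skip=false; all other lookups delegate to the request
            return new ToXContent.DelegatingMapParams(
                    Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "false"), request);
        }
        return request; // stack traces stay suppressed and the throwable is logged instead
    }
}
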
@ -145,45 +153,6 @@ public class BytesRestResponse extends RestResponse {
return builder; return builder;
} }
private static void buildErrorTrace(Throwable t, XContentBuilder builder) throws IOException {
builder.startObject("error_trace");
boolean first = true;
int counter = 0;
while (t != null) {
// bail if there are more than 10 levels, becomes useless really...
if (counter++ > 10) {
break;
}
if (!first) {
builder.startObject("cause");
}
buildThrowable(t, builder);
if (!first) {
builder.endObject();
}
t = t.getCause();
first = false;
}
builder.endObject();
}
private static void buildThrowable(Throwable t, XContentBuilder builder) throws IOException {
builder.field("message", t.getMessage());
for (StackTraceElement stElement : t.getStackTrace()) {
builder.startObject("at")
.field("class", stElement.getClassName())
.field("method", stElement.getMethodName());
if (stElement.getFileName() != null) {
builder.field("file", stElement.getFileName());
}
if (stElement.getLineNumber() >= 0) {
builder.field("line", stElement.getLineNumber());
}
builder.endObject();
}
}
/* /*
* Builds a simple error string from the message of the first ElasticsearchException * Builds a simple error string from the message of the first ElasticsearchException
*/ */

View File

@ -26,7 +26,6 @@ import com.google.common.collect.ImmutableMap;
import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocs;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ElasticsearchParseException;
@ -54,7 +53,6 @@ import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.FieldDataType;
import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldData;
@ -82,7 +80,6 @@ import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.Template; import org.elasticsearch.script.Template;
import org.elasticsearch.script.mustache.MustacheScriptEngineService; import org.elasticsearch.script.mustache.MustacheScriptEngineService;
import org.elasticsearch.search.dfs.CachedDfSource;
import org.elasticsearch.search.dfs.DfsPhase; import org.elasticsearch.search.dfs.DfsPhase;
import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.*; import org.elasticsearch.search.fetch.*;
@ -266,6 +263,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
} }
} }
@Deprecated // remove in 3.0
public QuerySearchResult executeScan(ShardSearchRequest request) { public QuerySearchResult executeScan(ShardSearchRequest request) {
final SearchContext context = createAndPutContext(request); final SearchContext context = createAndPutContext(request);
final int originalSize = context.size(); final int originalSize = context.size();
@ -274,7 +272,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
throw new IllegalArgumentException("aggregations are not supported with search_type=scan"); throw new IllegalArgumentException("aggregations are not supported with search_type=scan");
} }
if (context.scroll() == null) { if (context.scrollContext() == null || context.scrollContext().scroll == null) {
throw new ElasticsearchException("Scroll must be provided when scanning..."); throw new ElasticsearchException("Scroll must be provided when scanning...");
} }
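
For reference, the check above means a scan search is only valid with a scroll attached. A hedged client-side sketch, assuming the 2.x Java client API:

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.unit.TimeValue;

class ScanSketch {
    static SearchResponse openScan(Client client) {
        return client.prepareSearch("index")
                .setSearchType(SearchType.SCAN)           // deprecated here, slated for removal in 3.0
                .setScroll(TimeValue.timeValueMinutes(1)) // omitting this trips the check above
                .setSize(100)
                .get();
    }
}
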
@ -322,7 +320,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
try { try {
shortcutDocIdsToLoadForScanning(context); shortcutDocIdsToLoadForScanning(context);
fetchPhase.execute(context); fetchPhase.execute(context);
if (context.scroll() == null || context.fetchResult().hits().hits().length < context.size()) { if (context.scrollContext() == null || context.fetchResult().hits().hits().length < context.size()) {
freeContext(request.id()); freeContext(request.id());
} else { } else {
contextProcessedSuccessfully(context); contextProcessedSuccessfully(context);
@ -365,7 +363,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
loadOrExecuteQueryPhase(request, context, queryPhase); loadOrExecuteQueryPhase(request, context, queryPhase);
if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scroll() == null) { if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scrollContext() == null) {
freeContext(context.id()); freeContext(context.id());
} else { } else {
contextProcessedSuccessfully(context); contextProcessedSuccessfully(context);
@ -412,23 +410,14 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
public QuerySearchResult executeQueryPhase(QuerySearchRequest request) { public QuerySearchResult executeQueryPhase(QuerySearchRequest request) {
final SearchContext context = findContext(request.id()); final SearchContext context = findContext(request.id());
contextProcessing(context); contextProcessing(context);
context.searcher().setAggregatedDfs(request.dfs());
IndexShard indexShard = context.indexShard(); IndexShard indexShard = context.indexShard();
try {
final IndexCache indexCache = indexShard.indexService().cache();
final QueryCachingPolicy cachingPolicy = indexShard.getQueryCachingPolicy();
context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(),
indexCache.query(), cachingPolicy));
} catch (Throwable e) {
processFailure(context, e);
cleanContext(context);
throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
}
ShardSearchStats shardSearchStats = indexShard.searchService(); ShardSearchStats shardSearchStats = indexShard.searchService();
try { try {
shardSearchStats.onPreQueryPhase(context); shardSearchStats.onPreQueryPhase(context);
long time = System.nanoTime(); long time = System.nanoTime();
queryPhase.execute(context); queryPhase.execute(context);
if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scroll() == null) { if (context.queryResult().topDocs().scoreDocs.length == 0 && context.scrollContext() == null) {
// no hits, we can release the context since there will be no fetch phase // no hits, we can release the context since there will be no fetch phase
freeContext(context.id()); freeContext(context.id());
} else { } else {
@ -446,6 +435,16 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
} }
} }
private boolean fetchPhaseShouldFreeContext(SearchContext context) {
if (context.scrollContext() == null) {
// simple search, no scroll
return true;
} else {
// scroll request, but the scroll was not extended
return context.scrollContext().scroll == null;
}
}
public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) { public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) {
final SearchContext context = createAndPutContext(request); final SearchContext context = createAndPutContext(request);
contextProcessing(context); contextProcessing(context);
@ -465,7 +464,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
try { try {
shortcutDocIdsToLoad(context); shortcutDocIdsToLoad(context);
fetchPhase.execute(context); fetchPhase.execute(context);
if (context.scroll() == null) { if (fetchPhaseShouldFreeContext(context)) {
freeContext(context.id()); freeContext(context.id());
} else { } else {
contextProcessedSuccessfully(context); contextProcessedSuccessfully(context);
@ -488,17 +487,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) { public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) {
final SearchContext context = findContext(request.id()); final SearchContext context = findContext(request.id());
contextProcessing(context); contextProcessing(context);
try { context.searcher().setAggregatedDfs(request.dfs());
final IndexShard indexShard = context.indexShard();
final IndexCache indexCache = indexShard.indexService().cache();
final QueryCachingPolicy cachingPolicy = indexShard.getQueryCachingPolicy();
context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(),
indexCache.query(), cachingPolicy));
} catch (Throwable e) {
freeContext(context.id());
cleanContext(context);
throw new QueryPhaseExecutionException(context, "Failed to set aggregated df", e);
}
try { try {
ShardSearchStats shardSearchStats = context.indexShard().searchService(); ShardSearchStats shardSearchStats = context.indexShard().searchService();
shardSearchStats.onPreQueryPhase(context); shardSearchStats.onPreQueryPhase(context);
@ -515,7 +504,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
try { try {
shortcutDocIdsToLoad(context); shortcutDocIdsToLoad(context);
fetchPhase.execute(context); fetchPhase.execute(context);
if (context.scroll() == null) { if (fetchPhaseShouldFreeContext(context)) {
freeContext(request.id()); freeContext(request.id());
} else { } else {
contextProcessedSuccessfully(context); contextProcessedSuccessfully(context);
@ -555,7 +544,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
try { try {
shortcutDocIdsToLoad(context); shortcutDocIdsToLoad(context);
fetchPhase.execute(context); fetchPhase.execute(context);
if (context.scroll() == null) { if (fetchPhaseShouldFreeContext(context)) {
freeContext(request.id()); freeContext(request.id());
} else { } else {
contextProcessedSuccessfully(context); contextProcessedSuccessfully(context);
@ -581,13 +570,13 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
final ShardSearchStats shardSearchStats = context.indexShard().searchService(); final ShardSearchStats shardSearchStats = context.indexShard().searchService();
try { try {
if (request.lastEmittedDoc() != null) { if (request.lastEmittedDoc() != null) {
context.lastEmittedDoc(request.lastEmittedDoc()); context.scrollContext().lastEmittedDoc = request.lastEmittedDoc();
} }
context.docIdsToLoad(request.docIds(), 0, request.docIdsSize()); context.docIdsToLoad(request.docIds(), 0, request.docIdsSize());
shardSearchStats.onPreFetchPhase(context); shardSearchStats.onPreFetchPhase(context);
long time = System.nanoTime(); long time = System.nanoTime();
fetchPhase.execute(context); fetchPhase.execute(context);
if (context.scroll() == null) { if (fetchPhaseShouldFreeContext(context)) {
freeContext(request.id()); freeContext(request.id());
} else { } else {
contextProcessedSuccessfully(context); contextProcessedSuccessfully(context);
@ -642,7 +631,10 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout); SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout);
SearchContext.setCurrent(context); SearchContext.setCurrent(context);
try { try {
context.scroll(request.scroll()); if (request.scroll() != null) {
context.scrollContext(new ScrollContext());
context.scrollContext().scroll = request.scroll();
}
parseTemplate(request, context); parseTemplate(request, context);
parseSource(context, request.source()); parseSource(context, request.source());
@ -695,7 +687,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
if (context != null) { if (context != null) {
try { try {
context.indexShard().searchService().onFreeContext(context); context.indexShard().searchService().onFreeContext(context);
if (context.scroll() != null) { if (context.scrollContext() != null) {
context.indexShard().searchService().onFreeScrollContext(context); context.indexShard().searchService().onFreeScrollContext(context);
} }
} finally { } finally {
@ -708,7 +700,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
public void freeAllScrollContexts() { public void freeAllScrollContexts() {
for (SearchContext searchContext : activeContexts.values()) { for (SearchContext searchContext : activeContexts.values()) {
if (searchContext.scroll() != null) { if (searchContext.scrollContext() != null) {
freeContext(searchContext.id()); freeContext(searchContext.id());
} }
} }
@ -902,7 +894,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
private void processScroll(InternalScrollSearchRequest request, SearchContext context) { private void processScroll(InternalScrollSearchRequest request, SearchContext context) {
// process scroll // process scroll
context.from(context.from() + context.size()); context.from(context.from() + context.size());
context.scroll(request.scroll()); context.scrollContext().scroll = request.scroll();
// update the context keep alive based on the new scroll value // update the context keep alive based on the new scroll value
if (request.scroll() != null && request.scroll().keepAlive() != null) { if (request.scroll() != null && request.scroll().keepAlive() != null) {
context.keepAlive(request.scroll().keepAlive().millis()); context.keepAlive(request.scroll().keepAlive().millis());

View File

@ -418,6 +418,7 @@ public class SearchServiceTransportAction extends AbstractComponent {
} }
} }
@Deprecated // remove in 3.0
class SearchScanTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> { class SearchScanTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
@Override @Override
public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception { public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {

View File

@ -31,7 +31,7 @@ import java.io.IOException;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory<ValuesSource> { final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory.LeafOnly<ValuesSource> {
private final long precisionThreshold; private final long precisionThreshold;

View File

@ -186,9 +186,8 @@ public class AggregationPath {
} }
public AggregationPath subPath(int offset, int length) { public AggregationPath subPath(int offset, int length) {
PathElement[] subTokens = new PathElement[length]; List<PathElement> subTokens = new ArrayList<>(pathElements.subList(offset, offset + length));
System.arraycopy(pathElements, offset, subTokens, 0, length); return new AggregationPath(subTokens);
return new AggregationPath(pathElements);
} }
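
The fix matters because the old code built subTokens and then constructed the new path from the full element list anyway. A sketch of the intended behavior, assuming AggregationPath.parse parses '>'-separated paths as it does elsewhere in this codebase:

import org.elasticsearch.search.aggregations.support.AggregationPath;

class SubPathSketch {
    static AggregationPath tail() {
        AggregationPath path = AggregationPath.parse("a>b>c");
        // the last two elements, i.e. "b>c"; before the fix this still held "a>b>c"
        return path.subPath(1, 2);
    }
}
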
/** /**

View File

@ -69,14 +69,14 @@ public class ValueFormat {
public static final DateTime DEFAULT = new DateTime(DateFieldMapper.Defaults.DATE_TIME_FORMATTER.format(), ValueFormatter.DateTime.DEFAULT, ValueParser.DateMath.DEFAULT); public static final DateTime DEFAULT = new DateTime(DateFieldMapper.Defaults.DATE_TIME_FORMATTER.format(), ValueFormatter.DateTime.DEFAULT, ValueParser.DateMath.DEFAULT);
public static DateTime format(String format, DateTimeZone timezone) { public static DateTime format(String format, DateTimeZone timezone) {
return new DateTime(format, new ValueFormatter.DateTime(format, timezone), new ValueParser.DateMath(format)); return new DateTime(format, new ValueFormatter.DateTime(format, timezone), new ValueParser.DateMath(format, timezone));
} }
public static DateTime mapper(DateFieldMapper.DateFieldType fieldType, DateTimeZone timezone) { public static DateTime mapper(DateFieldMapper.DateFieldType fieldType, DateTimeZone timezone) {
return new DateTime(fieldType.dateTimeFormatter().format(), ValueFormatter.DateTime.mapper(fieldType, timezone), ValueParser.DateMath.mapper(fieldType)); return new DateTime(fieldType.dateTimeFormatter().format(), ValueFormatter.DateTime.mapper(fieldType, timezone), ValueParser.DateMath.mapper(fieldType, timezone));
} }
public DateTime(String pattern, ValueFormatter formatter, ValueParser parser) { private DateTime(String pattern, ValueFormatter formatter, ValueParser parser) {
super(pattern, formatter, parser); super(pattern, formatter, parser);
} }

View File

@ -18,6 +18,7 @@
*/ */
package org.elasticsearch.search.aggregations.support.format; package org.elasticsearch.search.aggregations.support.format;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.FormatDateTimeFormatter; import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.joda.Joda;
@ -25,6 +26,7 @@ import org.elasticsearch.index.mapper.core.DateFieldMapper;
import org.elasticsearch.index.mapper.ip.IpFieldMapper; import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.AggregationExecutionException;
import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext;
import org.joda.time.DateTimeZone;
import java.text.DecimalFormat; import java.text.DecimalFormat;
import java.text.DecimalFormatSymbols; import java.text.DecimalFormatSymbols;
@ -80,16 +82,21 @@ public interface ValueParser {
*/ */
static class DateMath implements ValueParser { static class DateMath implements ValueParser {
public static final DateMath DEFAULT = new ValueParser.DateMath(new DateMathParser(DateFieldMapper.Defaults.DATE_TIME_FORMATTER)); public static final DateMath DEFAULT = new ValueParser.DateMath(new DateMathParser(DateFieldMapper.Defaults.DATE_TIME_FORMATTER), DateTimeZone.UTC);
private DateMathParser parser; private DateMathParser parser;
public DateMath(String format) { private DateTimeZone timezone = DateTimeZone.UTC;
this(new DateMathParser(Joda.forPattern(format)));
public DateMath(String format, DateTimeZone timezone) {
this(new DateMathParser(Joda.forPattern(format)), timezone);
} }
public DateMath(DateMathParser parser) { public DateMath(DateMathParser parser, @Nullable DateTimeZone timeZone) {
this.parser = parser; this.parser = parser;
if (timeZone != null) {
this.timezone = timeZone;
}
} }
@Override @Override
@ -100,7 +107,7 @@ public interface ValueParser {
return searchContext.nowInMillis(); return searchContext.nowInMillis();
} }
}; };
return parser.parse(value, now); return parser.parse(value, now, false, timezone);
} }
@Override @Override
@ -108,8 +115,8 @@ public interface ValueParser {
return parseLong(value, searchContext); return parseLong(value, searchContext);
} }
public static DateMath mapper(DateFieldMapper.DateFieldType fieldType) { public static DateMath mapper(DateFieldMapper.DateFieldType fieldType, @Nullable DateTimeZone timezone) {
return new DateMath(new DateMathParser(fieldType.dateTimeFormatter())); return new DateMath(new DateMathParser(fieldType.dateTimeFormatter()), timezone);
} }
} }
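
Threading the time zone through changes where rounding date-math like now/d lands. A sketch using the four-argument parse call shown above (the pattern name is illustrative):

import java.util.concurrent.Callable;
import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.Joda;
import org.joda.time.DateTimeZone;

class DateMathZoneSketch {
    public static void main(String[] args) {
        DateMathParser parser = new DateMathParser(Joda.forPattern("dateOptionalTime"));
        Callable<Long> now = new Callable<Long>() {
            @Override
            public Long call() {
                return System.currentTimeMillis();
            }
        };
        // "now/d" rounds down to midnight in the given zone, not always UTC midnight
        long utc = parser.parse("now/d", now, false, DateTimeZone.UTC);
        long paris = parser.parse("now/d", now, false, DateTimeZone.forID("Europe/Paris"));
        System.out.println(utc - paris); // typically 3600000 or 7200000 ms, depending on DST
    }
}
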

View File

@ -1,97 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.dfs;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.search.similarities.Similarity;
import java.io.IOException;
import java.util.List;
/**
*
*/
public class CachedDfSource extends IndexSearcher {
private final AggregatedDfs aggregatedDfs;
private final int maxDoc;
public CachedDfSource(IndexReader reader, AggregatedDfs aggregatedDfs, Similarity similarity,
QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) throws IOException {
super(reader);
this.aggregatedDfs = aggregatedDfs;
setSimilarity(similarity);
setQueryCache(queryCache);
setQueryCachingPolicy(queryCachingPolicy);
if (aggregatedDfs.maxDoc() > Integer.MAX_VALUE) {
maxDoc = Integer.MAX_VALUE;
} else {
maxDoc = (int) aggregatedDfs.maxDoc();
}
}
@Override
public TermStatistics termStatistics(Term term, TermContext context) throws IOException {
TermStatistics termStatistics = aggregatedDfs.termStatistics().get(term);
if (termStatistics == null) {
// we don't have stats for this - this might be a must_not clauses etc. that doesn't allow extract terms on the query
return super.termStatistics(term, context);
}
return termStatistics;
}
@Override
public CollectionStatistics collectionStatistics(String field) throws IOException {
CollectionStatistics collectionStatistics = aggregatedDfs.fieldStatistics().get(field);
if (collectionStatistics == null) {
// we don't have stats for this - this might be a must_not clauses etc. that doesn't allow extract terms on the query
return super.collectionStatistics(field);
}
return collectionStatistics;
}
public int maxDoc() {
return this.maxDoc;
}
@Override
public Document doc(int i) {
throw new UnsupportedOperationException();
}
@Override
public void doc(int docID, StoredFieldVisitor fieldVisitor) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Explanation explain(Weight weight, int doc) {
throw new UnsupportedOperationException();
}
@Override
protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
throw new UnsupportedOperationException();
}
}

View File

@ -20,15 +20,13 @@
package org.elasticsearch.search.internal; package org.elasticsearch.search.internal;
import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector; import org.apache.lucene.index.Term;
import org.apache.lucene.search.Explanation; import org.apache.lucene.index.TermContext;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.*;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Weight;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.search.dfs.CachedDfSource; import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.internal.SearchContext.Lifetime; import org.elasticsearch.search.internal.SearchContext.Lifetime;
import java.io.IOException; import java.io.IOException;
@ -46,21 +44,23 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
private final SearchContext searchContext; private final SearchContext searchContext;
private CachedDfSource dfSource; private AggregatedDfs aggregatedDfs;
public ContextIndexSearcher(SearchContext searchContext, Engine.Searcher searcher) { public ContextIndexSearcher(SearchContext searchContext, Engine.Searcher searcher) {
super(searcher.reader()); super(searcher.reader());
in = searcher.searcher(); in = searcher.searcher();
this.searchContext = searchContext; this.searchContext = searchContext;
setSimilarity(searcher.searcher().getSimilarity(true)); setSimilarity(searcher.searcher().getSimilarity(true));
setQueryCache(searchContext.indexShard().indexService().cache().query());
setQueryCachingPolicy(searchContext.indexShard().getQueryCachingPolicy());
} }
@Override @Override
public void close() { public void close() {
} }
public void dfSource(CachedDfSource dfSource) { public void setAggregatedDfs(AggregatedDfs aggregatedDfs) {
this.dfSource = dfSource; this.aggregatedDfs = aggregatedDfs;
} }
@Override @Override
@ -75,10 +75,12 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
@Override @Override
public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException { public Weight createNormalizedWeight(Query query, boolean needsScores) throws IOException {
// During tests we prefer to use the wrapped IndexSearcher, because then we use the AssertingIndexSearcher
// it is hacky, because if we perform a dfs search, we don't use the wrapped IndexSearcher...
try { try {
// if scores are needed and we have dfs data then use it // if scores are needed and we have dfs data then use it
if (dfSource != null && needsScores) { if (aggregatedDfs != null && needsScores) {
return dfSource.createNormalizedWeight(query, needsScores); return super.createNormalizedWeight(query, needsScores);
} }
return in.createNormalizedWeight(query, needsScores); return in.createNormalizedWeight(query, needsScores);
} catch (Throwable t) { } catch (Throwable t) {
@ -104,4 +106,32 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
searchContext.clearReleasables(Lifetime.COLLECTION); searchContext.clearReleasables(Lifetime.COLLECTION);
} }
} }
@Override
public TermStatistics termStatistics(Term term, TermContext context) throws IOException {
if (aggregatedDfs == null) {
// we are either executing the dfs phase or the search_type doesn't include the dfs phase.
return super.termStatistics(term, context);
}
TermStatistics termStatistics = aggregatedDfs.termStatistics().get(term);
if (termStatistics == null) {
// we don't have stats for this term - it may only appear in a must_not clause etc., which doesn't allow extracting terms from the query return super.termStatistics(term, context);
return super.termStatistics(term, context);
}
return termStatistics;
}
@Override
public CollectionStatistics collectionStatistics(String field) throws IOException {
if (aggregatedDfs == null) {
// we are either executing the dfs phase or the search_type doesn't include the dfs phase.
return super.collectionStatistics(field);
}
CollectionStatistics collectionStatistics = aggregatedDfs.fieldStatistics().get(field);
if (collectionStatistics == null) {
// we don't have stats for this field - it may only appear in a must_not clause etc., which doesn't allow extracting terms from the query return super.collectionStatistics(field);
return super.collectionStatistics(field);
}
return collectionStatistics;
}
} }
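
Both overrides follow the same lookup-with-fallback shape: prefer statistics aggregated during the dfs phase, fall back to the local reader when none exist. A generic sketch of that pattern, purely illustrative:

import java.util.Map;

class DfsFallbackSketch {
    static <K, V> V globalOrLocal(Map<K, V> aggregated, K key, V local) {
        if (aggregated == null) {
            return local; // no dfs phase ran for this search type
        }
        V global = aggregated.get(key);
        // e.g. terms that only occur in must_not clauses never make it into the dfs stats
        return global != null ? global : local;
    }
}
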

View File

@ -48,7 +48,6 @@ import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.SearchContextAggregations; import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.dfs.DfsSearchResult;
@ -97,7 +96,7 @@ public class DefaultSearchContext extends SearchContext {
// terminate after count // terminate after count
private int terminateAfter = DEFAULT_TERMINATE_AFTER; private int terminateAfter = DEFAULT_TERMINATE_AFTER;
private List<String> groupStats; private List<String> groupStats;
private Scroll scroll; private ScrollContext scrollContext;
private boolean explain; private boolean explain;
private boolean version = false; // by default, we don't return versions private boolean version = false; // by default, we don't return versions
private List<String> fieldNames; private List<String> fieldNames;
@ -289,13 +288,13 @@ public class DefaultSearchContext extends SearchContext {
} }
@Override @Override
public Scroll scroll() { public ScrollContext scrollContext() {
return this.scroll; return this.scrollContext;
} }
@Override @Override
public SearchContext scroll(Scroll scroll) { public SearchContext scrollContext(ScrollContext scrollContext) {
this.scroll = scroll; this.scrollContext = scrollContext;
return this; return this;
} }
@ -651,16 +650,6 @@ public class DefaultSearchContext extends SearchContext {
this.keepAlive = keepAlive; this.keepAlive = keepAlive;
} }
@Override
public void lastEmittedDoc(ScoreDoc doc) {
this.lastEmittedDoc = doc;
}
@Override
public ScoreDoc lastEmittedDoc() {
return lastEmittedDoc;
}
@Override @Override
public SearchLookup lookup() { public SearchLookup lookup() {
// TODO: The types should take into account the parsing context in QueryParserContext... // TODO: The types should take into account the parsing context in QueryParserContext...

View File

@ -23,7 +23,6 @@ import com.carrotsearch.hppc.ObjectObjectAssociativeContainer;
import org.apache.lucene.search.Collector; import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort; import org.apache.lucene.search.Sort;
import org.apache.lucene.util.Counter; import org.apache.lucene.util.Counter;
import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.SearchType;
@ -42,7 +41,6 @@ import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.SearchContextAggregations; import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.dfs.DfsSearchResult;
@ -154,13 +152,13 @@ public abstract class FilteredSearchContext extends SearchContext {
} }
@Override @Override
public Scroll scroll() { public ScrollContext scrollContext() {
return in.scroll(); return in.scrollContext();
} }
@Override @Override
public SearchContext scroll(Scroll scroll) { public SearchContext scrollContext(ScrollContext scroll) {
return in.scroll(scroll); return in.scrollContext(scroll);
} }
@Override @Override
@ -483,16 +481,6 @@ public abstract class FilteredSearchContext extends SearchContext {
in.keepAlive(keepAlive); in.keepAlive(keepAlive);
} }
@Override
public void lastEmittedDoc(ScoreDoc doc) {
in.lastEmittedDoc(doc);
}
@Override
public ScoreDoc lastEmittedDoc() {
return in.lastEmittedDoc();
}
@Override @Override
public SearchLookup lookup() { public SearchLookup lookup() {
return in.lookup(); return in.lookup();

View File

@ -17,20 +17,17 @@
* under the License. * under the License.
*/ */
package org.elasticsearch.common.inject;

/**
* This interface can be added to a Module to spawn sub modules. DO NOT USE.
*
* This is fundamentally broken.
* <ul>
* <li>If you have a plugin with multiple modules, return all the modules at once.</li>
* <li>If you are trying to make the implementation of a module "pluggable", don't do it.
* This is not extendable because custom implementations (using onModule) cannot be
* registered before spawnModules() is called.</li>
* </ul>
*/
public interface SpawnModules {
Iterable<? extends Module> spawnModules();
}

package org.elasticsearch.search.internal;

import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.search.Scroll;

/** Wrapper around information that needs to stay around when scrolling. */
public class ScrollContext {

public int totalHits = -1;
public float maxScore;
public ScoreDoc lastEmittedDoc;
public Scroll scroll;
}
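
A sketch of how this wrapper is expected to be populated, mirroring the SearchService changes elsewhere in this commit (the helper is hypothetical):

import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.internal.ScrollContext;

class ScrollContextSketch {
    static ScrollContext open(Scroll scroll) {
        ScrollContext ctx = new ScrollContext();
        ctx.scroll = scroll; // carries the keep-alive
        // totalHits stays -1 until the first query round fills it in; later rounds
        // reuse totalHits/maxScore and only advance lastEmittedDoc
        return ctx;
    }
}
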

View File

@ -24,7 +24,6 @@ import com.google.common.collect.MultimapBuilder;
import org.apache.lucene.search.Collector; import org.apache.lucene.search.Collector;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort; import org.apache.lucene.search.Sort;
import org.apache.lucene.util.Counter; import org.apache.lucene.util.Counter;
import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.SearchType;
@ -159,9 +158,9 @@ public abstract class SearchContext implements Releasable, HasContextAndHeaders
protected abstract long nowInMillisImpl(); protected abstract long nowInMillisImpl();
public abstract Scroll scroll(); public abstract ScrollContext scrollContext();
public abstract SearchContext scroll(Scroll scroll); public abstract SearchContext scrollContext(ScrollContext scroll);
public abstract SearchContextAggregations aggregations(); public abstract SearchContextAggregations aggregations();
@ -303,10 +302,6 @@ public abstract class SearchContext implements Releasable, HasContextAndHeaders
public abstract void keepAlive(long keepAlive); public abstract void keepAlive(long keepAlive);
public abstract void lastEmittedDoc(ScoreDoc doc);
public abstract ScoreDoc lastEmittedDoc();
public abstract SearchLookup lookup(); public abstract SearchLookup lookup();
public abstract DfsSearchResult dfsResult(); public abstract DfsSearchResult dfsResult();

View File

@ -20,13 +20,10 @@ package org.elasticsearch.search.internal;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import org.apache.lucene.search.Filter; import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort; import org.apache.lucene.search.Sort;
import org.apache.lucene.util.Counter; import org.apache.lucene.util.Counter;
import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.search.Scroll;
import org.elasticsearch.search.aggregations.SearchContextAggregations; import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
@ -101,7 +98,7 @@ public class SubSearchContext extends FilteredSearchContext {
} }
@Override @Override
public SearchContext scroll(Scroll scroll) { public SearchContext scrollContext(ScrollContext scrollContext) {
throw new UnsupportedOperationException("Not supported"); throw new UnsupportedOperationException("Not supported");
} }
@ -304,11 +301,6 @@ public class SubSearchContext extends FilteredSearchContext {
throw new UnsupportedOperationException("Not supported"); throw new UnsupportedOperationException("Not supported");
} }
@Override
public void lastEmittedDoc(ScoreDoc doc) {
throw new UnsupportedOperationException("Not supported");
}
@Override @Override
public QuerySearchResult queryResult() { public QuerySearchResult queryResult() {
return querySearchResult; return querySearchResult;

View File

@ -21,12 +21,16 @@ package org.elasticsearch.search.query;
import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableMap;
import org.apache.lucene.queries.MinDocQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Collector; import org.apache.lucene.search.Collector;
import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TimeLimitingCollector; import org.apache.lucene.search.TimeLimitingCollector;
import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopDocsCollector;
@ -43,8 +47,8 @@ import org.elasticsearch.search.SearchParseElement;
import org.elasticsearch.search.SearchPhase; import org.elasticsearch.search.SearchPhase;
import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.aggregations.AggregationPhase; import org.elasticsearch.search.aggregations.AggregationPhase;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.SearchContext.Lifetime;
import org.elasticsearch.search.rescore.RescorePhase; import org.elasticsearch.search.rescore.RescorePhase;
import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.rescore.RescoreSearchContext;
import org.elasticsearch.search.scan.ScanContext.ScanCollector; import org.elasticsearch.search.scan.ScanContext.ScanCollector;
@ -52,7 +56,6 @@ import org.elasticsearch.search.sort.SortParseElement;
import org.elasticsearch.search.sort.TrackScoresParseElement; import org.elasticsearch.search.sort.TrackScoresParseElement;
import org.elasticsearch.search.suggest.SuggestPhase; import org.elasticsearch.search.suggest.SuggestPhase;
import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@ -115,6 +118,7 @@ public class QueryPhase implements SearchPhase {
searchContext.queryResult().searchTimedOut(false); searchContext.queryResult().searchTimedOut(false);
final SearchType searchType = searchContext.searchType();
boolean rescore = false; boolean rescore = false;
try { try {
searchContext.queryResult().from(searchContext.from()); searchContext.queryResult().from(searchContext.from());
@ -138,7 +142,7 @@ public class QueryPhase implements SearchPhase {
return new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0); return new TopDocs(totalHitCountCollector.getTotalHits(), Lucene.EMPTY_SCORE_DOCS, 0);
} }
}; };
} else if (searchContext.searchType() == SearchType.SCAN) { } else if (searchType == SearchType.SCAN) {
query = searchContext.scanContext().wrapQuery(query); query = searchContext.scanContext().wrapQuery(query);
final ScanCollector scanCollector = searchContext.scanContext().collector(searchContext); final ScanCollector scanCollector = searchContext.scanContext().collector(searchContext);
collector = scanCollector; collector = scanCollector;
@ -150,11 +154,32 @@ public class QueryPhase implements SearchPhase {
}; };
} else { } else {
// Perhaps have a dedicated scroll phase? // Perhaps have a dedicated scroll phase?
final ScrollContext scrollContext = searchContext.scrollContext();
assert (scrollContext != null) == (searchContext.request().scroll() != null);
final TopDocsCollector<?> topDocsCollector; final TopDocsCollector<?> topDocsCollector;
ScoreDoc lastEmittedDoc; ScoreDoc lastEmittedDoc;
if (searchContext.request().scroll() != null) { if (searchContext.request().scroll() != null) {
numDocs = Math.min(searchContext.size(), totalNumDocs); numDocs = Math.min(searchContext.size(), totalNumDocs);
lastEmittedDoc = searchContext.lastEmittedDoc(); lastEmittedDoc = scrollContext.lastEmittedDoc;
if (Sort.INDEXORDER.equals(searchContext.sort())) {
if (scrollContext.totalHits == -1) {
// first round
assert scrollContext.lastEmittedDoc == null;
// there is not much that we can optimize here since we want to collect all
// documents in order to get the total number of hits
} else {
// now this gets interesting: since we sort in index-order, we can directly
// skip to the desired doc and stop collecting after ${size} matches
if (scrollContext.lastEmittedDoc != null) {
BooleanQuery bq = new BooleanQuery();
bq.add(query, Occur.MUST);
bq.add(new MinDocQuery(lastEmittedDoc.doc + 1), Occur.FILTER);
query = bq;
}
searchContext.terminateAfter(numDocs);
}
}
} else { } else {
lastEmittedDoc = null; lastEmittedDoc = null;
} }
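
The branch above is the index-order scroll optimization: resume right after the last emitted doc and stop once size documents have been collected. A sketch of the query rewrite it performs (the method name is hypothetical):

import org.apache.lucene.queries.MinDocQuery;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;

class ScrollResumeSketch {
    static Query resumeAfter(Query query, ScoreDoc lastEmittedDoc) {
        BooleanQuery bq = new BooleanQuery();
        bq.add(query, Occur.MUST);
        // FILTER clause: restricts matches without contributing to the score
        bq.add(new MinDocQuery(lastEmittedDoc.doc + 1), Occur.FILTER);
        return bq;
    }
}
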
@ -177,7 +202,31 @@ public class QueryPhase implements SearchPhase {
topDocsCallable = new Callable<TopDocs>() { topDocsCallable = new Callable<TopDocs>() {
@Override @Override
public TopDocs call() throws Exception { public TopDocs call() throws Exception {
return topDocsCollector.topDocs(); TopDocs topDocs = topDocsCollector.topDocs();
if (scrollContext != null) {
if (scrollContext.totalHits == -1) {
// first round
scrollContext.totalHits = topDocs.totalHits;
scrollContext.maxScore = topDocs.getMaxScore();
} else {
// subsequent round: the total number of hits and
// the maximum score were computed on the first round
topDocs.totalHits = scrollContext.totalHits;
topDocs.setMaxScore(scrollContext.maxScore);
}
switch (searchType) {
case QUERY_AND_FETCH:
case DFS_QUERY_AND_FETCH:
// for (DFS_)QUERY_AND_FETCH, we already know the last emitted doc
if (topDocs.scoreDocs.length > 0) {
// set the last emitted doc
scrollContext.lastEmittedDoc = topDocs.scoreDocs[topDocs.scoreDocs.length - 1];
}
default:
break;
}
}
return topDocs;
} }
}; };
} }
@ -227,19 +276,7 @@ public class QueryPhase implements SearchPhase {
searchContext.queryResult().terminatedEarly(false); searchContext.queryResult().terminatedEarly(false);
} }
final TopDocs topDocs = topDocsCallable.call(); searchContext.queryResult().topDocs(topDocsCallable.call());
if (searchContext.request().scroll() != null) {
int size = topDocs.scoreDocs.length;
if (size > 0) {
// In the case of *QUERY_AND_FETCH we don't get back to shards telling them which least
// relevant docs got emitted as hit, we can simply mark the last doc as last emitted
if (searchContext.searchType() == SearchType.QUERY_AND_FETCH ||
searchContext.searchType() == SearchType.DFS_QUERY_AND_FETCH) {
searchContext.lastEmittedDoc(topDocs.scoreDocs[size - 1]);
}
}
}
searchContext.queryResult().topDocs(topDocs);
} catch (Throwable e) { } catch (Throwable e) {
throw new QueryPhaseExecutionException(searchContext, "Failed to execute main query", e); throw new QueryPhaseExecutionException(searchContext, "Failed to execute main query", e);
} }

View File

@ -20,18 +20,13 @@
package org.elasticsearch.search.scan; package org.elasticsearch.search.scan;
import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.queries.MinDocQuery;
import org.apache.lucene.search.CollectionTerminatedException; import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.SimpleCollector; import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.Queries;
import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext;
@ -118,93 +113,4 @@ public class ScanContext {
} }
} }
/**
* A filtering query that matches all doc IDs that are not deleted and
* greater than or equal to the configured doc ID.
*/
// pkg-private for testing
static class MinDocQuery extends Query {
private final int minDoc;
MinDocQuery(int minDoc) {
this.minDoc = minDoc;
}
@Override
public int hashCode() {
return 31 * super.hashCode() + minDoc;
}
@Override
public boolean equals(Object obj) {
if (super.equals(obj) == false) {
return false;
}
MinDocQuery that = (MinDocQuery) obj;
return minDoc == that.minDoc;
}
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
return new ConstantScoreWeight(this) {
@Override
public Scorer scorer(LeafReaderContext context, final Bits acceptDocs) throws IOException {
final int maxDoc = context.reader().maxDoc();
if (context.docBase + maxDoc <= minDoc) {
return null;
}
final int segmentMinDoc = Math.max(0, minDoc - context.docBase);
final DocIdSetIterator disi = new DocIdSetIterator() {
int doc = -1;
@Override
public int docID() {
return doc;
}
@Override
public int nextDoc() throws IOException {
return advance(doc + 1);
}
@Override
public int advance(int target) throws IOException {
assert target > doc;
if (doc == -1) {
// skip directly to minDoc
doc = Math.max(target, segmentMinDoc);
} else {
doc = target;
}
while (doc < maxDoc) {
if (acceptDocs == null || acceptDocs.get(doc)) {
break;
}
doc += 1;
}
if (doc >= maxDoc) {
doc = NO_MORE_DOCS;
}
return doc;
}
@Override
public long cost() {
return maxDoc - minDoc;
}
};
return new ConstantScoreScorer(this, score(), disi);
}
};
}
@Override
public String toString(String field) {
return "MinDocQuery(minDoc=" + minDoc + ")";
}
}
} }

View File

@@ -727,7 +727,7 @@ public class ThreadPool extends AbstractComponent {
             if (queueSize == null) {
                 builder.field(Fields.QUEUE_SIZE, -1);
             } else {
-                builder.field(Fields.QUEUE_SIZE, queueSize.toString());
+                builder.field(Fields.QUEUE_SIZE, queueSize.singles());
             }
             builder.endObject();
             return builder;
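A hedged note on the queue_size change above: assuming these are org.elasticsearch.common.unit.SizeValue semantics, toString() renders a human-readable string such as "1k", while singles() returns the underlying count as a long, so the thread pool stats now emit queue_size as a JSON number rather than a string. A minimal sketch under that assumption:

    import org.elasticsearch.common.unit.SizeValue;

    // Assumption: SizeValue semantics as in org.elasticsearch.common.unit.
    SizeValue queueSize = SizeValue.parseSizeValue("1k");
    String human = queueSize.toString(); // "1k"   (previously serialized as a string)
    long raw = queueSize.singles();      // 1000   (now serialized as a number)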


@@ -25,6 +25,7 @@ import org.elasticsearch.common.transport.BoundTransportAddress;
 import org.elasticsearch.common.transport.TransportAddress;

 import java.io.IOException;
+import java.util.List;
 import java.util.Map;

 /**
@@ -32,6 +33,7 @@ import java.util.Map;
  */
 public interface Transport extends LifecycleComponent<Transport> {

     public static class TransportSettings {
         public static final String TRANSPORT_TCP_COMPRESS = "transport.tcp.compress";
     }
@@ -52,7 +54,7 @@ public interface Transport extends LifecycleComponent<Transport> {
     /**
      * Returns an address from its string representation.
      */
-    TransportAddress[] addressesFromString(String address) throws Exception;
+    TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception;

     /**
      * Is the address type supported.
@@ -89,4 +91,6 @@ public interface Transport extends LifecycleComponent<Transport> {
      * Returns count of currently open connections
      */
     long serverOpen();
+
+    List<String> getLocalAddresses();
 }
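Taken together, the two interface changes above let callers bound how many ports a host spec expands to and ask a transport for its local loopback candidates. A hedged sketch of a caller (the transport variable and the limit of 10 are illustrative, not from this diff; both methods are delegated through TransportService later in this commit):

    // Expand a host spec into at most 10 ports per resolved address,
    // e.g. when building a unicast discovery ping list.
    TransportAddress[] candidates = transport.addressesFromString("127.0.0.1:9300-9400", 10);

    // Addresses the transport considers equivalent to "this node"; see the
    // LocalTransport and NettyTransport implementations below.
    List<String> local = transport.getLocalAddresses();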


@@ -92,7 +92,6 @@ public class TransportModule extends AbstractModule {
         }
         bind(NamedWriteableRegistry.class).asEagerSingleton();
         if (configuredTransport != null) {
             logger.info("Using [{}] as transport, overridden by [{}]", configuredTransport.getName(), configuredTransportSource);
             bind(Transport.class).to(configuredTransport).asEagerSingleton();


@@ -40,10 +40,7 @@ import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.threadpool.ThreadPool;

 import java.io.IOException;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.LinkedHashMap;
-import java.util.Map;
+import java.util.*;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -221,6 +218,10 @@ public class TransportService extends AbstractLifecycleComponent<TransportService> {
         return transport.boundAddress();
     }

+    public List<String> getLocalAddresses() {
+        return transport.getLocalAddresses();
+    }
+
     public boolean nodeConnected(DiscoveryNode node) {
         return node.equals(localNode) || transport.nodeConnected(node);
     }
@@ -383,8 +384,8 @@ public class TransportService extends AbstractLifecycleComponent<TransportService> {
         return requestIds.getAndIncrement();
     }

-    public TransportAddress[] addressesFromString(String address) throws Exception {
-        return transport.addressesFromString(address);
+    public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception {
+        return transport.addressesFromString(address, perAddressLimit);
     }

     /**


@@ -41,8 +41,7 @@ import org.elasticsearch.transport.*;
 import org.elasticsearch.transport.support.TransportStatus;

 import java.io.IOException;
-import java.util.Collections;
-import java.util.Map;
+import java.util.*;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -57,14 +56,13 @@ import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
 public class LocalTransport extends AbstractLifecycleComponent<Transport> implements Transport {

     public static final String LOCAL_TRANSPORT_THREAD_NAME_PREFIX = "local_transport";
     private final ThreadPool threadPool;
     private final ThreadPoolExecutor workers;
     private final Version version;
     private volatile TransportServiceAdapter transportServiceAdapter;
     private volatile BoundTransportAddress boundAddress;
     private volatile LocalTransportAddress localAddress;
-    private final static ConcurrentMap<TransportAddress, LocalTransport> transports = newConcurrentMap();
+    private final static ConcurrentMap<LocalTransportAddress, LocalTransport> transports = newConcurrentMap();
     private static final AtomicLong transportAddressIdGenerator = new AtomicLong();
     private final ConcurrentMap<DiscoveryNode, LocalTransport> connectedNodes = newConcurrentMap();
     private final NamedWriteableRegistry namedWriteableRegistry;
@@ -78,7 +76,6 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implements Transport {
         super(settings);
         this.threadPool = threadPool;
         this.version = version;
         int workerCount = this.settings.getAsInt(TRANSPORT_LOCAL_WORKERS, EsExecutors.boundedNumberOfProcessors(settings));
         int queueSize = this.settings.getAsInt(TRANSPORT_LOCAL_QUEUE, -1);
         logger.debug("creating [{}] workers, queue_size [{}]", workerCount, queueSize);
@@ -88,7 +85,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implements Transport {
     }

     @Override
-    public TransportAddress[] addressesFromString(String address) {
+    public TransportAddress[] addressesFromString(String address, int perAddressLimit) {
         return new TransportAddress[]{new LocalTransportAddress(address)};
     }
@@ -359,4 +356,9 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implements Transport {
             logger.error("failed to handle exception response [{}]", t, handler);
         }
     }
+
+    @Override
+    public List<String> getLocalAddresses() {
+        return Collections.singletonList("0.0.0.0");
+    }
 }


@@ -21,8 +21,8 @@ package org.elasticsearch.transport.netty;

 import com.google.common.base.Charsets;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -41,6 +41,7 @@ import org.elasticsearch.common.metrics.CounterMetric;
 import org.elasticsearch.common.netty.NettyUtils;
 import org.elasticsearch.common.netty.OpenChannelsHandler;
 import org.elasticsearch.common.netty.ReleaseChannelFutureListener;
+import org.elasticsearch.common.network.NetworkAddress;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.network.NetworkUtils;
 import org.elasticsearch.common.settings.Settings;
@@ -74,6 +75,7 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.SocketAddress;
+import java.net.UnknownHostException;
 import java.nio.channels.CancelledKeyException;
 import java.util.*;
 import java.util.concurrent.*;
@@ -81,6 +83,8 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;

 import static org.elasticsearch.common.network.NetworkService.TcpSettings.*;
 import static org.elasticsearch.common.settings.Settings.settingsBuilder;
@@ -403,6 +407,13 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
         } catch (IOException e) {
             throw new BindTransportException("Failed to resolve host [" + bindHost + "]", e);
         }
+        if (logger.isDebugEnabled()) {
+            String[] addresses = new String[hostAddresses.length];
+            for (int i = 0; i < hostAddresses.length; i++) {
+                addresses[i] = NetworkAddress.format(hostAddresses[i]);
+            }
+            logger.debug("binding server bootstrap to: {}", addresses);
+        }
         for (InetAddress hostAddress : hostAddresses) {
             bindServerBootstrap(name, hostAddress, settings);
         }
@@ -413,7 +424,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
         String port = settings.get("port");
         PortsRange portsRange = new PortsRange(port);
         final AtomicReference<Exception> lastException = new AtomicReference<>();
-        final AtomicReference<SocketAddress> boundSocket = new AtomicReference<>();
+        final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
         boolean success = portsRange.iterate(new PortsRange.PortCallback() {
             @Override
             public boolean onPortNumber(int portNumber) {
@@ -426,7 +437,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
                         serverChannels.put(name, list);
                     }
                     list.add(channel);
-                    boundSocket.set(channel.getLocalAddress());
+                    boundSocket.set((InetSocketAddress)channel.getLocalAddress());
                 }
             } catch (Exception e) {
                 lastException.set(e);
@@ -440,7 +451,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
         }
         if (!DEFAULT_PROFILE.equals(name)) {
-            InetSocketAddress boundAddress = (InetSocketAddress) boundSocket.get();
+            InetSocketAddress boundAddress = boundSocket.get();
             int publishPort = settings.getAsInt("publish_port", boundAddress.getPort());
             String publishHost = settings.get("publish_host", boundAddress.getHostString());
             InetSocketAddress publishAddress = createPublishAddress(publishHost, publishPort);
@@ -448,7 +459,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
             profileBoundAddresses.putIfAbsent(name, new BoundTransportAddress(new InetSocketTransportAddress(boundAddress), new InetSocketTransportAddress(publishAddress)));
         }
-        logger.info("Bound profile [{}] to address [{}]", name, boundSocket.get());
+        logger.info("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get()));
     }

     private void createServerBootstrap(String name, Settings settings) {
@@ -496,7 +507,6 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
         serverBootstrap.setOption("child.receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
         serverBootstrap.setOption("reuseAddress", reuseAddress);
         serverBootstrap.setOption("child.reuseAddress", reuseAddress);
         serverBootstraps.put(name, serverBootstrap);
     }
@@ -579,37 +589,67 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
     }

     @Override
-    public TransportAddress[] addressesFromString(String address) throws Exception {
-        int index = address.indexOf('[');
-        if (index != -1) {
-            String host = address.substring(0, index);
-            Set<String> ports = Strings.commaDelimitedListToSet(address.substring(index + 1, address.indexOf(']')));
-            List<TransportAddress> addresses = Lists.newArrayList();
-            for (String port : ports) {
-                int[] iPorts = new PortsRange(port).ports();
-                for (int iPort : iPorts) {
-                    addresses.add(new InetSocketTransportAddress(host, iPort));
-                }
-            }
-            return addresses.toArray(new TransportAddress[addresses.size()]);
-        } else {
-            index = address.lastIndexOf(':');
-            if (index == -1) {
-                List<TransportAddress> addresses = Lists.newArrayList();
-                String defaultPort = settings.get("transport.profiles.default.port", settings.get("transport.netty.port", this.settings.get("transport.tcp.port", DEFAULT_PORT_RANGE)));
-                int[] iPorts = new PortsRange(defaultPort).ports();
-                for (int iPort : iPorts) {
-                    addresses.add(new InetSocketTransportAddress(address, iPort));
-                }
-                return addresses.toArray(new TransportAddress[addresses.size()]);
-            } else {
-                String host = address.substring(0, index);
-                int port = Integer.parseInt(address.substring(index + 1));
-                return new TransportAddress[]{new InetSocketTransportAddress(host, port)};
-            }
-        }
+    public TransportAddress[] addressesFromString(String address, int perAddressLimit) throws Exception {
+        return parse(address, settings.get("transport.profiles.default.port",
+                                settings.get("transport.netty.port",
+                                settings.get("transport.tcp.port",
+                                DEFAULT_PORT_RANGE))), perAddressLimit);
+    }
+
+    // this code is a take on guava's HostAndPort, like a HostAndPortRange
+
+    // pattern for validating ipv6 bracketed addresses.
+    // not perfect, but PortsRange should take care of any port range validation, not a regex
+    private static final Pattern BRACKET_PATTERN = Pattern.compile("^\\[(.*:.*)\\](?::([\\d\\-]*))?$");
+
+    /** parse a hostname+port range spec into its equivalent addresses */
+    static TransportAddress[] parse(String hostPortString, String defaultPortRange, int perAddressLimit) throws UnknownHostException {
+        Objects.requireNonNull(hostPortString);
+        String host;
+        String portString = null;
+
+        if (hostPortString.startsWith("[")) {
+            // Parse a bracketed host, typically an IPv6 literal.
+            Matcher matcher = BRACKET_PATTERN.matcher(hostPortString);
+            if (!matcher.matches()) {
+                throw new IllegalArgumentException("Invalid bracketed host/port range: " + hostPortString);
+            }
+            host = matcher.group(1);
+            portString = matcher.group(2); // could be null
+        } else {
+            int colonPos = hostPortString.indexOf(':');
+            if (colonPos >= 0 && hostPortString.indexOf(':', colonPos + 1) == -1) {
+                // Exactly 1 colon. Split into host:port.
+                host = hostPortString.substring(0, colonPos);
+                portString = hostPortString.substring(colonPos + 1);
+            } else {
+                // 0 or 2+ colons. Bare hostname or IPv6 literal.
+                host = hostPortString;
+                // 2+ colons and not bracketed: exception
+                if (colonPos >= 0) {
+                    throw new IllegalArgumentException("IPv6 addresses must be bracketed: " + hostPortString);
+                }
+            }
+        }
+
+        // if port isn't specified, fill with the default
+        if (portString == null || portString.isEmpty()) {
+            portString = defaultPortRange;
+        }
+
+        // generate address for each port in the range
+        Set<InetAddress> addresses = new HashSet<>(Arrays.asList(InetAddress.getAllByName(host)));
+        List<TransportAddress> transportAddresses = new ArrayList<>();
+        int[] ports = new PortsRange(portString).ports();
+        int limit = Math.min(ports.length, perAddressLimit);
+        for (int i = 0; i < limit; i++) {
+            for (InetAddress address : addresses) {
+                transportAddresses.add(new InetSocketTransportAddress(address, ports[i]));
+            }
+        }
+        return transportAddresses.toArray(new TransportAddress[transportAddresses.size()]);
+    }
     @Override
     public boolean addressSupported(Class<? extends TransportAddress> address) {
         return InetSocketTransportAddress.class.equals(address);
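To make the parsing rules above concrete, a hedged sketch of what the new parse method accepts and rejects (the inputs and the default range "9300-9400" are illustrative; hostname results naturally depend on DNS resolution):

    // Bare host: no port given, so the default range applies, capped by perAddressLimit.
    parse("localhost", "9300-9400", 1);            // one address per A/AAAA record, port 9300 only
    // host:port and host:range forms.
    parse("127.0.0.1:9300", "9300-9400", 1);       // 127.0.0.1:9300
    parse("127.0.0.1:9300-9305", "9300-9400", 10); // ports 9300..9305
    // IPv6 literals must be bracketed; an optional :port or :range follows the bracket.
    parse("[::1]:9300", "9300-9400", 1);           // [::1]:9300
    parse("::1", "9300-9400", 1);                  // IllegalArgumentException: not bracketed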
@@ -670,6 +710,17 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implements Transport {
         return channels == null ? 0 : channels.numberOfOpenChannels();
     }

+    @Override
+    public List<String> getLocalAddresses() {
+        List<String> local = new ArrayList<>();
+        local.add("127.0.0.1");
+        // check if v6 is supported, if so, v4 will also work via mapped addresses.
+        if (NetworkUtils.SUPPORTS_V6) {
+            local.add("[::1]"); // may get ports appended!
+        }
+        return local;
+    }
+
     @Override
     public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {

(Binary image file added in this commit, 1.1 KiB; content not shown.)


@@ -4,29 +4,30 @@ NAME
 SYNOPSIS

-    plugin install <name>
+    plugin install <name or url>

 DESCRIPTION

-    This command installs an elasticsearch plugin
-
-    <name> can be one of the official plugins, or refer to a github repository, or to one of the official plugins
-
-    The notation of just specifying a plugin name, downloads an officially supported plugin.
-
-    The notation of 'elasticsearch/plugin/version' allows to easily download a commercial elastic plugin.
-
-    The notation of 'groupId/artifactId/version' refers to community plugins using maven central or sonatype
-
-    The notation of 'username/repository' refers to a github repository.
-
-EXAMPLES
-
-    plugin install analysis-kuromoji
-
-    plugin install elasticsearch/shield/latest
-
+    This command installs an elasticsearch plugin. It can be used as follows:
+
+    Officially supported or commercial plugins require just the plugin name:
+
+    plugin install analysis-icu
+    plugin install shield
+
+    Plugins from GitHub require 'username/repository' or 'username/repository/version':
+
     plugin install lmenezes/elasticsearch-kopf
+    plugin install lmenezes/elasticsearch-kopf/1.5.7
+
+    Plugins from Maven Central or Sonatype require 'groupId/artifactId/version':
+
+    plugin install org.elasticsearch/elasticsearch-mapper-attachments/2.6.0
+
+    Plugins can be installed from a custom URL or file location as follows:
+
+    plugin install http://some.domain.name//my-plugin-1.0.0.zip
+    plugin install file:/path/to/my-plugin-1.0.0.zip

 OFFICIAL PLUGINS
@@ -41,6 +42,7 @@ OFFICIAL PLUGINS
     - cloud-azure
     - cloud-gce
     - delete-by-query
+    - discovery-multicast
     - lang-javascript
     - lang-python
     - mapper-murmur3
@@ -49,8 +51,6 @@ OFFICIAL PLUGINS
 OPTIONS

-    -u,--url     URL to retrieve the plugin from
-
     -t,--timeout Timeout until the plugin download is abort

     -v,--verbose Verbose output


@@ -0,0 +1,61 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.lucene.queries;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.RandomIndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryUtils;
import org.apache.lucene.store.Directory;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
public class MinDocQueryTests extends ESTestCase {

    public void testBasics() {
        MinDocQuery query1 = new MinDocQuery(42);
        MinDocQuery query2 = new MinDocQuery(42);
        MinDocQuery query3 = new MinDocQuery(43);
        QueryUtils.check(query1);
        QueryUtils.checkEqual(query1, query2);
        QueryUtils.checkUnequal(query1, query3);
    }

    public void testRandom() throws IOException {
        final int numDocs = randomIntBetween(10, 200);
        final Document doc = new Document();
        final Directory dir = newDirectory();
        final RandomIndexWriter w = new RandomIndexWriter(getRandom(), dir);
        for (int i = 0; i < numDocs; ++i) {
            w.addDocument(doc);
        }
        final IndexReader reader = w.getReader();
        final IndexSearcher searcher = newSearcher(reader);
        for (int i = 0; i <= numDocs; ++i) {
            assertEquals(numDocs - i, searcher.count(new MinDocQuery(i)));
        }
        w.close();
        reader.close();
        dir.close();
    }
}


@@ -57,7 +57,7 @@ import java.util.Collections;

 import static org.hamcrest.Matchers.equalTo;

 public class ESExceptionTests extends ESTestCase {
-    private static final ToXContent.Params PARAMS = new ToXContent.MapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "true"));
+    private static final ToXContent.Params PARAMS = ToXContent.EMPTY_PARAMS;

     @Test
     public void testStatus() {


@@ -526,7 +526,7 @@ public class ExceptionSerializationTests extends ESTestCase {
         try {
             XContentBuilder builder = XContentFactory.jsonBuilder();
             builder.startObject();
-            x.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(ElasticsearchException.REST_EXCEPTION_SKIP_STACK_TRACE, "true")));
+            x.toXContent(builder, ToXContent.EMPTY_PARAMS);
             builder.endObject();
             return builder.string();
         } catch (IOException e) {
@@ -607,4 +607,20 @@ public class ExceptionSerializationTests extends ESTestCase {
         assertEquals(ex.status(), e.status());
         assertEquals(RestStatus.UNAUTHORIZED, e.status());
     }
+
+    public void testInterruptedException() throws IOException {
+        InterruptedException orig = randomBoolean() ? new InterruptedException("boom") : new InterruptedException();
+        InterruptedException ex = serialize(orig);
+        assertEquals(orig.getMessage(), ex.getMessage());
+    }
+
+    public static class UnknownException extends Exception {
+        public UnknownException(String message) {
+            super(message);
+        }
+
+        public UnknownException(String message, Throwable cause) {
+            super(message, cause);
+        }
+    }
 }

Some files were not shown because too many files have changed in this diff.