Merge branch 'master' into remove_multicast

Ryan Ernst 2016-02-01 07:25:45 -08:00
commit 3787f437ec
286 changed files with 6534 additions and 7133 deletions

View File

@ -61,10 +61,6 @@ public class ForbiddenPatternsTask extends DefaultTask {
// add mandatory rules
patterns.put('nocommit', /nocommit/)
patterns.put('tab', /\t/)
patterns.put('wildcard imports', /^\s*import.*\.\*/)
// We don't use Java serialization so we fail if it looks like we're trying to.
patterns.put('declares serialVersionUID', /serialVersionUID/)
patterns.put('references Serializable', /java\.io\.Serializable/)
inputs.property("excludes", filesFilter.excludes)
inputs.property("rules", patterns)

View File

@ -30,9 +30,9 @@ class PrecommitTasks {
/** Adds a precommit task, which depends on non-test verification tasks. */
public static Task create(Project project, boolean includeDependencyLicenses) {
List<Task> precommitTasks = [
configureForbiddenApis(project),
configureCheckstyle(project),
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
project.tasks.create('jarHell', JarHellTask.class),
@ -83,4 +83,25 @@ class PrecommitTasks {
forbiddenApis.group = "" // clear group, so this does not show up under verification tasks
return forbiddenApis
}
private static Task configureCheckstyle(Project project) {
Task checkstyleTask = project.tasks.create('checkstyle')
// Apply the checkstyle plugin to create `checkstyleMain` and `checkstyleTest`. It only
// creates them if there is main or test code to check, and it makes `check` depend
// on them. But we want `precommit` to depend on `checkstyle`, which in turn depends on
// them, so we have to rewire those dependencies.
project.pluginManager.apply('checkstyle')
project.checkstyle {
config = project.resources.text.fromFile(
PrecommitTasks.getResource('/checkstyle.xml'), 'UTF-8')
}
for (String taskName : ['checkstyleMain', 'checkstyleTest']) {
Task task = project.tasks.findByName(taskName)
if (task != null) {
project.tasks['check'].dependsOn.remove(task)
checkstyleTask.dependsOn(task)
}
}
return checkstyleTask
}
}

View File

@ -0,0 +1,53 @@
<?xml version="1.0"?>
<!DOCTYPE module PUBLIC
"-//Puppy Crawl//DTD Check Configuration 1.3//EN"
"http://www.puppycrawl.com/dtds/configuration_1_3.dtd">
<module name="Checker">
<property name="charset" value="UTF-8" />
<module name="TreeWalker">
<!-- ~3500 violations
<module name="LineLength">
<property name="max" value="140"/>
</module>
-->
<module name="AvoidStarImport" />
<!-- Doesn't pass, but we could make it pass pretty quickly.
<module name="UnusedImports">
The next property is optional. If we remove it then imports that are
only referenced by Javadoc cause the check to fail.
<property name="processJavadoc" value="true" />
</module>
-->
<!-- Non-inner classes must be in files that match their names. -->
<module name="OuterTypeFilename" />
<!-- No line wraps inside of import and package statements. -->
<module name="NoLineWrap" />
<!-- Each java file has only one outer class -->
<module name="OneTopLevelClass" />
<!-- The suffix L is preferred, because the letter l (ell) is often
hard to distinguish from the digit 1 (one). -->
<module name="UpperEll"/>
<!-- We don't use Java's builtin serialization and we suppress all warnings
about it. The flip side of that coin is that we shouldn't _try_ to use
it. We can't outright ban it with ForbiddenApis because it complains about
every place we reference a class that implements Serializable, like String or
Exception.
-->
<module name="RegexpSinglelineJava">
<property name="format" value="serialVersionUID" />
<property name="message" value="Do not declare serialVersionUID." />
<property name="ignoreComments" value="true" />
</module>
<module name="RegexpSinglelineJava">
<property name="format" value="java\.io\.Serializable" />
<property name="message" value="References java.io.Serializable." />
<property name="ignoreComments" value="true" />
</module>
<!-- end Orwellian suppression of Serializable -->
</module>
</module>
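For illustration, a minimal sketch (the class name is hypothetical) of the kind of code the two RegexpSinglelineJava modules above flag, covering the same patterns that were removed from ForbiddenPatternsTask:

// Flagged by the "References java.io.Serializable." rule (the import line matches java\.io\.Serializable).
import java.io.Serializable;

public class ExampleMessage implements Serializable {
    // Flagged by the "Do not declare serialVersionUID." rule.
    private static final long serialVersionUID = 1L;
}

Because ignoreComments is set to true, mentioning either pattern in a comment (as done here) does not trip the checks.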

View File

@ -320,7 +320,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
public void onFailure(Throwable t) {
if (t instanceof RetryOnReplicaException) {
logger.trace("Retrying operation on replica, action [{}], request [{}]", t, transportReplicaAction, request);
final ThreadContext threadContext = threadPool.getThreadContext();
final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
@ -528,7 +527,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
finishAsFailed(failure);
return;
}
final ThreadContext threadContext = threadPool.getThreadContext();
final ThreadContext.StoredContext context = threadPool.getThreadContext().newStoredContext();
observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
@ -898,7 +896,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
onReplicaFailure(nodeId, exp);
} else {
String message = String.format(Locale.ROOT, "failed to perform %s on replica on node %s", transportReplicaAction, node);
logger.warn("{} {}", exp, shardId, message);
logger.warn("[{}] {}", exp, shardId, message);
shardStateAction.shardFailed(
shard,
indexUUID,

View File

@ -20,7 +20,9 @@
package org.elasticsearch.bootstrap;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.SuppressForbidden;
@ -40,6 +42,7 @@ import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.nio.file.Path;
import java.util.Locale;
@ -114,7 +117,11 @@ final class Bootstrap {
public boolean handle(int code) {
if (CTRL_CLOSE_EVENT == code) {
logger.info("running graceful exit on windows");
Bootstrap.stop();
try {
Bootstrap.stop();
} catch (IOException e) {
throw new ElasticsearchException("failed to stop node", e);
}
return true;
}
return false;
@ -153,8 +160,10 @@ final class Bootstrap {
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
if (node != null) {
node.close();
try {
IOUtils.close(node);
} catch (IOException ex) {
throw new ElasticsearchException("failed to stop node", ex);
}
}
});
@ -221,9 +230,9 @@ final class Bootstrap {
keepAliveThread.start();
}
static void stop() {
static void stop() throws IOException {
try {
Releasables.close(INSTANCE.node);
IOUtils.close(INSTANCE.node);
} finally {
INSTANCE.keepAliveLatch.countDown();
}
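Both shutdown paths now go through Lucene's IOUtils.close, which ignores null arguments and rethrows the first IOException with later ones attached as suppressed, so the old explicit null check on the node is no longer needed. A minimal standalone sketch of that behavior (the Closeables here are placeholders):

import java.io.Closeable;
import java.io.IOException;
import org.apache.lucene.util.IOUtils;

public class CloseSketch {
    public static void main(String[] args) {
        Closeable node = null;                                       // e.g. a node that was never started
        Closeable failing = () -> { throw new IOException("boom"); };
        try {
            IOUtils.close(node, failing);                            // null entry skipped, IOException rethrown
        } catch (IOException e) {
            System.err.println("failed to stop node: " + e.getMessage());
        }
    }
}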

View File

@ -19,6 +19,8 @@
package org.elasticsearch.bootstrap;
import java.io.IOException;
/**
* This class starts elasticsearch.
*/
@ -48,7 +50,7 @@ public final class Elasticsearch {
*
* NOTE: If this method is renamed and/or moved, make sure to update service.bat!
*/
static void close(String[] args) {
static void close(String[] args) throws IOException {
Bootstrap.stop();
}
}
}

View File

@ -25,7 +25,7 @@ import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.plugins.PluginInfo;
import org.elasticsearch.transport.TransportSettings;
@ -270,9 +270,7 @@ final class Security {
static void addBindPermissions(Permissions policy, Settings settings) throws IOException {
// http is simple
String httpRange = settings.get("http.netty.port",
settings.get("http.port",
NettyHttpServerTransport.DEFAULT_PORT_RANGE));
String httpRange = HttpTransportSettings.SETTING_HTTP_PORT.get(settings).getPortRangeString();
// listen is always called with 'localhost' but use wildcard to be sure, no name service is consulted.
// see SocketPermission implies() code
policy.add(new SocketPermission("*:" + httpRange, "listen,resolve"));
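As a concrete illustration of the replacement: SETTING_HTTP_PORT.get(settings) yields a PortsRange, and its getPortRangeString() (added at the bottom of this commit) hands back the raw range string used in the SocketPermission. A tiny sketch with a hypothetical range value:

import java.net.SocketPermission;

public class BindPermissionSketch {
    public static void main(String[] args) {
        // What getPortRangeString() would return for "http.port: 9200-9300" (illustrative value).
        String httpRange = "9200-9300";
        // Same permission the policy gains: listen/resolve on the whole HTTP port range, wildcard host.
        SocketPermission permission = new SocketPermission("*:" + httpRange, "listen,resolve");
        System.out.println(permission);
    }
}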

View File

@ -22,9 +22,12 @@ package org.elasticsearch.cache.recycler;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.recycler.AbstractRecyclerC;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.threadpool.ThreadPool;
@ -38,17 +41,22 @@ import static org.elasticsearch.common.recycler.Recyclers.dequeFactory;
import static org.elasticsearch.common.recycler.Recyclers.none;
/** A recycler of fixed-size pages. */
public class PageCacheRecycler extends AbstractComponent {
public class PageCacheRecycler extends AbstractComponent implements Releasable {
public static final String TYPE = "recycler.page.type";
public static final String LIMIT_HEAP = "recycler.page.limit.heap";
public static final String WEIGHT = "recycler.page.weight";
public static final Setting<Type> TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING = Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", false, Setting.Scope.CLUSTER);
public static final Setting<Double> WEIGHT_BYTES_SETTING = Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, false, Setting.Scope.CLUSTER);
public static final Setting<Double> WEIGHT_LONG_SETTING = Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, false, Setting.Scope.CLUSTER);
public static final Setting<Double> WEIGHT_INT_SETTING = Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, false, Setting.Scope.CLUSTER);
// object pages are less useful to us so we give them a lower weight by default
public static final Setting<Double> WEIGHT_OBJECTS_SETTING = Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, false, Setting.Scope.CLUSTER);
private final Recycler<byte[]> bytePage;
private final Recycler<int[]> intPage;
private final Recycler<long[]> longPage;
private final Recycler<Object[]> objectPage;
@Override
public void close() {
bytePage.close();
intPage.close();
@ -71,8 +79,8 @@ public class PageCacheRecycler extends AbstractComponent {
@Inject
public PageCacheRecycler(Settings settings, ThreadPool threadPool) {
super(settings);
final Type type = Type.parse(settings.get(TYPE));
final long limit = settings.getAsMemory(LIMIT_HEAP, "10%").bytes();
final Type type = TYPE_SETTING.get(settings);
final long limit = LIMIT_HEAP_SETTING.get(settings).bytes();
final int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings);
final int searchThreadPoolSize = maximumSearchThreadPoolSize(threadPool, settings);
@ -89,11 +97,10 @@ public class PageCacheRecycler extends AbstractComponent {
// to direct ByteBuffers or sun.misc.Unsafe on a byte[] but this would have other issues
// that would need to be addressed such as garbage collection of native memory or safety
// of Unsafe writes.
final double bytesWeight = settings.getAsDouble(WEIGHT + ".bytes", 1d);
final double intsWeight = settings.getAsDouble(WEIGHT + ".ints", 1d);
final double longsWeight = settings.getAsDouble(WEIGHT + ".longs", 1d);
// object pages are less useful to us so we give them a lower weight by default
final double objectsWeight = settings.getAsDouble(WEIGHT + ".objects", 0.1d);
final double bytesWeight = WEIGHT_BYTES_SETTING.get(settings);
final double intsWeight = WEIGHT_INT_SETTING.get(settings);
final double longsWeight = WEIGHT_LONG_SETTING.get(settings);
final double objectsWeight = WEIGHT_OBJECTS_SETTING.get(settings);
final double totalWeight = bytesWeight + intsWeight + longsWeight + objectsWeight;
final int maxPageCount = (int) Math.min(Integer.MAX_VALUE, limit / BigArrays.PAGE_SIZE_IN_BYTES);
@ -188,7 +195,7 @@ public class PageCacheRecycler extends AbstractComponent {
return recycler;
}
public static enum Type {
public enum Type {
QUEUE {
@Override
<T> Recycler<T> build(Recycler.C<T> c, int limit, int estimatedThreadPoolSize, int availableProcessors) {
@ -209,9 +216,6 @@ public class PageCacheRecycler extends AbstractComponent {
};
public static Type parse(String type) {
if (Strings.isNullOrEmpty(type)) {
return CONCURRENT;
}
try {
return Type.valueOf(type.toUpperCase(Locale.ROOT));
} catch (IllegalArgumentException e) {
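The string keys read through settings.getAsMemory/getAsDouble are replaced by typed Setting constants that carry their own defaults and parsers. A rough sketch of reading them back, assuming the Settings.settingsBuilder() factory used elsewhere in this commit; the override values are purely illustrative:

import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.common.settings.Settings;

public class RecyclerSettingsSketch {
    public static void main(String[] args) {
        // Hypothetical overrides; absent keys fall back to the defaults baked into each Setting.
        Settings settings = Settings.settingsBuilder()
                .put("cache.recycler.page.type", "queue")
                .put("cache.recycler.page.weight.objects", "0.5")
                .build();

        PageCacheRecycler.Type type = PageCacheRecycler.TYPE_SETTING.get(settings);     // QUEUE
        double objectsWeight = PageCacheRecycler.WEIGHT_OBJECTS_SETTING.get(settings);  // 0.5
        double bytesWeight = PageCacheRecycler.WEIGHT_BYTES_SETTING.get(settings);      // default 1.0
        System.out.println(type + " " + objectsWeight + " " + bytesWeight);
    }
}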

View File

@ -112,7 +112,7 @@ public class TransportClient extends AbstractClient {
final Settings.Builder settingsBuilder = settingsBuilder()
.put(NettyTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval
.put(InternalSettingsPreparer.prepareSettings(settings))
.put(NettyTransport.NETWORK_SERVER.getKey(), false)
.put(NetworkService.NETWORK_SERVER.getKey(), false)
.put(Node.NODE_CLIENT_SETTING.getKey(), true)
.put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE);
return new PluginsService(settingsBuilder.build(), null, null, pluginClasses);

View File

@ -302,7 +302,7 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
}
public long getCreationDate() {
return settings.getAsLong(SETTING_CREATION_DATE, -1l);
return settings.getAsLong(SETTING_CREATION_DATE, -1L);
}
public State getState() {

View File

@ -106,7 +106,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
private final Reason reason;
private final long unassignedTimeMillis; // used for display and log messages, in milliseconds
private final long unassignedTimeNanos; // in nanoseconds, used to calculate delay for delayed shard allocation
private volatile long lastComputedLeftDelayNanos = 0l; // how long to delay shard allocation, not serialized (always positive, 0 means no delay)
private volatile long lastComputedLeftDelayNanos = 0L; // how long to delay shard allocation, not serialized (always positive, 0 means no delay)
private final String message;
private final Throwable failure;
@ -217,7 +217,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
return 0;
}
TimeValue delayTimeout = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexSettings, settings);
return Math.max(0l, delayTimeout.nanos());
return Math.max(0L, delayTimeout.nanos());
}
/**
@ -236,8 +236,8 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
public long updateDelay(long nanoTimeNow, Settings settings, Settings indexSettings) {
long delayTimeoutNanos = getAllocationDelayTimeoutSettingNanos(settings, indexSettings);
final long newComputedLeftDelayNanos;
if (delayTimeoutNanos == 0l) {
newComputedLeftDelayNanos = 0l;
if (delayTimeoutNanos == 0L) {
newComputedLeftDelayNanos = 0L;
} else {
assert nanoTimeNow >= unassignedTimeNanos;
newComputedLeftDelayNanos = Math.max(0L, delayTimeoutNanos - (nanoTimeNow - unassignedTimeNanos));
@ -277,7 +277,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
}
}
}
return minDelaySetting == Long.MAX_VALUE ? 0l : minDelaySetting;
return minDelaySetting == Long.MAX_VALUE ? 0L : minDelaySetting;
}
@ -294,7 +294,7 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
}
}
}
return nextDelay == Long.MAX_VALUE ? 0l : nextDelay;
return nextDelay == Long.MAX_VALUE ? 0L : nextDelay;
}
public String shortSummary() {

View File

@ -50,7 +50,7 @@ public class HelpPrinter {
}
});
} catch (IOException ioe) {
ioe.printStackTrace(terminal.writer());
throw new RuntimeException(ioe);
}
terminal.println();
}

View File

@ -132,8 +132,6 @@ public abstract class Terminal {
protected abstract void doPrint(String msg, Object... args);
public abstract PrintWriter writer();
private static class ConsoleTerminal extends Terminal {
final Console console = System.console();
@ -158,11 +156,6 @@ public abstract class Terminal {
return console.readPassword(text, args);
}
@Override
public PrintWriter writer() {
return console.writer();
}
@Override
public void printStackTrace(Throwable t) {
t.printStackTrace(console.writer());
@ -199,10 +192,5 @@ public abstract class Terminal {
public void printStackTrace(Throwable t) {
t.printStackTrace(printWriter);
}
@Override
public PrintWriter writer() {
return printWriter;
}
}
}

View File

@ -179,7 +179,7 @@ public class GeoUtils {
final double width = Math.sqrt((meters*meters)/(ratio*ratio)); // convert to cell width
final long part = Math.round(Math.ceil(EARTH_EQUATOR / width));
final int level = Long.SIZE - Long.numberOfLeadingZeros(part)-1; // (log_2)
return (part<=(1l<<level)) ?level :(level+1); // adjust level
return (part<=(1L<<level)) ?level :(level+1); // adjust level
}
}

View File

@ -1,488 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.http.client;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchCorruptionException;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.hash.MessageDigests;
import org.elasticsearch.common.unit.TimeValue;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLConnection;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.attribute.FileTime;
import java.util.List;
/**
*
*/
public class HttpDownloadHelper {
private boolean useTimestamp = false;
private boolean skipExisting = false;
public boolean download(URL source, Path dest, @Nullable DownloadProgress progress, TimeValue timeout) throws Exception {
if (Files.exists(dest) && skipExisting) {
return true;
}
//don't do any progress, unless asked
if (progress == null) {
progress = new NullProgress();
}
//set the timestamp to the file date.
long timestamp = 0;
boolean hasTimestamp = false;
if (useTimestamp && Files.exists(dest) ) {
timestamp = Files.getLastModifiedTime(dest).toMillis();
hasTimestamp = true;
}
GetThread getThread = new GetThread(source, dest, hasTimestamp, timestamp, progress);
try {
getThread.setDaemon(true);
getThread.start();
getThread.join(timeout.millis());
if (getThread.isAlive()) {
throw new ElasticsearchTimeoutException("The GET operation took longer than " + timeout + ", stopping it.");
}
}
catch (InterruptedException ie) {
return false;
} finally {
getThread.closeStreams();
}
return getThread.wasSuccessful();
}
public interface Checksummer {
/** Return the hex string for the given byte array */
String checksum(byte[] filebytes);
/** Human-readable name for the checksum format */
String name();
}
/** Checksummer for SHA1 */
public static Checksummer SHA1_CHECKSUM = new Checksummer() {
@Override
public String checksum(byte[] filebytes) {
return MessageDigests.toHexString(MessageDigests.sha1().digest(filebytes));
}
@Override
public String name() {
return "SHA1";
}
};
/** Checksummer for MD5 */
public static Checksummer MD5_CHECKSUM = new Checksummer() {
@Override
public String checksum(byte[] filebytes) {
return MessageDigests.toHexString(MessageDigests.md5().digest(filebytes));
}
@Override
public String name() {
return "MD5";
}
};
/**
* Download the given checksum URL to the destination and check the checksum
* @param checksumURL URL for the checksum file
* @param originalFile original file to calculate checksum of
* @param checksumFile destination to download the checksum file to
* @param hashFunc class used to calculate the checksum of the file
* @return true if the checksum was validated, false if it did not exist
* @throws Exception if the checksum failed to match
*/
public boolean downloadAndVerifyChecksum(URL checksumURL, Path originalFile, Path checksumFile,
@Nullable DownloadProgress progress,
TimeValue timeout, Checksummer hashFunc) throws Exception {
try {
if (download(checksumURL, checksumFile, progress, timeout)) {
byte[] fileBytes = Files.readAllBytes(originalFile);
List<String> checksumLines = Files.readAllLines(checksumFile, StandardCharsets.UTF_8);
if (checksumLines.size() != 1) {
throw new ElasticsearchCorruptionException("invalid format for checksum file (" +
hashFunc.name() + "), expected 1 line, got: " + checksumLines.size());
}
String checksumHex = checksumLines.get(0);
String fileHex = hashFunc.checksum(fileBytes);
if (fileHex.equals(checksumHex) == false) {
throw new ElasticsearchCorruptionException("incorrect hash (" + hashFunc.name() +
"), file hash: [" + fileHex + "], expected: [" + checksumHex + "]");
}
return true;
}
} catch (FileNotFoundException | NoSuchFileException e) {
// checksum file doesn't exist
return false;
} finally {
IOUtils.deleteFilesIgnoringExceptions(checksumFile);
}
return false;
}
/**
* Interface implemented for reporting
* progress of downloading.
*/
public interface DownloadProgress {
/**
* begin a download
*/
void beginDownload();
/**
* tick handler
*/
void onTick();
/**
* end a download
*/
void endDownload();
}
/**
* do nothing with progress info
*/
public static class NullProgress implements DownloadProgress {
/**
* begin a download
*/
@Override
public void beginDownload() {
}
/**
* tick handler
*/
@Override
public void onTick() {
}
/**
* end a download
*/
@Override
public void endDownload() {
}
}
/**
* verbose progress system prints to some output stream
*/
public static class VerboseProgress implements DownloadProgress {
private int dots = 0;
// CheckStyle:VisibilityModifier OFF - bc
PrintWriter writer;
// CheckStyle:VisibilityModifier ON
/**
* Construct a verbose progress reporter.
*
* @param writer the output stream.
*/
public VerboseProgress(PrintWriter writer) {
this.writer = writer;
}
/**
* begin a download
*/
@Override
public void beginDownload() {
writer.print("Downloading ");
dots = 0;
}
/**
* tick handler
*/
@Override
public void onTick() {
writer.print(".");
if (dots++ > 50) {
writer.flush();
dots = 0;
}
}
/**
* end a download
*/
@Override
public void endDownload() {
writer.println("DONE");
writer.flush();
}
}
private class GetThread extends Thread {
private final URL source;
private final Path dest;
private final boolean hasTimestamp;
private final long timestamp;
private final DownloadProgress progress;
private boolean success = false;
private IOException ioexception = null;
private InputStream is = null;
private OutputStream os = null;
private URLConnection connection;
private int redirections = 0;
GetThread(URL source, Path dest, boolean h, long t, DownloadProgress p) {
this.source = source;
this.dest = dest;
hasTimestamp = h;
timestamp = t;
progress = p;
}
@Override
public void run() {
try {
success = get();
} catch (IOException ioex) {
ioexception = ioex;
}
}
private boolean get() throws IOException {
connection = openConnection(source);
if (connection == null) {
return false;
}
boolean downloadSucceeded = downloadFile();
//if (and only if) the use file time option is set, then
//the saved file now has its timestamp set to that of the
//downloaded file
if (downloadSucceeded && useTimestamp) {
updateTimeStamp();
}
return downloadSucceeded;
}
private boolean redirectionAllowed(URL aSource, URL aDest) throws IOException {
// Argh, github does this...
// if (!(aSource.getProtocol().equals(aDest.getProtocol()) || ("http"
// .equals(aSource.getProtocol()) && "https".equals(aDest
// .getProtocol())))) {
// String message = "Redirection detected from "
// + aSource.getProtocol() + " to " + aDest.getProtocol()
// + ". Protocol switch unsafe, not allowed.";
// throw new IOException(message);
// }
redirections++;
if (redirections > 5) {
String message = "More than " + 5 + " times redirected, giving up";
throw new IOException(message);
}
return true;
}
private URLConnection openConnection(URL aSource) throws IOException {
// set up the URL connection
URLConnection connection = aSource.openConnection();
// modify the headers
// NB: things like user authentication could go in here too.
if (hasTimestamp) {
connection.setIfModifiedSince(timestamp);
}
// in case the plugin manager is its own project, this can become an authenticator
boolean isSecureProcotol = "https".equalsIgnoreCase(aSource.getProtocol());
boolean isAuthInfoSet = !Strings.isNullOrEmpty(aSource.getUserInfo());
if (isAuthInfoSet) {
if (!isSecureProcotol) {
throw new IOException("Basic auth is only supported for HTTPS!");
}
String basicAuth = Base64.encodeBytes(aSource.getUserInfo().getBytes(StandardCharsets.UTF_8));
connection.setRequestProperty("Authorization", "Basic " + basicAuth);
}
if (connection instanceof HttpURLConnection) {
((HttpURLConnection) connection).setInstanceFollowRedirects(false);
connection.setUseCaches(true);
connection.setConnectTimeout(5000);
}
connection.setRequestProperty("ES-Version", Version.CURRENT.toString());
connection.setRequestProperty("ES-Build-Hash", Build.CURRENT.shortHash());
connection.setRequestProperty("User-Agent", "elasticsearch-plugin-manager");
// connect to the remote site (may take some time)
connection.connect();
// First check on a 301 / 302 (moved) response (HTTP only)
if (connection instanceof HttpURLConnection) {
HttpURLConnection httpConnection = (HttpURLConnection) connection;
int responseCode = httpConnection.getResponseCode();
if (responseCode == HttpURLConnection.HTTP_MOVED_PERM ||
responseCode == HttpURLConnection.HTTP_MOVED_TEMP ||
responseCode == HttpURLConnection.HTTP_SEE_OTHER) {
String newLocation = httpConnection.getHeaderField("Location");
URL newURL = new URL(newLocation);
if (!redirectionAllowed(aSource, newURL)) {
return null;
}
return openConnection(newURL);
}
// next test for a 304 result (HTTP only)
long lastModified = httpConnection.getLastModified();
if (responseCode == HttpURLConnection.HTTP_NOT_MODIFIED
|| (lastModified != 0 && hasTimestamp && timestamp >= lastModified)) {
// not modified so no file download. just return
// instead and trace out something so the user
// doesn't think that the download happened when it
// didn't
return null;
}
// test for 401 result (HTTP only)
if (responseCode == HttpURLConnection.HTTP_UNAUTHORIZED) {
String message = "HTTP Authorization failure";
throw new IOException(message);
}
}
//REVISIT: at this point even non HTTP connections may
//support the if-modified-since behaviour -we just check
//the date of the content and skip the write if it is not
//newer. Some protocols (FTP) don't include dates, of
//course.
return connection;
}
private boolean downloadFile() throws FileNotFoundException, IOException {
IOException lastEx = null;
for (int i = 0; i < 3; i++) {
// this three-attempt trick is to get round quirks in different
// Java implementations. Some of them take a few goes to bind
// properly; we ignore the first couple of such failures.
try {
is = connection.getInputStream();
break;
} catch (IOException ex) {
lastEx = ex;
}
}
if (is == null) {
throw lastEx;
}
os = Files.newOutputStream(dest);
progress.beginDownload();
boolean finished = false;
try {
byte[] buffer = new byte[1024 * 100];
int length;
while (!isInterrupted() && (length = is.read(buffer)) >= 0) {
os.write(buffer, 0, length);
progress.onTick();
}
finished = !isInterrupted();
} finally {
if (!finished) {
// we have started to (over)write dest, but failed.
// Try to delete the garbage we'd otherwise leave
// behind.
IOUtils.closeWhileHandlingException(os, is);
IOUtils.deleteFilesIgnoringExceptions(dest);
} else {
IOUtils.close(os, is);
}
}
progress.endDownload();
return true;
}
private void updateTimeStamp() throws IOException {
long remoteTimestamp = connection.getLastModified();
if (remoteTimestamp != 0) {
Files.setLastModifiedTime(dest, FileTime.fromMillis(remoteTimestamp));
}
}
/**
* Has the download completed successfully?
* <p>
* Re-throws any exception caught during execution.</p>
*/
boolean wasSuccessful() throws IOException {
if (ioexception != null) {
throw ioexception;
}
return success;
}
/**
* Closes streams, interrupts the download, may delete the
* output file.
*/
void closeStreams() throws IOException {
interrupt();
if (success) {
IOUtils.close(is, os);
} else {
IOUtils.closeWhileHandlingException(is, os);
if (dest != null && Files.exists(dest)) {
IOUtils.deleteFilesIgnoringExceptions(dest);
}
}
}
}
}

View File

@ -52,33 +52,6 @@ public final class FileSystemUtils {
private FileSystemUtils() {} // only static methods
/**
* Returns <code>true</code> iff a file under the given root has one of the given extensions. This method
* will traverse directories recursively and will terminate once any of the extensions is found. This
* method will not follow any links.
*
* @param root the root directory to traverse. Must be a directory
* @param extensions the file extensions to look for
* @return <code>true</code> iff a file under the given root has one of the given extensions, otherwise <code>false</code>
* @throws IOException if an IOException occurs or if the given root path is not a directory.
*/
public static boolean hasExtensions(Path root, final String... extensions) throws IOException {
final AtomicBoolean retVal = new AtomicBoolean(false);
Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
for (String extension : extensions) {
if (file.getFileName().toString().endsWith(extension)) {
retVal.set(true);
return FileVisitResult.TERMINATE;
}
}
return super.visitFile(file, attrs);
}
});
return retVal.get();
}
/**
* Returns <code>true</code> iff one of the files exists otherwise <code>false</code>
*/
@ -168,167 +141,6 @@ public final class FileSystemUtils {
return new BufferedReader(reader);
}
/**
* This utility copies the content of a directory (the directory itself excluded) into
* a new directory, but without overwriting existing files.
*
* When a file already exists in the destination dir, the source file is copied into the
* destination directory with a suffix appended if one is set, or the source file is ignored
* if the suffix is not set (null).
* @param source Source directory (for example /tmp/es/src)
* @param destination Destination directory (for example /tmp/es/dst)
* @param suffix When not null, files are copied with a suffix appended to the original name (eg: ".new")
* When null, files are ignored
*/
public static void moveFilesWithoutOverwriting(Path source, final Path destination, final String suffix) throws IOException {
// Create destination dir
Files.createDirectories(destination);
final int configPathRootLevel = source.getNameCount();
// We walk through the file tree from the source directory
Files.walkFileTree(source, new SimpleFileVisitor<Path>() {
private Path buildPath(Path path) {
return destination.resolve(path);
}
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
// We are now in dir. We need to remove root of config files to have a relative path
// If we are not walking in root dir, we might be able to copy its content
// if it does not already exist
if (configPathRootLevel != dir.getNameCount()) {
Path subpath = dir.subpath(configPathRootLevel, dir.getNameCount());
Path path = buildPath(subpath);
if (!Files.exists(path)) {
// We just move the structure to new dir
// we can't do atomic move here since src / dest might be on different mounts?
move(dir, path);
// We just ignore sub files from here
return FileVisitResult.SKIP_SUBTREE;
}
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
Path subpath = null;
if (configPathRootLevel != file.getNameCount()) {
subpath = file.subpath(configPathRootLevel, file.getNameCount());
}
Path path = buildPath(subpath);
if (!Files.exists(path)) {
// We just move the new file to new dir
move(file, path);
} else if (suffix != null) {
if (!isSameFile(file, path)) {
// If it already exists we try to copy this new version appending suffix to its name
path = path.resolveSibling(path.getFileName().toString().concat(suffix));
// We just move the file to new dir but with a new name (appended with suffix)
Files.move(file, path, StandardCopyOption.REPLACE_EXISTING);
}
}
return FileVisitResult.CONTINUE;
}
/**
* Compares the content of two paths by comparing them
*/
private boolean isSameFile(Path first, Path second) throws IOException {
// do quick file size comparison before hashing
boolean sameFileSize = Files.size(first) == Files.size(second);
if (!sameFileSize) {
return false;
}
byte[] firstBytes = Files.readAllBytes(first);
byte[] secondBytes = Files.readAllBytes(second);
return Arrays.equals(firstBytes, secondBytes);
}
});
}
/**
* Copy recursively a dir to a new location
* @param source source dir
* @param destination destination dir
*/
public static void copyDirectoryRecursively(Path source, Path destination) throws IOException {
Files.walkFileTree(source, new TreeCopier(source, destination, false));
}
/**
* Move or rename a file to a target file. This method supports moving a file between
* different filesystems (not supported by Files.move()).
*
* @param source source file
* @param destination destination file
*/
public static void move(Path source, Path destination) throws IOException {
try {
// We can't use atomic move here since source & target can be on different filesystems.
Files.move(source, destination);
} catch (DirectoryNotEmptyException e) {
Files.walkFileTree(source, new TreeCopier(source, destination, true));
}
}
// TODO: note that this will fail if source and target are on different NIO.2 filesystems.
static class TreeCopier extends SimpleFileVisitor<Path> {
private final Path source;
private final Path target;
private final boolean delete;
TreeCopier(Path source, Path target, boolean delete) {
this.source = source;
this.target = target;
this.delete = delete;
}
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
Path newDir = target.resolve(source.relativize(dir));
try {
Files.copy(dir, newDir);
} catch (FileAlreadyExistsException x) {
// We ignore this
} catch (IOException x) {
return SKIP_SUBTREE;
}
return CONTINUE;
}
@Override
public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
if (delete) {
IOUtils.rm(dir);
}
return CONTINUE;
}
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
Path newFile = target.resolve(source.relativize(file));
try {
Files.copy(file, newFile);
if (delete) {
Files.deleteIfExists(file);
}
} catch (IOException x) {
// We ignore this
}
return CONTINUE;
}
}
/**
* Returns an array of all files in the given directory matching.
*/

View File

@ -38,7 +38,8 @@ public final class NotSerializableExceptionWrapper extends ElasticsearchExceptio
private final RestStatus status;
public NotSerializableExceptionWrapper(Throwable other) {
super(other.getMessage(), other.getCause());
super(ElasticsearchException.getExceptionName(other) +
": " + other.getMessage(), other.getCause());
this.name = ElasticsearchException.getExceptionName(other);
this.status = ExceptionsHelper.status(other);
setStackTrace(other.getStackTrace());

View File

@ -21,10 +21,12 @@ package org.elasticsearch.common.lease;
import org.elasticsearch.ElasticsearchException;
import java.io.Closeable;
/**
* Specialization of {@link AutoCloseable} that may only throw an {@link ElasticsearchException}.
*/
public interface Releasable extends AutoCloseable {
public interface Releasable extends Closeable {
@Override
void close();

View File

@ -19,38 +19,24 @@
package org.elasticsearch.common.lease;
import org.apache.lucene.util.IOUtils;
import java.io.IOException;
import java.util.Arrays;
/** Utility methods to work with {@link Releasable}s. */
public enum Releasables {
;
private static void rethrow(Throwable t) {
if (t instanceof RuntimeException) {
throw (RuntimeException) t;
}
if (t instanceof Error) {
throw (Error) t;
}
throw new RuntimeException(t);
}
private static void close(Iterable<? extends Releasable> releasables, boolean ignoreException) {
Throwable th = null;
for (Releasable releasable : releasables) {
if (releasable != null) {
try {
releasable.close();
} catch (Throwable t) {
if (th == null) {
th = t;
}
}
try {
// this does the right thing with respect to adding suppressed exceptions and not wrapping errors, etc.
IOUtils.close(releasables);
} catch (Throwable t) {
if (ignoreException == false) {
IOUtils.reThrowUnchecked(t);
}
}
if (th != null && !ignoreException) {
rethrow(th);
}
}
/** Release the provided {@link Releasable}s. */
@ -99,25 +85,11 @@ public enum Releasables {
* </pre>
*/
public static Releasable wrap(final Iterable<Releasable> releasables) {
return new Releasable() {
@Override
public void close() {
Releasables.close(releasables);
}
};
return () -> close(releasables);
}
/** @see #wrap(Iterable) */
public static Releasable wrap(final Releasable... releasables) {
return new Releasable() {
@Override
public void close() {
Releasables.close(releasables);
}
};
return () -> close(releasables);
}
}
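Since Releasable now extends Closeable and wrap(...) returns a Releasable, a group of releasables composes naturally with try-with-resources. A small hypothetical usage sketch:

import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;

public class ReleasablesSketch {
    public static void main(String[] args) {
        Releasable first = () -> System.out.println("released first");
        Releasable second = () -> System.out.println("released second");
        // wrap(...) is itself a Releasable, so it can guard a block via try-with-resources;
        // both resources are released when the block exits.
        try (Releasable ignored = Releasables.wrap(first, second)) {
            System.out.println("doing work");
        }
    }
}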

View File

@ -25,7 +25,7 @@ import org.apache.log4j.spi.LoggingEvent;
import org.elasticsearch.common.cli.Terminal;
/**
* TerminalAppender logs events to Terminal.DEFAULT. It is used for example by the PluginManagerCliParser.
* TerminalAppender logs events to Terminal.DEFAULT. It is used for example by the PluginCli.
* */
public class TerminalAppender extends AppenderSkeleton {
@Override

View File

@ -49,6 +49,7 @@ public class NetworkService extends AbstractComponent {
s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> GLOBAL_NETWORK_PUBLISHHOST_SETTING = Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING,
s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> NETWORK_SERVER = Setting.boolSetting("network.server", true, false, Setting.Scope.CLUSTER);
public static final class TcpSettings {
public static final Setting<Boolean> TCP_NO_DELAY = Setting.boolSetting("network.tcp.no_delay", true, false, Setting.Scope.CLUSTER);
@ -149,7 +150,7 @@ public class NetworkService extends AbstractComponent {
*/
// TODO: needs to be InetAddress[]
public InetAddress resolvePublishHostAddresses(String publishHosts[]) throws IOException {
if (publishHosts == null) {
if (publishHosts == null || publishHosts.length == 0) {
if (GLOBAL_NETWORK_PUBLISHHOST_SETTING.exists(settings) || GLOBAL_NETWORK_HOST_SETTING.exists(settings)) {
// if we have settings, use them (we have a fallback to GLOBAL_NETWORK_HOST_SETTING inline)
publishHosts = GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);

View File

@ -22,6 +22,7 @@ import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.cluster.ClusterModule;
@ -56,6 +57,7 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
@ -112,9 +114,9 @@ public final class ClusterSettings extends AbstractScopedSettings {
@Override
public boolean hasChanged(Settings current, Settings previous) {
return current.filter(loggerPredicate).getAsMap().equals(previous.filter(loggerPredicate).getAsMap()) == false;
}
}
@Override
@Override
public Settings getValue(Settings current, Settings previous) {
Settings.Builder builder = Settings.builder();
builder.put(current.filter(loggerPredicate).getAsMap());
@ -130,7 +132,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
return builder.build();
}
@Override
@Override
public void apply(Settings value, Settings current, Settings previous) {
for (String key : value.getAsMap().keySet()) {
assert loggerPredicate.test(key);
@ -141,91 +143,109 @@ public final class ClusterSettings extends AbstractScopedSettings {
} else {
ESLoggerFactory.getLogger(component).setLevel(value.get(key));
}
}
}
}
}
};
public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(
Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client
TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.THRESHOLD_SETTING,
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
FsRepository.REPOSITORIES_COMPRESS_SETTING,
FsRepository.REPOSITORIES_LOCATION_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
TransportClientNodesService.CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL, // TODO these transport client settings are kind of odd here and should only be valid if we are a transport client
TransportClientNodesService.CLIENT_TRANSPORT_PING_TIMEOUT,
TransportClientNodesService.CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.THRESHOLD_SETTING,
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
FsRepository.REPOSITORIES_CHUNK_SIZE_SETTING,
FsRepository.REPOSITORIES_COMPRESS_SETTING,
FsRepository.REPOSITORIES_LOCATION_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_SIZE_SETTING,
IndicesQueryCache.INDICES_CACHE_QUERY_COUNT_SETTING,
IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
MetaData.SETTING_READ_ONLY_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
ThreadPool.THREADPOOL_GROUP_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
DestructiveOperations.REQUIRES_NAME_SETTING,
DiscoverySettings.PUBLISH_TIMEOUT_SETTING,
DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
DiscoverySettings.COMMIT_TIMEOUT_SETTING,
DiscoverySettings.NO_MASTER_BLOCK_SETTING,
GatewayService.EXPECTED_DATA_NODES_SETTING,
GatewayService.EXPECTED_MASTER_NODES_SETTING,
GatewayService.EXPECTED_NODES_SETTING,
GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_TIME_SETTING,
NetworkModule.HTTP_ENABLED,
NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS,
NettyHttpServerTransport.SETTING_CORS_ENABLED,
NettyHttpServerTransport.SETTING_CORS_MAX_AGE,
NettyHttpServerTransport.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
NettyHttpServerTransport.SETTING_PIPELINING,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
TransportService.TRACE_LOG_EXCLUDE_SETTING,
TransportService.TRACE_LOG_INCLUDE_SETTING,
TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
Transport.TRANSPORT_TCP_COMPRESS,
IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
MetaData.SETTING_READ_ONLY_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
ThreadPool.THREADPOOL_GROUP_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
DestructiveOperations.REQUIRES_NAME_SETTING,
DiscoverySettings.PUBLISH_TIMEOUT_SETTING,
DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
DiscoverySettings.COMMIT_TIMEOUT_SETTING,
DiscoverySettings.NO_MASTER_BLOCK_SETTING,
GatewayService.EXPECTED_DATA_NODES_SETTING,
GatewayService.EXPECTED_MASTER_NODES_SETTING,
GatewayService.EXPECTED_NODES_SETTING,
GatewayService.RECOVER_AFTER_DATA_NODES_SETTING,
GatewayService.RECOVER_AFTER_MASTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_TIME_SETTING,
NetworkModule.HTTP_ENABLED,
HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
HttpTransportSettings.SETTING_CORS_ENABLED,
HttpTransportSettings.SETTING_CORS_MAX_AGE,
HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
HttpTransportSettings.SETTING_PIPELINING,
HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN,
HttpTransportSettings.SETTING_HTTP_PORT,
HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT,
HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS,
HttpTransportSettings.SETTING_HTTP_COMPRESSION,
HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL,
HttpTransportSettings.SETTING_CORS_ALLOW_METHODS,
HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS,
HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED,
HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH,
HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
TransportService.TRACE_LOG_EXCLUDE_SETTING,
TransportService.TRACE_LOG_INCLUDE_SETTING,
TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
Transport.TRANSPORT_TCP_COMPRESS,
TransportSettings.TRANSPORT_PROFILES_SETTING,
TransportSettings.HOST,
TransportSettings.PUBLISH_HOST,
TransportSettings.BIND_HOST,
TransportSettings.PUBLISH_PORT,
TransportSettings.PORT,
NettyTransport.WORKER_COUNT,
NettyTransport.CONNECTIONS_PER_NODE_RECOVERY,
@ -241,74 +261,80 @@ public final class ClusterSettings extends AbstractScopedSettings {
NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE,
NettyTransport.NETTY_RECEIVE_PREDICTOR_MIN,
NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX,
NettyTransport.NETWORK_SERVER,
NetworkService.NETWORK_SERVER,
NettyTransport.NETTY_BOSS_COUNT,
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
NetworkService.TcpSettings.TCP_NO_DELAY,
NetworkService.TcpSettings.TCP_KEEP_ALIVE,
NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_BLOCKING,
NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
ScriptService.SCRIPT_CACHE_SIZE_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
NettyTransport.TCP_NO_DELAY,
NettyTransport.TCP_KEEP_ALIVE,
NettyTransport.TCP_REUSE_ADDRESS,
NettyTransport.TCP_SEND_BUFFER_SIZE,
NettyTransport.TCP_RECEIVE_BUFFER_SIZE,
NettyTransport.TCP_BLOCKING_SERVER,
NetworkService.GLOBAL_NETWORK_HOST_SETTING,
NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING,
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING,
NetworkService.TcpSettings.TCP_NO_DELAY,
NetworkService.TcpSettings.TCP_KEEP_ALIVE,
NetworkService.TcpSettings.TCP_REUSE_ADDRESS,
NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE,
NetworkService.TcpSettings.TCP_BLOCKING,
NetworkService.TcpSettings.TCP_BLOCKING_SERVER,
NetworkService.TcpSettings.TCP_BLOCKING_CLIENT,
NetworkService.TcpSettings.TCP_CONNECT_TIMEOUT,
IndexSettings.QUERY_STRING_ANALYZE_WILDCARD,
IndexSettings.QUERY_STRING_ALLOW_LEADING_WILDCARD,
PrimaryShardAllocator.NODE_INITIAL_SHARDS_SETTING,
ScriptService.SCRIPT_CACHE_SIZE_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING,
IndicesFieldDataCache.INDICES_FIELDDATA_CACHE_SIZE_KEY,
IndicesRequestCache.INDICES_CACHE_QUERY_SIZE,
IndicesRequestCache.INDICES_CACHE_QUERY_EXPIRE,
IndicesRequestCache.INDICES_CACHE_REQUEST_CLEAN_INTERVAL,
HunspellService.HUNSPELL_LAZY_LOAD,
HunspellService.HUNSPELL_IGNORE_CASE,
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
Environment.PATH_CONF_SETTING,
Environment.PATH_DATA_SETTING,
Environment.PATH_HOME_SETTING,
Environment.PATH_LOGS_SETTING,
Environment.PATH_PLUGINS_SETTING,
Environment.PATH_REPO_SETTING,
Environment.PATH_SCRIPTS_SETTING,
Environment.PATH_SHARED_DATA_SETTING,
Environment.PIDFILE_SETTING,
DiscoveryService.DISCOVERY_SEED_SETTING,
DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
DiscoveryModule.DISCOVERY_TYPE_SETTING,
DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
FaultDetection.PING_RETRIES_SETTING,
FaultDetection.PING_TIMEOUT_SETTING,
FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING,
FaultDetection.PING_INTERVAL_SETTING,
FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING,
ZenDiscovery.PING_TIMEOUT_SETTING,
ZenDiscovery.JOIN_TIMEOUT_SETTING,
ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING,
ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
SearchService.DEFAULT_KEEPALIVE_SETTING,
SearchService.KEEPALIVE_INTERVAL_SETTING,
Node.WRITE_PORTS_FIELD_SETTING,
HunspellService.HUNSPELL_LAZY_LOAD,
HunspellService.HUNSPELL_IGNORE_CASE,
HunspellService.HUNSPELL_DICTIONARY_OPTIONS,
IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT,
Environment.PATH_CONF_SETTING,
Environment.PATH_DATA_SETTING,
Environment.PATH_HOME_SETTING,
Environment.PATH_LOGS_SETTING,
Environment.PATH_PLUGINS_SETTING,
Environment.PATH_REPO_SETTING,
Environment.PATH_SCRIPTS_SETTING,
Environment.PATH_SHARED_DATA_SETTING,
Environment.PIDFILE_SETTING,
DiscoveryService.DISCOVERY_SEED_SETTING,
DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
DiscoveryModule.DISCOVERY_TYPE_SETTING,
DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
FaultDetection.PING_RETRIES_SETTING,
FaultDetection.PING_TIMEOUT_SETTING,
FaultDetection.REGISTER_CONNECTION_LISTENER_SETTING,
FaultDetection.PING_INTERVAL_SETTING,
FaultDetection.CONNECT_ON_NETWORK_DISCONNECT_SETTING,
ZenDiscovery.PING_TIMEOUT_SETTING,
ZenDiscovery.JOIN_TIMEOUT_SETTING,
ZenDiscovery.JOIN_RETRY_ATTEMPTS_SETTING,
ZenDiscovery.JOIN_RETRY_DELAY_SETTING,
ZenDiscovery.MAX_PINGS_FROM_ANOTHER_MASTER_SETTING,
ZenDiscovery.SEND_LEAVE_REQUEST_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_CLIENT_SETTING,
ZenDiscovery.MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT_SETTING,
ZenDiscovery.MASTER_ELECTION_FILTER_DATA_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING,
UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING,
SearchService.DEFAULT_KEEPALIVE_SETTING,
SearchService.KEEPALIVE_INTERVAL_SETTING,
Node.WRITE_PORTS_FIELD_SETTING,
Node.NODE_CLIENT_SETTING,
Node.NODE_DATA_SETTING,
Node.NODE_MASTER_SETTING,
Node.NODE_LOCAL_SETTING,
Node.NODE_MODE_SETTING,
Node.NODE_INGEST_SETTING,
URLRepository.ALLOWED_URLS_SETTING,
URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
URLRepository.REPOSITORIES_URL_SETTING,
URLRepository.ALLOWED_URLS_SETTING,
URLRepository.REPOSITORIES_LIST_DIRECTORIES_SETTING,
URLRepository.REPOSITORIES_URL_SETTING,
URLRepository.SUPPORTED_PROTOCOLS_SETTING,
TransportMasterNodeReadAction.FORCE_LOCAL_SETTING,
AutoCreateIndex.AUTO_CREATE_INDEX_SETTING,
@ -336,6 +362,12 @@ public final class ClusterSettings extends AbstractScopedSettings {
FsService.REFRESH_INTERVAL_SETTING,
JvmGcMonitorService.ENABLED_SETTING,
JvmGcMonitorService.REFRESH_INTERVAL_SETTING,
JvmGcMonitorService.GC_SETTING
JvmGcMonitorService.GC_SETTING,
PageCacheRecycler.LIMIT_HEAP_SETTING,
PageCacheRecycler.WEIGHT_BYTES_SETTING,
PageCacheRecycler.WEIGHT_INT_SETTING,
PageCacheRecycler.WEIGHT_LONG_SETTING,
PageCacheRecycler.WEIGHT_OBJECTS_SETTING,
PageCacheRecycler.TYPE_SETTING
)));
}

View File

@ -939,6 +939,14 @@ public final class Settings implements ToXContent {
return this;
}
/**
* Sets the setting with the provided setting key and an array of values.
*
* @param setting The setting key
* @param values The values
* @return The builder
*/
/**
* Sets the setting with the provided setting key and an array of values.
*
@ -947,6 +955,17 @@ public final class Settings implements ToXContent {
* @return The builder
*/
public Builder putArray(String setting, String... values) {
return putArray(setting, Arrays.asList(values));
}
/**
* Sets the setting with the provided setting key and a list of values.
*
* @param setting The setting key
* @param values The values
* @return The builder
*/
public Builder putArray(String setting, List<String> values) {
remove(setting);
int counter = 0;
while (true) {
@ -955,8 +974,8 @@ public final class Settings implements ToXContent {
break;
}
}
for (int i = 0; i < values.length; i++) {
put(setting + "." + i, values[i]);
for (int i = 0; i < values.size(); i++) {
put(setting + "." + i, values.get(i));
}
return this;
}
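For orientation, a usage sketch of the builder API touched above (the key and values are illustrative, not taken from this diff): putArray stores each element under a numbered child key, which getAsArray reads back later.
    Settings settings = Settings.settingsBuilder()
        .putArray("discovery.zen.ping.unicast.hosts", "node1:9300", "node2:9300")
        .build();
    // stored as discovery.zen.ping.unicast.hosts.0 = node1:9300
    //       and discovery.zen.ping.unicast.hosts.1 = node2:9300
    String[] hosts = settings.getAsArray("discovery.zen.ping.unicast.hosts");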

View File

@ -35,6 +35,10 @@ public class PortsRange {
this.portRange = portRange;
}
public String getPortRangeString() {
return portRange;
}
public int[] ports() throws NumberFormatException {
final IntArrayList ports = new IntArrayList();
iterate(new PortCallback() {
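As a quick sketch of the PortsRange API exercised in this hunk (the range value is illustrative):
    PortsRange range = new PortsRange("9200-9300");
    String raw = range.getPortRangeString(); // "9200-9300", via the new accessor added above
    int[] ports = range.ports();             // expands the range into the individual port numbers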

View File

@ -41,7 +41,7 @@ import java.util.concurrent.TimeUnit;
public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryService> {
public static final Setting<TimeValue> INITIAL_STATE_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), false, Setting.Scope.CLUSTER);
public static final Setting<Long> DISCOVERY_SEED_SETTING = Setting.longSetting("discovery.id.seed", 0l, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
public static final Setting<Long> DISCOVERY_SEED_SETTING = Setting.longSetting("discovery.id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
private static class InitialStateListener implements InitialStateDiscoveryListener {

View File

@ -19,7 +19,6 @@
package org.elasticsearch.discovery.local;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
@ -29,7 +28,6 @@ import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.IncompatibleClusterStateVersionException;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeService;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
@ -44,12 +42,10 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.discovery.AckClusterStatePublishResponseHandler;
import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.discovery.InitialStateDiscoveryListener;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.transport.TransportService;
import java.util.HashSet;
import java.util.Queue;
@ -67,17 +63,12 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
private static final LocalDiscovery[] NO_MEMBERS = new LocalDiscovery[0];
private final TransportService transportService;
private final ClusterService clusterService;
private final DiscoveryNodeService discoveryNodeService;
private RoutingService routingService;
private final ClusterName clusterName;
private final Version version;
private final DiscoverySettings discoverySettings;
private DiscoveryNode localNode;
private volatile boolean master = false;
private final AtomicBoolean initialStateSent = new AtomicBoolean();
@ -89,14 +80,11 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
private volatile ClusterState lastProcessedClusterState;
@Inject
public LocalDiscovery(Settings settings, ClusterName clusterName, TransportService transportService, ClusterService clusterService,
DiscoveryNodeService discoveryNodeService, Version version, DiscoverySettings discoverySettings) {
public LocalDiscovery(Settings settings, ClusterName clusterName, ClusterService clusterService,
DiscoverySettings discoverySettings) {
super(settings);
this.clusterName = clusterName;
this.clusterService = clusterService;
this.transportService = transportService;
this.discoveryNodeService = discoveryNodeService;
this.version = version;
this.discoverySettings = discoverySettings;
}
@ -119,8 +107,6 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
clusterGroups.put(clusterName, clusterGroup);
}
logger.debug("Connected to cluster [{}]", clusterName);
this.localNode = new DiscoveryNode(settings.get("name"), DiscoveryService.generateNodeId(settings), transportService.boundAddress().publishAddress(),
discoveryNodeService.buildAttributes(), version);
clusterGroup.members().add(this);
@ -147,7 +133,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.put(discovery.localNode);
nodesBuilder.put(discovery.localNode());
}
nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
// remove the NO_MASTER block in this case
@ -166,30 +152,9 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
}
});
} else if (firstMaster != null) {
// update as fast as we can the local node state with the new metadata (so we create indices for example)
final ClusterState masterState = firstMaster.clusterService.state();
clusterService.submitStateUpdateTask("local-disco(detected_master)", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
}
@Override
public ClusterState execute(ClusterState currentState) {
// make sure we have the local node id set, we might need it as a result of the new metadata
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentState.nodes()).put(localNode).localNodeId(localNode.id());
return ClusterState.builder(currentState).metaData(masterState.metaData()).nodes(nodesBuilder).build();
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
}
});
// tell the master to send the fact that we are here
final LocalDiscovery master = firstMaster;
firstMaster.clusterService.submitStateUpdateTask("local-disco-receive(from node[" + localNode + "])", new ClusterStateUpdateTask() {
firstMaster.clusterService.submitStateUpdateTask("local-disco-receive(from node[" + localNode() + "])", new ClusterStateUpdateTask() {
@Override
public boolean runOnlyOnMaster() {
return false;
@ -199,7 +164,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
for (LocalDiscovery discovery : clusterGroups.get(clusterName).members()) {
nodesBuilder.put(discovery.localNode);
nodesBuilder.put(discovery.localNode());
}
nodesBuilder.localNodeId(master.localNode().id()).masterNodeId(master.localNode().id());
return ClusterState.builder(currentState).nodes(nodesBuilder).build();
@ -254,7 +219,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
final Set<String> newMembers = new HashSet<>();
for (LocalDiscovery discovery : clusterGroup.members()) {
newMembers.add(discovery.localNode.id());
newMembers.add(discovery.localNode().id());
}
final LocalDiscovery master = firstMaster;
@ -266,7 +231,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode.id());
DiscoveryNodes newNodes = currentState.nodes().removeDeadMembers(newMembers, master.localNode().id());
DiscoveryNodes.Delta delta = newNodes.delta(currentState.nodes());
if (delta.added()) {
logger.warn("No new nodes should be created when a new discovery view is accepted");
@ -293,7 +258,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
@Override
public DiscoveryNode localNode() {
return localNode;
return clusterService.localNode();
}
@Override
@ -308,7 +273,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
@Override
public String nodeDescription() {
return clusterName.value() + "/" + localNode.id();
return clusterName.value() + "/" + localNode().id();
}
@Override
@ -323,7 +288,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
if (localDiscovery.master) {
continue;
}
nodesToPublishTo.add(localDiscovery.localNode);
nodesToPublishTo.add(localDiscovery.localNode());
}
publish(members, clusterChangedEvent, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener));
}
@ -359,7 +324,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
synchronized (this) {
// we do the marshaling intentionally, to check it works well...

// check if we published the cluster state at least once and the node was in the cluster when we last published it
if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode.id())) {
if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode().id())) {
// both conditions are true - which means we can try sending cluster state as diffs
if (clusterStateDiffBytes == null) {
Diff diff = clusterState.diff(clusterChangedEvent.previousState());
@ -369,7 +334,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
}
try {
newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(StreamInput.wrap(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState);
logger.trace("sending diff cluster state version [{}] with size {} to [{}]", clusterState.version(), clusterStateDiffBytes.length, discovery.localNode.getName());
logger.trace("sending diff cluster state version [{}] with size {} to [{}]", clusterState.version(), clusterStateDiffBytes.length, discovery.localNode().getName());
} catch (IncompatibleClusterStateVersionException ex) {
logger.warn("incompatible cluster state version [{}] - resending complete cluster state", ex, clusterState.version());
}
@ -378,7 +343,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
if (clusterStateBytes == null) {
clusterStateBytes = Builder.toBytes(clusterState);
}
newNodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode);
newNodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode());
}
discovery.lastProcessedClusterState = newNodeSpecificClusterState;
}
@ -423,17 +388,17 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
publishResponseHandler.onFailure(discovery.localNode, t);
publishResponseHandler.onFailure(discovery.localNode(), t);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
sendInitialStateEventIfNeeded();
publishResponseHandler.onResponse(discovery.localNode);
publishResponseHandler.onResponse(discovery.localNode());
}
});
} else {
publishResponseHandler.onResponse(discovery.localNode);
publishResponseHandler.onResponse(discovery.localNode());
}
}

View File

@ -60,7 +60,7 @@ public abstract class PriorityComparator implements Comparator<ShardRouting> {
}
private long timeCreated(Settings settings) {
return settings.getAsLong(IndexMetaData.SETTING_CREATION_DATE, -1l);
return settings.getAsLong(IndexMetaData.SETTING_CREATION_DATE, -1L);
}
protected abstract Settings getIndexSettings(String index);

View File

@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.http;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.transport.PortsRange;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
public final class HttpTransportSettings {
public static final Setting<Boolean> SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER);
public static final Setting<String> SETTING_CORS_ALLOW_ORIGIN = new Setting<String>("http.cors.allow-origin", "", (value) -> value, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER);
public static final Setting<String> SETTING_CORS_ALLOW_METHODS = new Setting<String>("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, false, Scope.CLUSTER);
public static final Setting<String> SETTING_CORS_ALLOW_HEADERS = new Setting<String>("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_PIPELINING_MAX_EVENTS = Setting.intSetting("http.pipelining.max_events", 10000, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_COMPRESSION = Setting.boolSetting("http.compression", false, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_HTTP_COMPRESSION_LEVEL = Setting.intSetting("http.compression_level", 6, false, Scope.CLUSTER);
public static final Setting<PortsRange> SETTING_HTTP_PORT = new Setting<PortsRange>("http.port", "9200-9300", PortsRange::new, false, Scope.CLUSTER);
public static final Setting<Integer> SETTING_HTTP_PUBLISH_PORT = Setting.intSetting("http.publish_port", 0, 0, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CONTENT_LENGTH = Setting.byteSizeSetting("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB), false, Scope.CLUSTER) ;
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_CHUNK_SIZE = Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ;
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), false, Scope.CLUSTER) ;
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), false, Scope.CLUSTER) ;
// don't reset cookies by default, since I don't think we really need to
// note: cookie parsing was fixed in netty 3.5.1 regarding stack allocation, but we still don't need cookies at the moment
public static final Setting<Boolean> SETTING_HTTP_RESET_COOKIES = Setting.boolSetting("http.reset_cookies", false, false, Scope.CLUSTER);
private HttpTransportSettings() {
}
}
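The remaining hunks in this diff switch callers from ad-hoc string lookups to these typed constants; a minimal before/after sketch mirroring the NettyHttpServerTransport change below:
    // before: untyped lookup, default repeated at the call site
    int maxEvents = settings.getAsInt("http.pipelining.max_events", 10000);
    // after: the Setting constant owns the key, the default and the parsing
    int maxEvents2 = HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS.get(settings);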

View File

@ -20,6 +20,7 @@
package org.elasticsearch.http.netty;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.pipelining.OrderedUpstreamMessageEvent;
import org.elasticsearch.rest.support.RestUtils;
import org.jboss.netty.channel.ChannelHandler;
@ -46,7 +47,8 @@ public class HttpRequestHandler extends SimpleChannelUpstreamHandler {
public HttpRequestHandler(NettyHttpServerTransport serverTransport, boolean detailedErrorsEnabled, ThreadContext threadContext) {
this.serverTransport = serverTransport;
this.corsPattern = RestUtils.checkCorsSettingForRegex(serverTransport.settings().get(NettyHttpServerTransport.SETTING_CORS_ALLOW_ORIGIN));
this.corsPattern = RestUtils
.checkCorsSettingForRegex(HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN.get(serverTransport.settings()));
this.httpPipeliningEnabled = serverTransport.pipelining;
this.detailedErrorsEnabled = detailedErrorsEnabled;
this.threadContext = threadContext;

View File

@ -49,12 +49,12 @@ import java.util.Map;
import java.util.Set;
import java.util.regex.Pattern;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_CREDENTIALS;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_HEADERS;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_METHODS;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ALLOW_ORIGIN;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_ENABLED;
import static org.elasticsearch.http.netty.NettyHttpServerTransport.SETTING_CORS_MAX_AGE;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_CREDENTIALS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_HEADERS;
import static org.jboss.netty.handler.codec.http.HttpHeaders.Names.ACCESS_CONTROL_ALLOW_METHODS;
@ -117,7 +117,7 @@ public class NettyHttpChannel extends HttpChannel {
String originHeader = request.header(ORIGIN);
if (!Strings.isNullOrEmpty(originHeader)) {
if (corsPattern == null) {
String allowedOrigins = transport.settings().get(SETTING_CORS_ALLOW_ORIGIN, null);
String allowedOrigins = SETTING_CORS_ALLOW_ORIGIN.get(transport.settings());
if (!Strings.isNullOrEmpty(allowedOrigins)) {
resp.headers().add(ACCESS_CONTROL_ALLOW_ORIGIN, allowedOrigins);
}
@ -128,8 +128,8 @@ public class NettyHttpChannel extends HttpChannel {
if (nettyRequest.getMethod() == HttpMethod.OPTIONS) {
// Allow Ajax requests based on the CORS "preflight" request
resp.headers().add(ACCESS_CONTROL_MAX_AGE, SETTING_CORS_MAX_AGE.get(transport.settings()));
resp.headers().add(ACCESS_CONTROL_ALLOW_METHODS, transport.settings().get(SETTING_CORS_ALLOW_METHODS, "OPTIONS, HEAD, GET, POST, PUT, DELETE"));
resp.headers().add(ACCESS_CONTROL_ALLOW_HEADERS, transport.settings().get(SETTING_CORS_ALLOW_HEADERS, "X-Requested-With, Content-Type, Content-Length"));
resp.headers().add(ACCESS_CONTROL_ALLOW_METHODS, SETTING_CORS_ALLOW_METHODS.get(transport.settings()));
resp.headers().add(ACCESS_CONTROL_ALLOW_HEADERS, SETTING_CORS_ALLOW_HEADERS.get(transport.settings()));
}
if (SETTING_CORS_ALLOW_CREDENTIALS.get(transport.settings())) {

View File

@ -26,8 +26,6 @@ import org.elasticsearch.common.netty.NettyUtils;
import org.elasticsearch.common.netty.OpenChannelsHandler;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
@ -46,6 +44,7 @@ import org.elasticsearch.http.HttpRequest;
import org.elasticsearch.http.HttpServerAdapter;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpStats;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.pipelining.HttpPipeliningHandler;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.threadpool.ThreadPool;
@ -75,7 +74,6 @@ import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicReference;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;
@ -93,22 +91,6 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
NettyUtils.setup();
}
public static final Setting<Boolean> SETTING_CORS_ENABLED = Setting.boolSetting("http.cors.enabled", false, false, Scope.CLUSTER);
public static final String SETTING_CORS_ALLOW_ORIGIN = "http.cors.allow-origin";
public static final Setting<Integer> SETTING_CORS_MAX_AGE = Setting.intSetting("http.cors.max-age", 1728000, false, Scope.CLUSTER);
public static final String SETTING_CORS_ALLOW_METHODS = "http.cors.allow-methods";
public static final String SETTING_CORS_ALLOW_HEADERS = "http.cors.allow-headers";
public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS = Setting.boolSetting("http.cors.allow-credentials", false, false, Scope.CLUSTER);
public static final Setting<Boolean> SETTING_PIPELINING = Setting.boolSetting("http.pipelining", true, false, Scope.CLUSTER);
public static final String SETTING_PIPELINING_MAX_EVENTS = "http.pipelining.max_events";
public static final String SETTING_HTTP_COMPRESSION = "http.compression";
public static final String SETTING_HTTP_COMPRESSION_LEVEL = "http.compression_level";
public static final Setting<Boolean> SETTING_HTTP_DETAILED_ERRORS_ENABLED = Setting.boolSetting("http.detailed_errors.enabled", true, false, Scope.CLUSTER);
public static final int DEFAULT_SETTING_PIPELINING_MAX_EVENTS = 10000;
public static final String DEFAULT_PORT_RANGE = "9200-9300";
protected final NetworkService networkService;
protected final BigArrays bigArrays;
@ -131,7 +113,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
protected final boolean resetCookies;
protected final String port;
protected final PortsRange port;
protected final String bindHosts[];
@ -176,28 +158,25 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
if (settings.getAsBoolean("netty.epollBugWorkaround", false)) {
System.setProperty("org.jboss.netty.epollBugWorkaround", "true");
}
ByteSizeValue maxContentLength = settings.getAsBytesSize("http.netty.max_content_length", settings.getAsBytesSize("http.max_content_length", new ByteSizeValue(100, ByteSizeUnit.MB)));
this.maxChunkSize = settings.getAsBytesSize("http.netty.max_chunk_size", settings.getAsBytesSize("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
this.maxHeaderSize = settings.getAsBytesSize("http.netty.max_header_size", settings.getAsBytesSize("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB)));
this.maxInitialLineLength = settings.getAsBytesSize("http.netty.max_initial_line_length", settings.getAsBytesSize("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB)));
// don't reset cookies by default, since I don't think we really need to
// note, parsing cookies was fixed in netty 3.5.1 regarding stack allocation, but still, currently, we don't need cookies
this.resetCookies = settings.getAsBoolean("http.netty.reset_cookies", settings.getAsBoolean("http.reset_cookies", false));
ByteSizeValue maxContentLength = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings);
this.maxChunkSize = HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
this.maxHeaderSize = HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
this.maxInitialLineLength = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
this.resetCookies = HttpTransportSettings.SETTING_HTTP_RESET_COOKIES.get(settings);
this.maxCumulationBufferCapacity = settings.getAsBytesSize("http.netty.max_cumulation_buffer_capacity", null);
this.maxCompositeBufferComponents = settings.getAsInt("http.netty.max_composite_buffer_components", -1);
this.workerCount = settings.getAsInt("http.netty.worker_count", EsExecutors.boundedNumberOfProcessors(settings) * 2);
this.blockingServer = settings.getAsBoolean("http.netty.http.blocking_server", TCP_BLOCKING.get(settings));
this.port = settings.get("http.netty.port", settings.get("http.port", DEFAULT_PORT_RANGE));
this.port = HttpTransportSettings.SETTING_HTTP_PORT.get(settings);
this.bindHosts = settings.getAsArray("http.netty.bind_host", settings.getAsArray("http.bind_host", settings.getAsArray("http.host", null)));
this.publishHosts = settings.getAsArray("http.netty.publish_host", settings.getAsArray("http.publish_host", settings.getAsArray("http.host", null)));
this.publishPort = settings.getAsInt("http.netty.publish_port", settings.getAsInt("http.publish_port", 0));
this.publishPort = HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT.get(settings);
this.tcpNoDelay = settings.getAsBoolean("http.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
this.tcpKeepAlive = settings.getAsBoolean("http.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
this.reuseAddress = settings.getAsBoolean("http.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
this.tcpSendBufferSize = settings.getAsBytesSize("http.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
this.tcpReceiveBufferSize = settings.getAsBytesSize("http.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);
this.detailedErrorsEnabled = HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);
long defaultReceiverPredictor = 512 * 1024;
if (JvmInfo.jvmInfo().getMem().getDirectMemoryMax().bytes() > 0) {
@ -215,10 +194,10 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
receiveBufferSizePredictorFactory = new AdaptiveReceiveBufferSizePredictorFactory((int) receivePredictorMin.bytes(), (int) receivePredictorMin.bytes(), (int) receivePredictorMax.bytes());
}
this.compression = settings.getAsBoolean(SETTING_HTTP_COMPRESSION, false);
this.compressionLevel = settings.getAsInt(SETTING_HTTP_COMPRESSION_LEVEL, 6);
this.pipelining = SETTING_PIPELINING.get(settings);
this.pipeliningMaxEvents = settings.getAsInt(SETTING_PIPELINING_MAX_EVENTS, DEFAULT_SETTING_PIPELINING_MAX_EVENTS);
this.compression = HttpTransportSettings.SETTING_HTTP_COMPRESSION.get(settings);
this.compressionLevel = HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL.get(settings);
this.pipelining = HttpTransportSettings.SETTING_PIPELINING.get(settings);
this.pipeliningMaxEvents = HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS.get(settings);
// validate max content length
if (maxContentLength.bytes() > Integer.MAX_VALUE) {
@ -312,10 +291,9 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpSer
}
private InetSocketTransportAddress bindAddress(final InetAddress hostAddress) {
PortsRange portsRange = new PortsRange(port);
final AtomicReference<Exception> lastException = new AtomicReference<>();
final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
boolean success = portsRange.iterate(new PortsRange.PortCallback() {
boolean success = port.iterate(new PortsRange.PortCallback() {
@Override
public boolean onPortNumber(int portNumber) {
try {

View File

@ -456,7 +456,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
if (shardId != null) {
final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) {
long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0l;
long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0L;
shard.shardBitsetFilterCache().onCached(ramBytesUsed);
}
}
@ -467,7 +467,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC
if (shardId != null) {
final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) {
long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0l;
long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0L;
shard.shardBitsetFilterCache().onRemoval(ramBytesUsed);
}
}

View File

@ -121,7 +121,7 @@ public interface ScriptDocValues<T> extends List<T> {
public long getValue() {
int numValues = values.count();
if (numValues == 0) {
return 0l;
return 0L;
}
return values.valueAt(0);
}

View File

@ -81,7 +81,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
}
public static final String DEFAULT_MAPPING = "_default_";
public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = Setting.longSetting("index.mapping.nested_fields.limit", 50l, 0, true, Setting.Scope.INDEX);
public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING = Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, true, Setting.Scope.INDEX);
public static final boolean INDEX_MAPPER_DYNAMIC_DEFAULT = true;
public static final Setting<Boolean> INDEX_MAPPER_DYNAMIC_SETTING = Setting.boolSetting("index.mapper.dynamic", INDEX_MAPPER_DYNAMIC_DEFAULT, false, Setting.Scope.INDEX);
private static ObjectHashSet<String> META_FIELDS = ObjectHashSet.from(

View File

@ -66,7 +66,7 @@ import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
public class IpFieldMapper extends NumberFieldMapper {
public static final String CONTENT_TYPE = "ip";
public static final long MAX_IP = 4294967296l;
public static final long MAX_IP = 4294967296L;
public static String longToIp(long longIp) {
int octet3 = (int) ((longIp >> 24) % 256);
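For orientation, an illustrative calculation (not part of the diff): MAX_IP is 2^32, the count of IPv4 addresses, and longToIp peels the address apart one octet at a time. For example, with longIp = 3232235777L (192.168.1.1):
    // (longIp >> 24) % 256 = 192
    // (longIp >> 16) % 256 = 168
    // (longIp >>  8) % 256 = 1
    //  longIp        % 256 = 1
    // -> "192.168.1.1"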

View File

@ -930,7 +930,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
return new Tuple<>(indexInput.readStringStringMap(), lastFound);
}
}
return new Tuple<>(new HashMap<>(), -1l);
return new Tuple<>(new HashMap<>(), -1L);
}
}

View File

@ -31,6 +31,7 @@ import org.elasticsearch.common.cache.RemovalListener;
import org.elasticsearch.common.cache.RemovalNotification;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.common.settings.Setting;
@ -52,7 +53,7 @@ import java.util.function.ToLongBiFunction;
/**
*/
public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener<IndicesFieldDataCache.Key, Accountable> {
public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener<IndicesFieldDataCache.Key, Accountable>, Releasable{
public static final Setting<TimeValue> INDICES_FIELDDATA_CLEAN_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.fielddata.cache.cleanup_interval", TimeValue.timeValueMinutes(1), false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> INDICES_FIELDDATA_CACHE_SIZE_KEY = Setting.byteSizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
@ -84,6 +85,7 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
new FieldDataCacheCleaner(this.cache, this.logger, this.threadPool, this.cleanInterval));
}
@Override
public void close() {
cache.invalidateAll();
this.closed = true;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.node;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
@ -100,6 +101,7 @@ import org.elasticsearch.watcher.ResourceWatcherModule;
import org.elasticsearch.watcher.ResourceWatcherService;
import java.io.BufferedWriter;
import java.io.Closeable;
import java.io.IOException;
import java.net.Inet6Address;
import java.net.InetAddress;
@ -108,9 +110,11 @@ import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
@ -120,7 +124,7 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
* A node represents a node within a cluster (<tt>cluster.name</tt>). The {@link #client()} can be used
* in order to use a {@link Client} to perform actions/operations against the cluster.
*/
public class Node implements Releasable {
public class Node implements Closeable {
public static final Setting<Boolean> WRITE_PORTS_FIELD_SETTING = Setting.boolSetting("node.portsfile", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> NODE_CLIENT_SETTING = Setting.boolSetting("node.client", false, false, Setting.Scope.CLUSTER);
@ -351,7 +355,7 @@ public class Node implements Releasable {
// If not, the hook that is added in Bootstrap#setup() will be useless: close() might not be executed if another call to close() (for example via the api)
// has already set some lifecycles to stopped. In that case the process will be terminated even if the first call to close() has not finished yet.
@Override
public synchronized void close() {
public synchronized void close() throws IOException {
if (lifecycle.started()) {
stop();
}
@ -361,88 +365,80 @@ public class Node implements Releasable {
ESLogger logger = Loggers.getLogger(Node.class, settings.get("name"));
logger.info("closing ...");
List<Closeable> toClose = new ArrayList<>();
StopWatch stopWatch = new StopWatch("node_close");
stopWatch.start("tribe");
injector.getInstance(TribeService.class).close();
stopWatch.stop().start("node_service");
try {
injector.getInstance(NodeService.class).close();
} catch (IOException e) {
logger.warn("NodeService close failed", e);
}
stopWatch.stop().start("http");
toClose.add(() -> stopWatch.start("tribe"));
toClose.add(injector.getInstance(TribeService.class));
toClose.add(() -> stopWatch.stop().start("node_service"));
toClose.add(injector.getInstance(NodeService.class));
toClose.add(() ->stopWatch.stop().start("http"));
if (settings.getAsBoolean("http.enabled", true)) {
injector.getInstance(HttpServer.class).close();
toClose.add(injector.getInstance(HttpServer.class));
}
stopWatch.stop().start("snapshot_service");
injector.getInstance(SnapshotsService.class).close();
injector.getInstance(SnapshotShardsService.class).close();
stopWatch.stop().start("client");
toClose.add(() ->stopWatch.stop().start("snapshot_service"));
toClose.add(injector.getInstance(SnapshotsService.class));
toClose.add(injector.getInstance(SnapshotShardsService.class));
toClose.add(() ->stopWatch.stop().start("client"));
Releasables.close(injector.getInstance(Client.class));
stopWatch.stop().start("indices_cluster");
injector.getInstance(IndicesClusterStateService.class).close();
stopWatch.stop().start("indices");
injector.getInstance(IndicesTTLService.class).close();
injector.getInstance(IndicesService.class).close();
toClose.add(() ->stopWatch.stop().start("indices_cluster"));
toClose.add(injector.getInstance(IndicesClusterStateService.class));
toClose.add(() ->stopWatch.stop().start("indices"));
toClose.add(injector.getInstance(IndicesTTLService.class));
toClose.add(injector.getInstance(IndicesService.class));
// close filter/fielddata caches after indices
injector.getInstance(IndicesQueryCache.class).close();
injector.getInstance(IndicesFieldDataCache.class).close();
injector.getInstance(IndicesStore.class).close();
stopWatch.stop().start("routing");
injector.getInstance(RoutingService.class).close();
stopWatch.stop().start("cluster");
injector.getInstance(ClusterService.class).close();
stopWatch.stop().start("discovery");
injector.getInstance(DiscoveryService.class).close();
stopWatch.stop().start("monitor");
injector.getInstance(MonitorService.class).close();
stopWatch.stop().start("gateway");
injector.getInstance(GatewayService.class).close();
stopWatch.stop().start("search");
injector.getInstance(SearchService.class).close();
stopWatch.stop().start("rest");
injector.getInstance(RestController.class).close();
stopWatch.stop().start("transport");
injector.getInstance(TransportService.class).close();
stopWatch.stop().start("percolator_service");
injector.getInstance(PercolatorService.class).close();
toClose.add(injector.getInstance(IndicesQueryCache.class));
toClose.add(injector.getInstance(IndicesFieldDataCache.class));
toClose.add(injector.getInstance(IndicesStore.class));
toClose.add(() ->stopWatch.stop().start("routing"));
toClose.add(injector.getInstance(RoutingService.class));
toClose.add(() ->stopWatch.stop().start("cluster"));
toClose.add(injector.getInstance(ClusterService.class));
toClose.add(() ->stopWatch.stop().start("discovery"));
toClose.add(injector.getInstance(DiscoveryService.class));
toClose.add(() ->stopWatch.stop().start("monitor"));
toClose.add(injector.getInstance(MonitorService.class));
toClose.add(() ->stopWatch.stop().start("gateway"));
toClose.add(injector.getInstance(GatewayService.class));
toClose.add(() ->stopWatch.stop().start("search"));
toClose.add(injector.getInstance(SearchService.class));
toClose.add(() ->stopWatch.stop().start("rest"));
toClose.add(injector.getInstance(RestController.class));
toClose.add(() ->stopWatch.stop().start("transport"));
toClose.add(injector.getInstance(TransportService.class));
toClose.add(() ->stopWatch.stop().start("percolator_service"));
toClose.add(injector.getInstance(PercolatorService.class));
for (Class<? extends LifecycleComponent> plugin : pluginsService.nodeServices()) {
stopWatch.stop().start("plugin(" + plugin.getName() + ")");
injector.getInstance(plugin).close();
toClose.add(() ->stopWatch.stop().start("plugin(" + plugin.getName() + ")"));
toClose.add(injector.getInstance(plugin));
}
stopWatch.stop().start("script");
try {
injector.getInstance(ScriptService.class).close();
} catch(IOException e) {
logger.warn("ScriptService close failed", e);
}
toClose.add(() ->stopWatch.stop().start("script"));
toClose.add(injector.getInstance(ScriptService.class));
stopWatch.stop().start("thread_pool");
toClose.add(() ->stopWatch.stop().start("thread_pool"));
// TODO this should really use ThreadPool.terminate()
injector.getInstance(ThreadPool.class).shutdown();
try {
injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
// ignore
}
stopWatch.stop().start("thread_pool_force_shutdown");
try {
injector.getInstance(ThreadPool.class).shutdownNow();
} catch (Exception e) {
// ignore
}
stopWatch.stop();
toClose.add(() -> injector.getInstance(ThreadPool.class).shutdown());
toClose.add(() -> {
try {
injector.getInstance(ThreadPool.class).awaitTermination(10, TimeUnit.SECONDS);
} catch (InterruptedException e) {
// ignore
}
});
toClose.add(() ->stopWatch.stop().start("thread_pool_force_shutdown"));
toClose.add(() -> injector.getInstance(ThreadPool.class).shutdownNow());
toClose.add(() -> stopWatch.stop());
toClose.add(injector.getInstance(NodeEnvironment.class));
toClose.add(injector.getInstance(PageCacheRecycler.class));
if (logger.isTraceEnabled()) {
logger.trace("Close times for each service:\n{}", stopWatch.prettyPrint());
}
injector.getInstance(NodeEnvironment.class).close();
injector.getInstance(PageCacheRecycler.class).close();
IOUtils.close(toClose);
logger.info("closed");
}
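The rewrite above replaces the long chain of individual close() calls with a single toClose list drained by Lucene's IOUtils.close, which closes every entry even if an earlier one throws and then rethrows the first failure; combined with Node now implementing Closeable, callers can manage a node with try-with-resources instead of calling close() explicitly. A minimal sketch of the pattern (the service names are illustrative):
    List<Closeable> toClose = new ArrayList<>();
    toClose.add(() -> stopWatch.start("serviceA")); // a lambda works: Closeable has a single close() method
    toClose.add(serviceA);                          // anything implementing Closeable
    toClose.add(() -> stopWatch.stop().start("serviceB"));
    toClose.add(serviceB);
    IOUtils.close(toClose);                         // closes all entries, then rethrows the first exception hit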

View File

@ -43,6 +43,7 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
@ -85,7 +86,7 @@ import java.util.stream.StreamSupport;
import static org.apache.lucene.search.BooleanClause.Occur.FILTER;
import static org.apache.lucene.search.BooleanClause.Occur.MUST;
public class PercolatorService extends AbstractComponent {
public class PercolatorService extends AbstractComponent implements Releasable {
public final static float NO_SCORE = Float.NEGATIVE_INFINITY;
public final static String TYPE_NAME = ".percolator";
@ -304,6 +305,7 @@ public class PercolatorService extends AbstractComponent {
}
}
@Override
public void close() {
cache.close();
}

View File

@ -0,0 +1,401 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.URL;
import java.net.URLDecoder;
import java.nio.charset.StandardCharsets;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFilePermission;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.hash.MessageDigests;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE;
import static org.elasticsearch.common.util.set.Sets.newHashSet;
/**
* A command for the plugin cli to install a plugin into elasticsearch.
*
* The install command takes a plugin id, which may be any of the following:
* <ul>
* <li>An official elasticsearch plugin name</li>
* <li>Maven coordinates to a plugin zip</li>
* <li>A URL to a plugin zip</li>
* </ul>
*
* Plugins are packaged as zip files. Each packaged plugin must contain a
* plugin properties file. See {@link PluginInfo}.
* <p>
* The installation process first extracts the plugin files into a temporary
* directory in order to verify the plugin satisfies the following requirements:
* <ul>
* <li>Jar hell does not exist, either between the plugin's own jars, or with elasticsearch</li>
* <li>The plugin is not a module already provided with elasticsearch</li>
* <li>If the plugin contains extra security permissions, the policy file is validated</li>
* </ul>
* <p>
* A plugin may also contain an optional {@code bin} directory which contains scripts. The
* scripts will be installed into a subdirectory of the elasticsearch bin directory, using
* the name of the plugin, and the scripts will be marked executable.
* <p>
* A plugin may also contain an optional {@code config} directory which contains configuration
* files specific to the plugin. The config files will be installed into a subdirectory of the
* elasticsearch config directory, using the name of the plugin. If any files to be installed
* already exist, they will be skipped.
*/
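// Illustrative plugin ids handled by download() below (values assumed, not taken from this diff):
//   "analysis-icu"                            -> official plugin, fetched from download.elastic.co (with SHA1 check)
//   "org.example:my-plugin:1.0.0"             -> maven groupId:artifactId:version, fetched from maven central (with SHA1 check)
//   "https://example.org/my-plugin-1.0.0.zip" -> plain URL to a plugin zip, downloaded directly (no checksum)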
class InstallPluginCommand extends CliTool.Command {
private static final String PROPERTY_SUPPORT_STAGING_URLS = "es.plugins.staging";
// TODO: make this a resource file generated by gradle
static final Set<String> MODULES = unmodifiableSet(newHashSet(
"lang-expression",
"lang-groovy"));
// TODO: make this a resource file generated by gradle
static final Set<String> OFFICIAL_PLUGINS = unmodifiableSet(newHashSet(
"analysis-icu",
"analysis-kuromoji",
"analysis-phonetic",
"analysis-smartcn",
"analysis-stempel",
"delete-by-query",
"discovery-azure",
"discovery-ec2",
"discovery-gce",
"lang-javascript",
"lang-painless",
"lang-python",
"mapper-attachments",
"mapper-murmur3",
"mapper-size",
"repository-azure",
"repository-hdfs",
"repository-s3",
"store-smb"));
private final String pluginId;
private final boolean batch;
InstallPluginCommand(Terminal terminal, String pluginId, boolean batch) {
super(terminal);
this.pluginId = pluginId;
this.batch = batch;
}
@Override
public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
// TODO: remove this leniency!! is it needed anymore?
if (Files.exists(env.pluginsFile()) == false) {
terminal.println("Plugins directory [%s] does not exist. Creating...", env.pluginsFile());
Files.createDirectory(env.pluginsFile());
}
if (Environment.isWritable(env.pluginsFile()) == false) {
throw new IOException("Plugins directory is read only: " + env.pluginsFile());
}
Path pluginZip = download(pluginId, env.tmpFile());
Path extractedZip = unzip(pluginZip, env.pluginsFile());
install(extractedZip, env);
return CliTool.ExitStatus.OK;
}
/** Downloads the plugin and returns the file it was downloaded to. */
private Path download(String pluginId, Path tmpDir) throws IOException {
if (OFFICIAL_PLUGINS.contains(pluginId)) {
final String version = Version.CURRENT.toString();
final String url;
if (System.getProperty(PROPERTY_SUPPORT_STAGING_URLS, "false").equals("true")) {
url = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/staging/%1$s-%2$s/org/elasticsearch/plugin/%3$s/%1$s/%3$s-%1$s.zip",
version, Build.CURRENT.shortHash(), pluginId);
} else {
url = String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%1$s/%2$s/%1$s-%2$s.zip",
pluginId, version);
}
terminal.println("-> Downloading " + pluginId + " from elastic");
return downloadZipAndChecksum(url, tmpDir);
}
// now try as maven coordinates, a valid URL would only have a single colon
String[] coordinates = pluginId.split(":");
if (coordinates.length == 3) {
String mavenUrl = String.format(Locale.ROOT, "https://repo1.maven.org/maven2/%1$s/%2$s/%3$s/%2$s-%3$s.zip",
coordinates[0].replace(".", "/") /* groupId */, coordinates[1] /* artifactId */, coordinates[2] /* version */);
terminal.println("-> Downloading " + pluginId + " from maven central");
return downloadZipAndChecksum(mavenUrl, tmpDir);
}
// fall back to plain old URL
terminal.println("-> Downloading " + URLDecoder.decode(pluginId, "UTF-8"));
return downloadZip(pluginId, tmpDir);
}
/** Downloads a zip from the url, into a temp file under the given temp dir. */
private Path downloadZip(String urlString, Path tmpDir) throws IOException {
URL url = new URL(urlString);
Path zip = Files.createTempFile(tmpDir, null, ".zip");
try (InputStream in = url.openStream()) {
// must overwrite since creating the temp file above actually created the file
Files.copy(in, zip, StandardCopyOption.REPLACE_EXISTING);
}
return zip;
}
/** Downloads a zip from the url, as well as a SHA1 checksum, and checks the checksum. */
private Path downloadZipAndChecksum(String urlString, Path tmpDir) throws IOException {
Path zip = downloadZip(urlString, tmpDir);
URL checksumUrl = new URL(urlString + ".sha1");
final String expectedChecksum;
try (InputStream in = checksumUrl.openStream()) {
BufferedReader checksumReader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
expectedChecksum = checksumReader.readLine();
if (checksumReader.readLine() != null) {
throw new IllegalArgumentException("Invalid checksum file at " + urlString.toString());
}
}
byte[] zipbytes = Files.readAllBytes(zip);
String gotChecksum = MessageDigests.toHexString(MessageDigests.sha1().digest(zipbytes));
if (expectedChecksum.equals(gotChecksum) == false) {
throw new IllegalStateException("SHA1 mismatch, expected " + expectedChecksum + " but got " + gotChecksum);
}
return zip;
}
private Path unzip(Path zip, Path pluginsDir) throws IOException {
// unzip plugin to a staging temp dir
Path target = Files.createTempDirectory(pluginsDir, ".installing-");
Files.createDirectories(target);
// TODO: we should wrap this in a try/catch and try deleting the target dir on failure?
try (ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) {
ZipEntry entry;
byte[] buffer = new byte[8192];
while ((entry = zipInput.getNextEntry()) != null) {
Path targetFile = target.resolve(entry.getName());
// TODO: handle name being an absolute path
// be on the safe side: do not rely on directories always being extracted
// before their children (although this makes sense, is it actually guaranteed?)
Files.createDirectories(targetFile.getParent());
if (entry.isDirectory() == false) {
try (OutputStream out = Files.newOutputStream(targetFile)) {
int len;
while((len = zipInput.read(buffer)) >= 0) {
out.write(buffer, 0, len);
}
}
}
zipInput.closeEntry();
}
}
return target;
}
/** Load information about the plugin, and verify it can be installed with no errors. */
private PluginInfo verify(Path pluginRoot, Environment env) throws Exception {
// read and validate the plugin descriptor
PluginInfo info = PluginInfo.readFromProperties(pluginRoot);
terminal.println(VERBOSE, "%s", info);
// don't let the user install a plugin as a module...
// (they might unavoidably be in maven central and are packaged up the same way)
if (MODULES.contains(info.getName())) {
throw new IOException("plugin '" + info.getName() + "' cannot be installed like this, it is a system module");
}
// check for jar hell before any copying
jarHellCheck(pluginRoot, env.pluginsFile(), info.isIsolated());
// read optional security policy (extra permissions)
// if it exists, confirm or warn the user
Path policy = pluginRoot.resolve(PluginInfo.ES_PLUGIN_POLICY);
if (Files.exists(policy)) {
PluginSecurity.readPolicy(policy, terminal, env, batch);
}
return info;
}
/** check a candidate plugin for jar hell before installing it */
private void jarHellCheck(Path candidate, Path pluginsDir, boolean isolated) throws Exception {
// create list of current jars in classpath
final List<URL> jars = new ArrayList<>();
jars.addAll(Arrays.asList(JarHell.parseClassPath()));
// read existing bundles. this does some checks on the installation too.
List<PluginsService.Bundle> bundles = PluginsService.getPluginBundles(pluginsDir);
// if we aren't isolated, we need to jarhellcheck against any other non-isolated plugins
// that's always the first bundle
if (isolated == false) {
jars.addAll(bundles.get(0).urls);
}
// add plugin jars to the list
Path pluginJars[] = FileSystemUtils.files(candidate, "*.jar");
for (Path jar : pluginJars) {
jars.add(jar.toUri().toURL());
}
// TODO: no jars should be an error
// TODO: verify the classname exists in one of the jars!
// check combined (current classpath + new jars to-be-added)
JarHell.checkJarHell(jars.toArray(new URL[jars.size()]));
}
/**
* Installs the plugin from {@code tmpRoot} into the plugins dir.
* If the plugin has a bin dir and/or a config dir, those are copied.
*/
private void install(Path tmpRoot, Environment env) throws Exception {
List<Path> deleteOnFailure = new ArrayList<>();
deleteOnFailure.add(tmpRoot);
try {
PluginInfo info = verify(tmpRoot, env);
final Path destination = env.pluginsFile().resolve(info.getName());
if (Files.exists(destination)) {
throw new IOException("plugin directory " + destination.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + info.getName() + "' command");
}
Path tmpBinDir = tmpRoot.resolve("bin");
if (Files.exists(tmpBinDir)) {
Path destBinDir = env.binFile().resolve(info.getName());
deleteOnFailure.add(destBinDir);
installBin(info, tmpBinDir, destBinDir);
}
Path tmpConfigDir = tmpRoot.resolve("config");
if (Files.exists(tmpConfigDir)) {
// some files may already exist, and we don't remove plugin config files on plugin removal,
// so any installed config files are left on failure too
installConfig(info, tmpConfigDir, env.configFile().resolve(info.getName()));
}
Files.move(tmpRoot, destination, StandardCopyOption.ATOMIC_MOVE);
terminal.println("-> Installed " + info.getName());
} catch (Exception installProblem) {
try {
IOUtils.rm(deleteOnFailure.toArray(new Path[0]));
} catch (IOException exceptionWhileRemovingFiles) {
installProblem.addSuppressed(exceptionWhileRemovingFiles);
}
throw installProblem;
}
}
/** Copies the files from {@code tmpBinDir} into {@code destBinDir}, along with permissions from dest dirs parent. */
private void installBin(PluginInfo info, Path tmpBinDir, Path destBinDir) throws IOException {
if (Files.isDirectory(tmpBinDir) == false) {
throw new IOException("bin in plugin " + info.getName() + " is not a directory");
}
Files.createDirectory(destBinDir);
// set up file attributes for the installed files to match those of the parent dir
Set<PosixFilePermission> perms = new HashSet<>();
PosixFileAttributeView binAttrs = Files.getFileAttributeView(destBinDir.getParent(), PosixFileAttributeView.class);
if (binAttrs != null) {
perms = new HashSet<>(binAttrs.readAttributes().permissions());
// setting execute bits, since this just means "the file is executable", and actual execution requires read
perms.add(PosixFilePermission.OWNER_EXECUTE);
perms.add(PosixFilePermission.GROUP_EXECUTE);
perms.add(PosixFilePermission.OTHERS_EXECUTE);
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(tmpBinDir)) {
for (Path srcFile : stream) {
if (Files.isDirectory(srcFile)) {
throw new IOException("Directories not allowed in bin dir for plugin " + info.getName());
}
Path destFile = destBinDir.resolve(tmpBinDir.relativize(srcFile));
Files.copy(srcFile, destFile);
if (perms.isEmpty() == false) {
PosixFileAttributeView view = Files.getFileAttributeView(destFile, PosixFileAttributeView.class);
view.setPermissions(perms);
}
}
}
IOUtils.rm(tmpBinDir); // clean up what we just copied
}
/**
* Copies the files from {@code tmpConfigDir} into {@code destConfigDir}.
* Any files existing in both the source and destination will be skipped.
*/
private void installConfig(PluginInfo info, Path tmpConfigDir, Path destConfigDir) throws IOException {
if (Files.isDirectory(tmpConfigDir) == false) {
throw new IOException("config in plugin " + info.getName() + " is not a directory");
}
// create the plugin's config dir "if necessary"
Files.createDirectories(destConfigDir);
try (DirectoryStream<Path> stream = Files.newDirectoryStream(tmpConfigDir)) {
for (Path srcFile : stream) {
if (Files.isDirectory(srcFile)) {
throw new IOException("Directories not allowed in config dir for plugin " + info.getName());
}
Path destFile = destConfigDir.resolve(tmpConfigDir.relativize(srcFile));
if (Files.exists(destFile) == false) {
Files.copy(srcFile, destFile);
}
}
}
IOUtils.rm(tmpConfigDir); // clean up what we just copied
}
}
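
The install above stages everything under a temporary root and registers it in a delete-on-failure list, so a failed install never leaves a half-copied plugin behind. Below is a minimal JDK-only sketch of that rollback pattern; the class and method names are made up for illustration and the real command uses Lucene's IOUtils.rm for the recursive delete.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.stream.Stream;

class InstallRollbackSketch {
    static void installAtomically(Path staged, Path destination) throws Exception {
        List<Path> deleteOnFailure = new ArrayList<>();
        deleteOnFailure.add(staged);
        try {
            // ... copy optional bin/ and config/ dirs here, adding anything created to deleteOnFailure ...
            Files.move(staged, destination, StandardCopyOption.ATOMIC_MOVE);
        } catch (Exception installProblem) {
            for (Path root : deleteOnFailure) {
                try (Stream<Path> tree = Files.walk(root)) {
                    // delete children before parents
                    for (Path p : tree.sorted(Comparator.reverseOrder()).toArray(Path[]::new)) {
                        Files.deleteIfExists(p);
                    }
                } catch (IOException cleanupFailure) {
                    // keep the original failure as the primary exception
                    installProblem.addSuppressed(cleanupFailure);
                }
            }
            throw installProblem;
        }
    }
}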

View File

@@ -0,0 +1,56 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
/**
* A command for the plugin cli to list plugins installed in elasticsearch.
*/
class ListPluginsCommand extends CliTool.Command {
ListPluginsCommand(Terminal terminal) {
super(terminal);
}
@Override
public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
if (Files.exists(env.pluginsFile()) == false) {
throw new IOException("Plugins directory missing: " + env.pluginsFile());
}
terminal.println(Terminal.Verbosity.VERBOSE, "Plugins directory: " + env.pluginsFile());
try (DirectoryStream<Path> stream = Files.newDirectoryStream(env.pluginsFile())) {
for (Path plugin : stream) {
terminal.println(plugin.getFileName().toString());
}
}
return CliTool.ExitStatus.OK;
}
}
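
Listing is just a directory scan of the plugins directory, printing each entry's file name. A standalone JDK-only equivalent is sketched below; the plugins path used here is a made-up example.

import java.io.IOException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

class ListPluginsSketch {
    public static void main(String[] args) throws IOException {
        Path pluginsDir = Paths.get("/usr/share/elasticsearch/plugins"); // hypothetical location
        if (Files.exists(pluginsDir) == false) {
            throw new IOException("Plugins directory missing: " + pluginsDir);
        }
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsDir)) {
            for (Path plugin : stream) {
                // one installed plugin per directory entry
                System.out.println(plugin.getFileName());
            }
        }
    }
}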

View File

@@ -0,0 +1,124 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.apache.commons.cli.CommandLine;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.logging.log4j.LogConfigurator;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.util.Locale;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.option;
/**
* A cli tool for adding, removing and listing plugins for elasticsearch.
*/
public class PluginCli extends CliTool {
// commands
private static final String LIST_CMD_NAME = "list";
private static final String INSTALL_CMD_NAME = "install";
private static final String REMOVE_CMD_NAME = "remove";
// usage config
private static final CliToolConfig.Cmd LIST_CMD = cmd(LIST_CMD_NAME, ListPluginsCommand.class).build();
private static final CliToolConfig.Cmd INSTALL_CMD = cmd(INSTALL_CMD_NAME, InstallPluginCommand.class)
.options(option("b", "batch").required(false))
.build();
private static final CliToolConfig.Cmd REMOVE_CMD = cmd(REMOVE_CMD_NAME, RemovePluginCommand.class).build();
static final CliToolConfig CONFIG = CliToolConfig.config("plugin", PluginCli.class)
.cmds(LIST_CMD, INSTALL_CMD, REMOVE_CMD)
.build();
public static void main(String[] args) {
// initialize default for es.logger.level because we will not read the logging.yml
String loggerLevel = System.getProperty("es.logger.level", "INFO");
// Set the appender for all potential log files to terminal so that other components that use the logger print to the
// same terminal.
// The reason for this is that the plugin cli cannot be configured with a file appender because when the plugin command is
// executed there is no way of knowing where the logfiles should be placed. For example, if elasticsearch
// is run as a service then the logs should be at /var/log/elasticsearch but when started from the tar they should be at es.home/logs.
// Therefore we print to Terminal.
Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.builder()
.put("appender.terminal.type", "terminal")
.put("rootLogger", "${es.logger.level}, terminal")
.put("es.logger.level", loggerLevel)
.build(), Terminal.DEFAULT);
// configure but do not read the logging conf file
LogConfigurator.configure(env.settings(), false);
int status = new PluginCli(Terminal.DEFAULT).execute(args).status();
exit(status);
}
@SuppressForbidden(reason = "Allowed to exit explicitly from #main()")
private static void exit(int status) {
System.exit(status);
}
PluginCli(Terminal terminal) {
super(CONFIG, terminal);
}
@Override
protected Command parse(String cmdName, CommandLine cli) throws Exception {
switch (cmdName.toLowerCase(Locale.ROOT)) {
case LIST_CMD_NAME:
return new ListPluginsCommand(terminal);
case INSTALL_CMD_NAME:
return parseInstallPluginCommand(cli);
case REMOVE_CMD_NAME:
return parseRemovePluginCommand(cli);
default:
assert false : "can't get here as cmd name is validated before this method is called";
return exitCmd(ExitStatus.USAGE);
}
}
private Command parseInstallPluginCommand(CommandLine cli) {
String[] args = cli.getArgs();
if (args.length != 1) {
return exitCmd(ExitStatus.USAGE, terminal, "Must supply a single plugin id argument");
}
boolean batch = System.console() == null;
if (cli.hasOption("b")) {
batch = true;
}
return new InstallPluginCommand(terminal, args[0], batch);
}
private Command parseRemovePluginCommand(CommandLine cli) {
String[] args = cli.getArgs();
if (args.length != 1) {
return exitCmd(ExitStatus.USAGE, terminal, "Must supply a single plugin name argument");
}
return new RemovePluginCommand(terminal, args[0]);
}
}
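
Each subcommand the parser returns follows the same shape: a constructor that captures the Terminal, and an execute method that returns an exit status. A hedged sketch of a minimal do-nothing command, assuming only the CliTool API already used above (the class name and message are invented):

import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;

class NoopPluginCommand extends CliTool.Command {
    NoopPluginCommand(Terminal terminal) {
        super(terminal);
    }

    @Override
    public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
        // a real command would inspect env (plugins dir, config dir, ...) here
        terminal.println("nothing to do");
        return CliTool.ExitStatus.OK;
    }
}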

View File

@@ -82,7 +82,6 @@ public class PluginInfo implements Streamable, ToXContent {
if (name == null || name.isEmpty()) {
throw new IllegalArgumentException("Property [name] is missing in [" + descriptor + "]");
}
PluginManager.checkForForbiddenName(name);
String description = props.getProperty("description");
if (description == null) {
throw new IllegalArgumentException("Property [description] is missing for plugin [" + name + "]");

View File

@@ -1,685 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Build;
import org.elasticsearch.ElasticsearchCorruptionException;
import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.http.client.HttpDownloadHelper;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.PluginsService.Bundle;
import java.io.IOException;
import java.io.OutputStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.nio.file.DirectoryStream;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.nio.file.attribute.GroupPrincipal;
import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.UserPrincipal;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.stream.StreamSupport;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.common.Strings.hasLength;
import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE;
import static org.elasticsearch.common.io.FileSystemUtils.moveFilesWithoutOverwriting;
import static org.elasticsearch.common.util.set.Sets.newHashSet;
/**
*
*/
public class PluginManager {
public static final String PROPERTY_SUPPORT_STAGING_URLS = "es.plugins.staging";
public enum OutputMode {
DEFAULT, SILENT, VERBOSE
}
private static final Set<String> BLACKLIST = unmodifiableSet(newHashSet(
"elasticsearch",
"elasticsearch.bat",
"elasticsearch.in.sh",
"plugin",
"plugin.bat",
"service.bat"));
static final Set<String> MODULES = unmodifiableSet(newHashSet(
"lang-expression",
"lang-groovy"));
static final Set<String> OFFICIAL_PLUGINS = unmodifiableSet(newHashSet(
"analysis-icu",
"analysis-kuromoji",
"analysis-phonetic",
"analysis-smartcn",
"analysis-stempel",
"delete-by-query",
"discovery-azure",
"discovery-ec2",
"discovery-gce",
"ingest-geoip",
"lang-javascript",
"lang-painless",
"lang-python",
"mapper-attachments",
"mapper-murmur3",
"mapper-size",
"repository-azure",
"repository-hdfs",
"repository-s3",
"store-smb"));
private final Environment environment;
private URL url;
private OutputMode outputMode;
private TimeValue timeout;
public PluginManager(Environment environment, URL url, OutputMode outputMode, TimeValue timeout) {
this.environment = environment;
this.url = url;
this.outputMode = outputMode;
this.timeout = timeout;
}
public void downloadAndExtract(String name, Terminal terminal, boolean batch) throws IOException {
if (name == null && url == null) {
throw new IllegalArgumentException("plugin name or url must be supplied with install.");
}
if (!Files.exists(environment.pluginsFile())) {
terminal.println("Plugins directory [%s] does not exist. Creating...", environment.pluginsFile());
Files.createDirectory(environment.pluginsFile());
}
if (!Environment.isWritable(environment.pluginsFile())) {
throw new IOException("plugin directory " + environment.pluginsFile() + " is read only");
}
PluginHandle pluginHandle;
if (name != null) {
pluginHandle = PluginHandle.parse(name);
checkForForbiddenName(pluginHandle.name);
} else {
// if we have no name but url, use temporary name that will be overwritten later
pluginHandle = new PluginHandle("temp_name" + Randomness.get().nextInt(), null, null);
}
Path pluginFile = download(pluginHandle, terminal);
extract(pluginHandle, terminal, pluginFile, batch);
}
private Path download(PluginHandle pluginHandle, Terminal terminal) throws IOException {
Path pluginFile = pluginHandle.newDistroFile(environment);
HttpDownloadHelper downloadHelper = new HttpDownloadHelper();
boolean downloaded = false;
boolean verified = false;
HttpDownloadHelper.DownloadProgress progress;
if (outputMode == OutputMode.SILENT) {
progress = new HttpDownloadHelper.NullProgress();
} else {
progress = new HttpDownloadHelper.VerboseProgress(terminal.writer());
}
// first, try directly from the URL provided
if (url != null) {
URL pluginUrl = url;
boolean isSecureProtocol = "https".equalsIgnoreCase(pluginUrl.getProtocol());
boolean isAuthInfoSet = !Strings.isNullOrEmpty(pluginUrl.getUserInfo());
if (isAuthInfoSet && !isSecureProtocol) {
throw new IOException("Basic auth is only supported for HTTPS!");
}
terminal.println("Trying %s ...", pluginUrl.toExternalForm());
try {
downloadHelper.download(pluginUrl, pluginFile, progress, this.timeout);
downloaded = true;
terminal.println("Verifying %s checksums if available ...", pluginUrl.toExternalForm());
Tuple<URL, Path> sha1Info = pluginHandle.newChecksumUrlAndFile(environment, pluginUrl, "sha1");
verified = downloadHelper.downloadAndVerifyChecksum(sha1Info.v1(), pluginFile,
sha1Info.v2(), progress, this.timeout, HttpDownloadHelper.SHA1_CHECKSUM);
Tuple<URL, Path> md5Info = pluginHandle.newChecksumUrlAndFile(environment, pluginUrl, "md5");
verified = verified || downloadHelper.downloadAndVerifyChecksum(md5Info.v1(), pluginFile,
md5Info.v2(), progress, this.timeout, HttpDownloadHelper.MD5_CHECKSUM);
} catch (ElasticsearchTimeoutException | ElasticsearchCorruptionException e) {
throw e;
} catch (Exception e) {
// ignore
terminal.println("Failed: %s", ExceptionsHelper.detailedMessage(e));
}
} else {
if (PluginHandle.isOfficialPlugin(pluginHandle.name, pluginHandle.user, pluginHandle.version)) {
checkForOfficialPlugins(pluginHandle.name);
}
}
if (!downloaded && url == null) {
// We try all possible locations
for (URL url : pluginHandle.urls()) {
terminal.println("Trying %s ...", url.toExternalForm());
try {
downloadHelper.download(url, pluginFile, progress, this.timeout);
downloaded = true;
terminal.println("Verifying %s checksums if available ...", url.toExternalForm());
Tuple<URL, Path> sha1Info = pluginHandle.newChecksumUrlAndFile(environment, url, "sha1");
verified = downloadHelper.downloadAndVerifyChecksum(sha1Info.v1(), pluginFile,
sha1Info.v2(), progress, this.timeout, HttpDownloadHelper.SHA1_CHECKSUM);
Tuple<URL, Path> md5Info = pluginHandle.newChecksumUrlAndFile(environment, url, "md5");
verified = verified || downloadHelper.downloadAndVerifyChecksum(md5Info.v1(), pluginFile,
md5Info.v2(), progress, this.timeout, HttpDownloadHelper.MD5_CHECKSUM);
break;
} catch (ElasticsearchTimeoutException | ElasticsearchCorruptionException e) {
throw e;
} catch (Exception e) {
terminal.println(VERBOSE, "Failed: %s", ExceptionsHelper.detailedMessage(e));
}
}
}
if (!downloaded) {
// try to cleanup what we downloaded
IOUtils.deleteFilesIgnoringExceptions(pluginFile);
throw new IOException("failed to download out of all possible locations..., use --verbose to get detailed information");
}
if (verified == false) {
terminal.println("NOTE: Unable to verify checksum for downloaded plugin (unable to find .sha1 or .md5 file to verify)");
}
return pluginFile;
}
private void extract(PluginHandle pluginHandle, Terminal terminal, Path pluginFile, boolean batch) throws IOException {
// unzip plugin to a staging temp dir, named for the plugin
Path tmp = Files.createTempDirectory(environment.tmpFile(), null);
Path root = tmp.resolve(pluginHandle.name);
unzipPlugin(pluginFile, root);
// find the actual root (in case it's unzipped with extra directory wrapping)
root = findPluginRoot(root);
// read and validate the plugin descriptor
PluginInfo info = PluginInfo.readFromProperties(root);
terminal.println(VERBOSE, "%s", info);
// don't let the user install a plugin as a module...
// (modules may unavoidably be in maven central and are packaged up the same way)
if (MODULES.contains(info.getName())) {
throw new IOException("plugin '" + info.getName() + "' cannot be installed like this, it is a system module");
}
// update name in handle based on 'name' property found in descriptor file
pluginHandle = new PluginHandle(info.getName(), pluginHandle.version, pluginHandle.user);
final Path extractLocation = pluginHandle.extractedDir(environment);
if (Files.exists(extractLocation)) {
throw new IOException("plugin directory " + extractLocation.toAbsolutePath() + " already exists. To update the plugin, uninstall it first using 'remove " + pluginHandle.name + "' command");
}
// check for jar hell before any copying
jarHellCheck(root, info.isIsolated());
// read optional security policy (extra permissions)
// if it exists, confirm or warn the user
Path policy = root.resolve(PluginInfo.ES_PLUGIN_POLICY);
if (Files.exists(policy)) {
PluginSecurity.readPolicy(policy, terminal, environment, batch);
}
// install plugin
FileSystemUtils.copyDirectoryRecursively(root, extractLocation);
terminal.println("Installed %s into %s", pluginHandle.name, extractLocation.toAbsolutePath());
// cleanup
tryToDeletePath(terminal, tmp, pluginFile);
// take care of bin/ by moving and applying permissions if needed
Path sourcePluginBinDirectory = extractLocation.resolve("bin");
Path destPluginBinDirectory = pluginHandle.binDir(environment);
boolean needToCopyBinDirectory = Files.exists(sourcePluginBinDirectory);
if (needToCopyBinDirectory) {
if (Files.exists(destPluginBinDirectory) && !Files.isDirectory(destPluginBinDirectory)) {
tryToDeletePath(terminal, extractLocation);
throw new IOException("plugin bin directory " + destPluginBinDirectory + " is not a directory");
}
try {
copyBinDirectory(sourcePluginBinDirectory, destPluginBinDirectory, pluginHandle.name, terminal);
} catch (IOException e) {
// rollback and remove potentially before installed leftovers
terminal.printError("Error copying bin directory [%s] to [%s], cleaning up, reason: %s", sourcePluginBinDirectory, destPluginBinDirectory, ExceptionsHelper.detailedMessage(e));
tryToDeletePath(terminal, extractLocation, pluginHandle.binDir(environment));
throw e;
}
}
Path sourceConfigDirectory = extractLocation.resolve("config");
Path destConfigDirectory = pluginHandle.configDir(environment);
boolean needToCopyConfigDirectory = Files.exists(sourceConfigDirectory);
if (needToCopyConfigDirectory) {
if (Files.exists(destConfigDirectory) && !Files.isDirectory(destConfigDirectory)) {
tryToDeletePath(terminal, extractLocation, destPluginBinDirectory);
throw new IOException("plugin config directory " + destConfigDirectory + " is not a directory");
}
try {
terminal.println(VERBOSE, "Found config, moving to %s", destConfigDirectory.toAbsolutePath());
moveFilesWithoutOverwriting(sourceConfigDirectory, destConfigDirectory, ".new");
if (Environment.getFileStore(destConfigDirectory).supportsFileAttributeView(PosixFileAttributeView.class)) {
//We copy owner, group and permissions from the parent ES_CONFIG directory, assuming they were properly set depending
// on how es was installed in the first place: can be root:elasticsearch (750) if es was installed from rpm/deb packages
// or most likely elasticsearch:elasticsearch if installed from tar/zip. As for permissions we don't rely on umask.
PosixFileAttributes parentDirAttributes = Files.getFileAttributeView(destConfigDirectory.getParent(), PosixFileAttributeView.class).readAttributes();
//for files though, we make sure not to copy execute permissions from the parent dir and leave them untouched
Set<PosixFilePermission> baseFilePermissions = new HashSet<>();
for (PosixFilePermission posixFilePermission : parentDirAttributes.permissions()) {
switch (posixFilePermission) {
case OWNER_EXECUTE:
case GROUP_EXECUTE:
case OTHERS_EXECUTE:
break;
default:
baseFilePermissions.add(posixFilePermission);
}
}
Files.walkFileTree(destConfigDirectory, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
if (attrs.isRegularFile()) {
Set<PosixFilePermission> newFilePermissions = new HashSet<>(baseFilePermissions);
Set<PosixFilePermission> currentFilePermissions = Files.getPosixFilePermissions(file);
for (PosixFilePermission posixFilePermission : currentFilePermissions) {
switch (posixFilePermission) {
case OWNER_EXECUTE:
case GROUP_EXECUTE:
case OTHERS_EXECUTE:
newFilePermissions.add(posixFilePermission);
}
}
setPosixFileAttributes(file, parentDirAttributes.owner(), parentDirAttributes.group(), newFilePermissions);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
setPosixFileAttributes(dir, parentDirAttributes.owner(), parentDirAttributes.group(), parentDirAttributes.permissions());
return FileVisitResult.CONTINUE;
}
});
} else {
terminal.println(VERBOSE, "Skipping posix permissions - filestore doesn't support posix permission");
}
terminal.println(VERBOSE, "Installed %s into %s", pluginHandle.name, destConfigDirectory.toAbsolutePath());
} catch (IOException e) {
terminal.printError("Error copying config directory [%s] to [%s], cleaning up, reason: %s", sourceConfigDirectory, destConfigDirectory, ExceptionsHelper.detailedMessage(e));
tryToDeletePath(terminal, extractLocation, destPluginBinDirectory, destConfigDirectory);
throw e;
}
}
}
private static void setPosixFileAttributes(Path path, UserPrincipal owner, GroupPrincipal group, Set<PosixFilePermission> permissions) throws IOException {
PosixFileAttributeView fileAttributeView = Files.getFileAttributeView(path, PosixFileAttributeView.class);
fileAttributeView.setOwner(owner);
fileAttributeView.setGroup(group);
fileAttributeView.setPermissions(permissions);
}
static void tryToDeletePath(Terminal terminal, Path ... paths) {
for (Path path : paths) {
try {
IOUtils.rm(path);
} catch (IOException e) {
terminal.printError(e);
}
}
}
private void copyBinDirectory(Path sourcePluginBinDirectory, Path destPluginBinDirectory, String pluginName, Terminal terminal) throws IOException {
boolean canCopyFromSource = Files.exists(sourcePluginBinDirectory) && Files.isReadable(sourcePluginBinDirectory) && Files.isDirectory(sourcePluginBinDirectory);
if (canCopyFromSource) {
terminal.println(VERBOSE, "Found bin, moving to %s", destPluginBinDirectory.toAbsolutePath());
if (Files.exists(destPluginBinDirectory)) {
IOUtils.rm(destPluginBinDirectory);
}
try {
Files.createDirectories(destPluginBinDirectory.getParent());
FileSystemUtils.move(sourcePluginBinDirectory, destPluginBinDirectory);
} catch (IOException e) {
throw new IOException("Could not move [" + sourcePluginBinDirectory + "] to [" + destPluginBinDirectory + "]", e);
}
if (Environment.getFileStore(destPluginBinDirectory).supportsFileAttributeView(PosixFileAttributeView.class)) {
PosixFileAttributes parentDirAttributes = Files.getFileAttributeView(destPluginBinDirectory.getParent(), PosixFileAttributeView.class).readAttributes();
//copy permissions from parent bin directory
Set<PosixFilePermission> filePermissions = new HashSet<>();
for (PosixFilePermission posixFilePermission : parentDirAttributes.permissions()) {
switch (posixFilePermission) {
case OWNER_EXECUTE:
case GROUP_EXECUTE:
case OTHERS_EXECUTE:
break;
default:
filePermissions.add(posixFilePermission);
}
}
// add file execute permissions to existing perms, so execution will work.
filePermissions.add(PosixFilePermission.OWNER_EXECUTE);
filePermissions.add(PosixFilePermission.GROUP_EXECUTE);
filePermissions.add(PosixFilePermission.OTHERS_EXECUTE);
Files.walkFileTree(destPluginBinDirectory, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
if (attrs.isRegularFile()) {
setPosixFileAttributes(file, parentDirAttributes.owner(), parentDirAttributes.group(), filePermissions);
}
return FileVisitResult.CONTINUE;
}
@Override
public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
setPosixFileAttributes(dir, parentDirAttributes.owner(), parentDirAttributes.group(), parentDirAttributes.permissions());
return FileVisitResult.CONTINUE;
}
});
} else {
terminal.println(VERBOSE, "Skipping posix permissions - filestore doesn't support posix permission");
}
terminal.println(VERBOSE, "Installed %s into %s", pluginName, destPluginBinDirectory.toAbsolutePath());
}
}
/** We check whether we need to remove the top-level folder while extracting:
 * sometimes (e.g. github) the downloaded archive contains a top-level folder which needs to be removed.
 */
private Path findPluginRoot(Path dir) throws IOException {
if (Files.exists(dir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES))) {
return dir;
} else {
final Path[] topLevelFiles = FileSystemUtils.files(dir);
if (topLevelFiles.length == 1 && Files.isDirectory(topLevelFiles[0])) {
Path subdir = topLevelFiles[0];
if (Files.exists(subdir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES))) {
return subdir;
}
}
}
throw new RuntimeException("Could not find plugin descriptor '" + PluginInfo.ES_PLUGIN_PROPERTIES + "' in plugin zip");
}
/** check a candidate plugin for jar hell before installing it */
private void jarHellCheck(Path candidate, boolean isolated) throws IOException {
// create list of current jars in classpath
final List<URL> jars = new ArrayList<>();
jars.addAll(Arrays.asList(JarHell.parseClassPath()));
// read existing bundles. this does some checks on the installation too.
List<Bundle> bundles = PluginsService.getPluginBundles(environment.pluginsFile());
// if we aren't isolated, we need to jar hell check against any other non-isolated plugins
// that's always the first bundle
if (isolated == false) {
jars.addAll(bundles.get(0).urls);
}
// add plugin jars to the list
Path[] pluginJars = FileSystemUtils.files(candidate, "*.jar");
for (Path jar : pluginJars) {
jars.add(jar.toUri().toURL());
}
// check combined (current classpath + new jars to-be-added)
try {
JarHell.checkJarHell(jars.toArray(new URL[jars.size()]));
} catch (Exception ex) {
throw new RuntimeException(ex);
}
}
private void unzipPlugin(Path zip, Path target) throws IOException {
Files.createDirectories(target);
try (ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) {
ZipEntry entry;
byte[] buffer = new byte[8192];
while ((entry = zipInput.getNextEntry()) != null) {
Path targetFile = target.resolve(entry.getName());
// be on the safe side: do not rely on directories always being extracted
// before their children (this makes sense, but is it guaranteed?)
Files.createDirectories(targetFile.getParent());
if (entry.isDirectory() == false) {
try (OutputStream out = Files.newOutputStream(targetFile)) {
int len;
while((len = zipInput.read(buffer)) >= 0) {
out.write(buffer, 0, len);
}
}
}
zipInput.closeEntry();
}
}
}
public void removePlugin(String name, Terminal terminal) throws IOException {
if (name == null) {
throw new IllegalArgumentException("plugin name must be supplied with remove [name].");
}
PluginHandle pluginHandle = PluginHandle.parse(name);
boolean removed = false;
checkForForbiddenName(pluginHandle.name);
Path pluginToDelete = pluginHandle.extractedDir(environment);
if (Files.exists(pluginToDelete)) {
terminal.println(VERBOSE, "Removing: %s", pluginToDelete);
try {
IOUtils.rm(pluginToDelete);
} catch (IOException ex){
throw new IOException("Unable to remove " + pluginHandle.name + ". Check file permissions on " +
pluginToDelete.toString(), ex);
}
removed = true;
}
Path binLocation = pluginHandle.binDir(environment);
if (Files.exists(binLocation)) {
terminal.println(VERBOSE, "Removing: %s", binLocation);
try {
IOUtils.rm(binLocation);
} catch (IOException ex){
throw new IOException("Unable to remove " + pluginHandle.name + ". Check file permissions on " +
binLocation.toString(), ex);
}
removed = true;
}
if (removed) {
terminal.println("Removed %s", name);
} else {
terminal.println("Plugin %s not found. Run \"plugin list\" to get list of installed plugins.", name);
}
}
static void checkForForbiddenName(String name) {
if (!hasLength(name) || BLACKLIST.contains(name.toLowerCase(Locale.ROOT))) {
throw new IllegalArgumentException("Illegal plugin name: " + name);
}
}
protected static void checkForOfficialPlugins(String name) {
// We make sure that users can use the new short naming form for official plugins only
if (!OFFICIAL_PLUGINS.contains(name)) {
throw new IllegalArgumentException(name +
" is not an official plugin so you should install it using elasticsearch/" +
name + "/latest naming form.");
}
}
public Path[] getListInstalledPlugins() throws IOException {
if (!Files.exists(environment.pluginsFile())) {
return new Path[0];
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(environment.pluginsFile())) {
return StreamSupport.stream(stream.spliterator(), false).toArray(length -> new Path[length]);
}
}
public void listInstalledPlugins(Terminal terminal) throws IOException {
Path[] plugins = getListInstalledPlugins();
terminal.println("Installed plugins in %s:", environment.pluginsFile().toAbsolutePath());
if (plugins == null || plugins.length == 0) {
terminal.println(" - No plugin detected");
} else {
for (Path plugin : plugins) {
terminal.println(" - " + plugin.getFileName());
}
}
}
/**
* Helper class to properly extract the user name, repository name, version and plugin name
* from the plugin name given by a user.
*/
static class PluginHandle {
final String version;
final String user;
final String name;
PluginHandle(String name, String version, String user) {
this.version = version;
this.user = user;
this.name = name;
}
List<URL> urls() {
List<URL> urls = new ArrayList<>();
if (version != null) {
// Elasticsearch new download service uses groupId org.elasticsearch.plugin from 2.0.0
if (user == null) {
if (!Strings.isNullOrEmpty(System.getProperty(PROPERTY_SUPPORT_STAGING_URLS))) {
addUrl(urls, String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/staging/%s-%s/org/elasticsearch/plugin/%s/%s/%s-%s.zip", version, Build.CURRENT.shortHash(), name, version, name, version));
}
addUrl(urls, String.format(Locale.ROOT, "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip", name, version, name, version));
} else {
// Elasticsearch old download service
addUrl(urls, String.format(Locale.ROOT, "https://download.elastic.co/%1$s/%2$s/%2$s-%3$s.zip", user, name, version));
// Maven central repository
addUrl(urls, String.format(Locale.ROOT, "https://search.maven.org/remotecontent?filepath=%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), name, version));
// Sonatype repository
addUrl(urls, String.format(Locale.ROOT, "https://oss.sonatype.org/service/local/repositories/releases/content/%1$s/%2$s/%3$s/%2$s-%3$s.zip", user.replace('.', '/'), name, version));
// Github repository
addUrl(urls, String.format(Locale.ROOT, "https://github.com/%1$s/%2$s/archive/%3$s.zip", user, name, version));
}
}
if (user != null) {
// Github repository for master branch (assume site)
addUrl(urls, String.format(Locale.ROOT, "https://github.com/%1$s/%2$s/archive/master.zip", user, name));
}
return urls;
}
private static void addUrl(List<URL> urls, String url) {
try {
urls.add(new URL(url));
} catch (MalformedURLException e) {
// We simply ignore malformed URL
}
}
Path newDistroFile(Environment env) throws IOException {
return Files.createTempFile(env.tmpFile(), name, ".zip");
}
Tuple<URL, Path> newChecksumUrlAndFile(Environment env, URL originalUrl, String suffix) throws IOException {
URL newUrl = new URL(originalUrl.toString() + "." + suffix);
return new Tuple<>(newUrl, Files.createTempFile(env.tmpFile(), name, ".zip." + suffix));
}
Path extractedDir(Environment env) {
return env.pluginsFile().resolve(name);
}
Path binDir(Environment env) {
return env.binFile().resolve(name);
}
Path configDir(Environment env) {
return env.configFile().resolve(name);
}
static PluginHandle parse(String name) {
String[] elements = name.split("/");
// We first consider the simplest form: pluginname
String repo = elements[0];
String user = null;
String version = null;
// We consider the form: username/pluginname
if (elements.length > 1) {
user = elements[0];
repo = elements[1];
// We consider the form: username/pluginname/version
if (elements.length > 2) {
version = elements[2];
}
}
if (isOfficialPlugin(repo, user, version)) {
return new PluginHandle(repo, Version.CURRENT.number(), null);
}
return new PluginHandle(repo, version, user);
}
static boolean isOfficialPlugin(String repo, String user, String version) {
return version == null && user == null && !Strings.isNullOrEmpty(repo);
}
}
}
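
For an official plugin (no user, no explicit version) the handle falls back to Version.CURRENT and the release download service. Substituting illustrative values into the release format string above shows the resulting URL; the plugin name is one of the official plugins listed above, and the version used here is made up for the example.

import java.util.Locale;

class PluginUrlExample {
    public static void main(String[] args) {
        // illustrative values only; 2.2.0 is a made-up version for the example
        String name = "analysis-icu";
        String version = "2.2.0";
        String url = String.format(Locale.ROOT,
                "https://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip",
                name, version, name, version);
        // prints .../plugin/analysis-icu/2.2.0/analysis-icu-2.2.0.zip
        System.out.println(url);
    }
}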

View File

@@ -1,256 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.apache.commons.cli.CommandLine;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolConfig;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.logging.log4j.LogConfigurator;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.PluginManager.OutputMode;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLDecoder;
import java.util.Locale;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.cmd;
import static org.elasticsearch.common.cli.CliToolConfig.Builder.option;
public class PluginManagerCliParser extends CliTool {
// By default timeout is 0 which means no timeout
public static final TimeValue DEFAULT_TIMEOUT = TimeValue.timeValueMillis(0);
private static final CliToolConfig CONFIG = CliToolConfig.config("plugin", PluginManagerCliParser.class)
.cmds(ListPlugins.CMD, Install.CMD, Remove.CMD)
.build();
public static void main(String[] args) {
// initialize default for es.logger.level because we will not read the logging.yml
String loggerLevel = System.getProperty("es.logger.level", "INFO");
// Set the appender for all potential log files to terminal so that other components that use the logger print to the
// same terminal.
// The reason for this is that the plugin cli cannot be configured with a file appender because when the plugin command is
// executed there is no way of knowing where the logfiles should be placed. For example, if elasticsearch
// is run as a service then the logs should be at /var/log/elasticsearch but when started from the tar they should be at es.home/logs.
// Therefore we print to Terminal.
Environment env = InternalSettingsPreparer.prepareEnvironment(Settings.builder()
.put("appender.terminal.type", "terminal")
.put("rootLogger", "${es.logger.level}, terminal")
.put("es.logger.level", loggerLevel)
.build(), Terminal.DEFAULT);
// configure but do not read the logging conf file
LogConfigurator.configure(env.settings(), false);
int status = new PluginManagerCliParser().execute(args).status();
exit(status);
}
@SuppressForbidden(reason = "Allowed to exit explicitly from #main()")
private static void exit(int status) {
System.exit(status);
}
public PluginManagerCliParser() {
super(CONFIG);
}
public PluginManagerCliParser(Terminal terminal) {
super(CONFIG, terminal);
}
@Override
protected Command parse(String cmdName, CommandLine cli) throws Exception {
switch (cmdName.toLowerCase(Locale.ROOT)) {
case Install.NAME:
return Install.parse(terminal, cli);
case ListPlugins.NAME:
return ListPlugins.parse(terminal, cli);
case Remove.NAME:
return Remove.parse(terminal, cli);
default:
assert false : "can't get here as cmd name is validated before this method is called";
return exitCmd(ExitStatus.USAGE);
}
}
/**
* List all installed plugins
*/
static class ListPlugins extends CliTool.Command {
private static final String NAME = "list";
private static final CliToolConfig.Cmd CMD = cmd(NAME, ListPlugins.class).build();
private final OutputMode outputMode;
public static Command parse(Terminal terminal, CommandLine cli) {
OutputMode outputMode = OutputMode.DEFAULT;
if (cli.hasOption("s")) {
outputMode = OutputMode.SILENT;
}
if (cli.hasOption("v")) {
outputMode = OutputMode.VERBOSE;
}
return new ListPlugins(terminal, outputMode);
}
ListPlugins(Terminal terminal, OutputMode outputMode) {
super(terminal);
this.outputMode = outputMode;
}
@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
PluginManager pluginManager = new PluginManager(env, null, outputMode, DEFAULT_TIMEOUT);
pluginManager.listInstalledPlugins(terminal);
return ExitStatus.OK;
}
}
/**
* Remove a plugin
*/
static class Remove extends CliTool.Command {
private static final String NAME = "remove";
private static final CliToolConfig.Cmd CMD = cmd(NAME, Remove.class).build();
public static Command parse(Terminal terminal, CommandLine cli) {
String[] args = cli.getArgs();
if (args.length == 0) {
return exitCmd(ExitStatus.USAGE, terminal, "plugin name is missing (type -h for help)");
}
OutputMode outputMode = OutputMode.DEFAULT;
if (cli.hasOption("s")) {
outputMode = OutputMode.SILENT;
}
if (cli.hasOption("v")) {
outputMode = OutputMode.VERBOSE;
}
return new Remove(terminal, outputMode, args[0]);
}
private OutputMode outputMode;
final String pluginName;
Remove(Terminal terminal, OutputMode outputMode, String pluginToRemove) {
super(terminal);
this.outputMode = outputMode;
this.pluginName = pluginToRemove;
}
@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
PluginManager pluginManager = new PluginManager(env, null, outputMode, DEFAULT_TIMEOUT);
terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "...");
pluginManager.removePlugin(pluginName, terminal);
return ExitStatus.OK;
}
}
/**
* Installs a plugin
*/
static class Install extends Command {
private static final String NAME = "install";
private static final CliToolConfig.Cmd CMD = cmd(NAME, Install.class)
.options(option("t", "timeout").required(false).hasArg(false))
.options(option("b", "batch").required(false))
.build();
static Command parse(Terminal terminal, CommandLine cli) {
String[] args = cli.getArgs();
// install [plugin-name/url]
if ((args == null) || (args.length == 0)) {
return exitCmd(ExitStatus.USAGE, terminal, "plugin name or url is missing (type -h for help)");
}
String name = args[0];
URL optionalPluginUrl = null;
// try parsing cli argument as URL
try {
optionalPluginUrl = new URL(name);
name = null;
} catch (MalformedURLException e) {
// we tried to parse the cli argument as url and failed
// continue treating it as a symbolic plugin name like `analysis-icu` etc.
}
TimeValue timeout = TimeValue.parseTimeValue(cli.getOptionValue("t"), DEFAULT_TIMEOUT, "cli");
OutputMode outputMode = OutputMode.DEFAULT;
if (cli.hasOption("s")) {
outputMode = OutputMode.SILENT;
}
if (cli.hasOption("v")) {
outputMode = OutputMode.VERBOSE;
}
boolean batch = System.console() == null;
if (cli.hasOption("b")) {
batch = true;
}
return new Install(terminal, name, outputMode, optionalPluginUrl, timeout, batch);
}
final String name;
private OutputMode outputMode;
final URL url;
final TimeValue timeout;
final boolean batch;
Install(Terminal terminal, String name, OutputMode outputMode, URL url, TimeValue timeout, boolean batch) {
super(terminal);
this.name = name;
this.outputMode = outputMode;
this.url = url;
this.timeout = timeout;
this.batch = batch;
}
@Override
public ExitStatus execute(Settings settings, Environment env) throws Exception {
PluginManager pluginManager = new PluginManager(env, url, outputMode, timeout);
if (name != null) {
terminal.println("-> Installing " + Strings.coalesceToEmpty(name) + "...");
} else {
terminal.println("-> Installing from " + URLDecoder.decode(url.toString(), "UTF-8") + "...");
}
pluginManager.downloadAndExtract(name, terminal, batch);
return ExitStatus.OK;
}
}
}
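
The install argument is first tried as a URL and only treated as a symbolic plugin name when URL parsing fails. A JDK-only sketch of that dual interpretation; the argument values are illustrative.

import java.net.MalformedURLException;
import java.net.URL;

class InstallArgumentSketch {
    public static void main(String[] args) {
        String argument = "analysis-icu"; // could also be e.g. "https://example.org/my-plugin.zip"
        String pluginName = argument;
        URL pluginUrl = null;
        try {
            pluginUrl = new URL(argument);
            pluginName = null; // a valid URL wins over the symbolic name
        } catch (MalformedURLException e) {
            // not a URL, keep treating it as a plugin name
        }
        System.out.println("name=" + pluginName + " url=" + pluginUrl);
    }
}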

View File

@@ -19,6 +19,7 @@
package org.elasticsearch.plugins;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.cli.Terminal.Verbosity;
import org.elasticsearch.env.Environment;
@@ -38,7 +39,7 @@ import java.util.Comparator;
import java.util.List;
class PluginSecurity {
/**
* Reads plugin policy, prints/confirms exceptions
*/
@@ -49,7 +50,7 @@ class PluginSecurity {
terminal.print(Verbosity.VERBOSE, "plugin has a policy file with no additional permissions");
return;
}
// sort permissions in a reasonable order
Collections.sort(requested, new Comparator<Permission>() {
@Override
@@ -80,7 +81,7 @@ class PluginSecurity {
return cmp;
}
});
terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@");
terminal.println(Verbosity.NORMAL, "@ WARNING: plugin requires additional permissions @");
terminal.println(Verbosity.NORMAL, "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@");
@@ -98,11 +99,11 @@ class PluginSecurity {
}
}
}
/** Format permission type, name, and actions into a string */
static String formatPermission(Permission permission) {
StringBuilder sb = new StringBuilder();
String clazz = null;
if (permission instanceof UnresolvedPermission) {
clazz = ((UnresolvedPermission) permission).getUnresolvedType();
@@ -110,7 +111,7 @@ class PluginSecurity {
clazz = permission.getClass().getName();
}
sb.append(clazz);
String name = null;
if (permission instanceof UnresolvedPermission) {
name = ((UnresolvedPermission) permission).getUnresolvedName();
@@ -121,7 +122,7 @@ class PluginSecurity {
sb.append(' ');
sb.append(name);
}
String actions = null;
if (permission instanceof UnresolvedPermission) {
actions = ((UnresolvedPermission) permission).getUnresolvedActions();
@@ -134,7 +135,7 @@ class PluginSecurity {
}
return sb.toString();
}
/**
* Parses plugin policy into a set of permissions
*/
@@ -151,8 +152,8 @@ class PluginSecurity {
} catch (NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
PluginManager.tryToDeletePath(terminal, emptyPolicyFile);
IOUtils.rm(emptyPolicyFile);
// parse the plugin's policy file into a set of permissions
final Policy policy;
try {

View File

@@ -0,0 +1,77 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import static org.elasticsearch.common.cli.Terminal.Verbosity.VERBOSE;
/**
* A command for the plugin cli to remove a plugin from elasticsearch.
*/
class RemovePluginCommand extends CliTool.Command {
private final String pluginName;
public RemovePluginCommand(Terminal terminal, String pluginName) {
super(terminal);
this.pluginName = pluginName;
}
@Override
public CliTool.ExitStatus execute(Settings settings, Environment env) throws Exception {
terminal.println("-> Removing " + Strings.coalesceToEmpty(pluginName) + "...");
Path pluginDir = env.pluginsFile().resolve(pluginName);
if (Files.exists(pluginDir) == false) {
throw new IllegalArgumentException("Plugin " + pluginName + " not found. Run 'plugin list' to get list of installed plugins.");
}
List<Path> pluginPaths = new ArrayList<>();
Path pluginBinDir = env.binFile().resolve(pluginName);
if (Files.exists(pluginBinDir)) {
if (Files.isDirectory(pluginBinDir) == false) {
throw new IllegalStateException("Bin dir for " + pluginName + " is not a directory");
}
pluginPaths.add(pluginBinDir);
terminal.println(VERBOSE, "Removing: %s", pluginBinDir);
}
terminal.println(VERBOSE, "Removing: %s", pluginDir);
Path tmpPluginDir = env.pluginsFile().resolve(".removing-" + pluginName);
Files.move(pluginDir, tmpPluginDir, StandardCopyOption.ATOMIC_MOVE);
pluginPaths.add(tmpPluginDir);
IOUtils.rm(pluginPaths.toArray(new Path[pluginPaths.size()]));
return CliTool.ExitStatus.OK;
}
}
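
The removal first moves the plugin directory to a hidden staging name with an atomic move and only then deletes it, so a partially removed directory never lingers under the plugin's real name. Below is a JDK-only sketch of the same move-then-delete idea; the names are illustrative and the real command also handles a bin directory and uses IOUtils.rm.

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.BasicFileAttributes;

class RemovePluginSketch {
    static void remove(Path pluginsDir, String pluginName) throws IOException {
        Path pluginDir = pluginsDir.resolve(pluginName);
        Path staging = pluginsDir.resolve(".removing-" + pluginName);
        // the atomic move makes the plugin disappear under its real name in one step
        Files.move(pluginDir, staging, StandardCopyOption.ATOMIC_MOVE);
        // then delete the staged copy recursively, children before parents
        Files.walkFileTree(staging, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                Files.delete(file);
                return FileVisitResult.CONTINUE;
            }
            @Override
            public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                Files.delete(dir);
                return FileVisitResult.CONTINUE;
            }
        });
    }
}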

View File

@@ -57,7 +57,7 @@ public class RestUtils {
if (fromIndex >= s.length()) {
return;
}
int queryStringLength = s.contains("#") ? s.indexOf("#") : s.length();
String name = null;

View File

@@ -1132,7 +1132,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
// Use the same value for both checks since lastAccessTime can
// be modified by another thread between checks!
final long lastAccessTime = context.lastAccessTime();
if (lastAccessTime == -1l) { // its being processed or timeout is disabled
if (lastAccessTime == -1L) { // its being processed or timeout is disabled
continue;
}
if ((time - lastAccessTime > context.keepAlive())) {

View File

@@ -110,7 +110,7 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue {
@Override
public InternalAggregation buildEmptyAggregation() {
return new InternalAvg(name, 0.0, 0l, formatter, pipelineAggregators(), metaData());
return new InternalAvg(name, 0.0, 0L, formatter, pipelineAggregators(), metaData());
}
public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly<ValuesSource.Numeric> {

View File

@@ -114,7 +114,7 @@ public final class GeoCentroidAggregator extends MetricsAggregator {
@Override
public InternalAggregation buildEmptyAggregation() {
return new InternalGeoCentroid(name, null, 0l, pipelineAggregators(), metaData());
return new InternalGeoCentroid(name, null, 0L, pipelineAggregators(), metaData());
}
@Override

View File

@@ -100,7 +100,7 @@ public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue {
@Override
public InternalAggregation buildEmptyAggregation() {
return new InternalValueCount(name, 0l, formatter, pipelineAggregators(), metaData());
return new InternalValueCount(name, 0L, formatter, pipelineAggregators(), metaData());
}
@Override

View File

@@ -0,0 +1,493 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.search.suggest.phrase;
import org.apache.lucene.util.automaton.LevenshteinAutomata;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.search.suggest.SuggestUtils;
import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder.CandidateGenerator;
import java.io.IOException;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;
import java.util.function.Consumer;
public final class DirectCandidateGeneratorBuilder
implements Writeable<DirectCandidateGeneratorBuilder>, CandidateGenerator {
private static final String TYPE = "direct_generator";
static final DirectCandidateGeneratorBuilder PROTOTYPE = new DirectCandidateGeneratorBuilder("_na_");
static final ParseField DIRECT_GENERATOR_FIELD = new ParseField(TYPE);
static final ParseField FIELDNAME_FIELD = new ParseField("field");
static final ParseField PREFILTER_FIELD = new ParseField("pre_filter");
static final ParseField POSTFILTER_FIELD = new ParseField("post_filter");
static final ParseField SUGGESTMODE_FIELD = new ParseField("suggest_mode");
static final ParseField MIN_DOC_FREQ_FIELD = new ParseField("min_doc_freq");
static final ParseField ACCURACY_FIELD = new ParseField("accuracy");
static final ParseField SIZE_FIELD = new ParseField("size");
static final ParseField SORT_FIELD = new ParseField("sort");
static final ParseField STRING_DISTANCE_FIELD = new ParseField("string_distance");
static final ParseField MAX_EDITS_FIELD = new ParseField("max_edits");
static final ParseField MAX_INSPECTIONS_FIELD = new ParseField("max_inspections");
static final ParseField MAX_TERM_FREQ_FIELD = new ParseField("max_term_freq");
static final ParseField PREFIX_LENGTH_FIELD = new ParseField("prefix_length");
static final ParseField MIN_WORD_LENGTH_FIELD = new ParseField("min_word_length");
private final String field;
private String preFilter;
private String postFilter;
private String suggestMode;
private Float accuracy;
private Integer size;
private String sort;
private String stringDistance;
private Integer maxEdits;
private Integer maxInspections;
private Float maxTermFreq;
private Integer prefixLength;
private Integer minWordLength;
private Float minDocFreq;
/**
* @param field the field to fetch the candidate suggestions from.
*/
public DirectCandidateGeneratorBuilder(String field) {
this.field = field;
}
/**
* Quasi copy-constructor that takes all values from the generator
* passed in, but uses a different field name. Needed by the parser because we
* need to buffer the field name but read all other properties to a
* temporary object.
*/
private static DirectCandidateGeneratorBuilder replaceField(String field, DirectCandidateGeneratorBuilder other) {
DirectCandidateGeneratorBuilder generator = new DirectCandidateGeneratorBuilder(field);
generator.preFilter = other.preFilter;
generator.postFilter = other.postFilter;
generator.suggestMode = other.suggestMode;
generator.accuracy = other.accuracy;
generator.size = other.size;
generator.sort = other.sort;
generator.stringDistance = other.stringDistance;
generator.maxEdits = other.maxEdits;
generator.maxInspections = other.maxInspections;
generator.maxTermFreq = other.maxTermFreq;
generator.prefixLength = other.prefixLength;
generator.minWordLength = other.minWordLength;
generator.minDocFreq = other.minDocFreq;
return generator;
}
/**
* The global suggest mode controls which suggested terms are included, or
* for which suggest text tokens terms should be suggested.
* Three possible values can be specified:
* <ol>
* <li><code>missing</code> - Only suggest terms in the suggest text
* that aren't in the index. This is the default.
* <li><code>popular</code> - Only suggest terms that occur in more docs
* than the original suggest text term.
* <li><code>always</code> - Suggest any matching suggest terms based on
* tokens in the suggest text.
* </ol>
*/
public DirectCandidateGeneratorBuilder suggestMode(String suggestMode) {
this.suggestMode = suggestMode;
return this;
}
/**
* Sets how similar the suggested terms need to be, at a minimum, compared to
* the original suggest text tokens. A value between 0 and 1 can be
* specified. This value will be compared to the string distance result
* of each candidate spelling correction.
* <p>
* Default is <tt>0.5</tt>
*/
public DirectCandidateGeneratorBuilder accuracy(float accuracy) {
this.accuracy = accuracy;
return this;
}
/**
* Sets the maximum suggestions to be returned per suggest text term.
*/
public DirectCandidateGeneratorBuilder size(int size) {
if (size <= 0) {
throw new IllegalArgumentException("Size must be positive");
}
this.size = size;
return this;
}
/**
* Sets how to sort the suggest terms per suggest text token. Two
* possible values:
* <ol>
* <li><code>score</code> - Sort should first be based on score, then
* document frequency and then the term itself.
* <li><code>frequency</code> - Sort should first be based on document
* frequency, then score and then the term itself.
* </ol>
* <p>
* What the score is depends on the suggester being used.
*/
public DirectCandidateGeneratorBuilder sort(String sort) {
this.sort = sort;
return this;
}
/**
* Sets what string distance implementation to use for comparing how
* similar suggested terms are. Five possible values can be specified:
* <ol>
* <li><code>internal</code> - This is the default and is based on
* <code>damerau_levenshtein</code>, but highly optimized for comparing
* string distance for terms inside the index.
* <li><code>damerau_levenshtein</code> - String distance algorithm
* based on Damerau-Levenshtein algorithm.
* <li><code>levenstein</code> - String distance algorithm based on
* Levenshtein edit distance algorithm.
* <li><code>jarowinkler</code> - String distance algorithm based on
* Jaro-Winkler algorithm.
* <li><code>ngram</code> - String distance algorithm based on character
* n-grams.
* </ol>
*/
public DirectCandidateGeneratorBuilder stringDistance(String stringDistance) {
this.stringDistance = stringDistance;
return this;
}
/**
* Sets the maximum edit distance candidate suggestions can have in
 * order to be considered a suggestion. Can only be a value between 1
 * and 2. Any other value results in a bad request error being thrown.
* Defaults to <tt>2</tt>.
*/
public DirectCandidateGeneratorBuilder maxEdits(Integer maxEdits) {
if (maxEdits < 1 || maxEdits > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
throw new IllegalArgumentException("Illegal max_edits value " + maxEdits);
}
this.maxEdits = maxEdits;
return this;
}
/**
 * A factor that the size is multiplied by in order to inspect
* more candidate suggestions. Can improve accuracy at the cost of
* performance. Defaults to <tt>5</tt>.
*/
public DirectCandidateGeneratorBuilder maxInspections(Integer maxInspections) {
this.maxInspections = maxInspections;
return this;
}
/**
* Sets a maximum threshold in number of documents a suggest text token
 * can appear in, in order to be corrected. Can be a relative percentage
 * number (e.g. 0.4) or an absolute number to represent document
 * frequencies. If a value higher than 1 is specified then the value
 * cannot be fractional. Defaults to <tt>0.01</tt>.
* <p>
* This can be used to exclude high frequency terms from being
 * suggested. High frequency terms are usually spelled correctly; on top
 * of that, this also improves the suggest performance.
*/
public DirectCandidateGeneratorBuilder maxTermFreq(float maxTermFreq) {
this.maxTermFreq = maxTermFreq;
return this;
}
/**
 * Sets the minimum number of prefix characters that must match in order
 * to be a candidate suggestion. Defaults to 1. Increasing this number
* improves suggest performance. Usually misspellings don't occur in the
* beginning of terms.
*/
public DirectCandidateGeneratorBuilder prefixLength(int prefixLength) {
this.prefixLength = prefixLength;
return this;
}
/**
* The minimum length a suggest text term must have in order to be
* corrected. Defaults to <tt>4</tt>.
*/
public DirectCandidateGeneratorBuilder minWordLength(int minWordLength) {
this.minWordLength = minWordLength;
return this;
}
/**
* Sets a minimal threshold in number of documents a suggested term
* should appear in. This can be specified as an absolute number or as a
* relative percentage of number of documents. This can improve quality
* by only suggesting high frequency terms. Defaults to 0f and is not
* enabled. If a value higher than 1 is specified then the number cannot
* be fractional.
*/
public DirectCandidateGeneratorBuilder minDocFreq(float minDocFreq) {
this.minDocFreq = minDocFreq;
return this;
}
/**
* Sets a filter (analyzer) that is applied to each of the tokens passed to this candidate generator.
* This filter is applied to the original token before candidates are generated.
*/
public DirectCandidateGeneratorBuilder preFilter(String preFilter) {
this.preFilter = preFilter;
return this;
}
/**
* Sets a filter (analyzer) that is applied to each of the generated tokens
* before they are passed to the actual phrase scorer.
*/
public DirectCandidateGeneratorBuilder postFilter(String postFilter) {
this.postFilter = postFilter;
return this;
}
/**
 * Gets the type identifier of this {@link CandidateGenerator}
*/
@Override
public String getType() {
return TYPE;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
outputFieldIfNotNull(field, FIELDNAME_FIELD, builder);
outputFieldIfNotNull(accuracy, ACCURACY_FIELD, builder);
outputFieldIfNotNull(maxEdits, MAX_EDITS_FIELD, builder);
outputFieldIfNotNull(maxInspections, MAX_INSPECTIONS_FIELD, builder);
outputFieldIfNotNull(maxTermFreq, MAX_TERM_FREQ_FIELD, builder);
outputFieldIfNotNull(minWordLength, MIN_WORD_LENGTH_FIELD, builder);
outputFieldIfNotNull(minDocFreq, MIN_DOC_FREQ_FIELD, builder);
outputFieldIfNotNull(preFilter, PREFILTER_FIELD, builder);
outputFieldIfNotNull(prefixLength, PREFIX_LENGTH_FIELD, builder);
outputFieldIfNotNull(postFilter, POSTFILTER_FIELD, builder);
outputFieldIfNotNull(suggestMode, SUGGESTMODE_FIELD, builder);
outputFieldIfNotNull(size, SIZE_FIELD, builder);
outputFieldIfNotNull(sort, SORT_FIELD, builder);
outputFieldIfNotNull(stringDistance, STRING_DISTANCE_FIELD, builder);
builder.endObject();
return builder;
}
private static <T> void outputFieldIfNotNull(T value, ParseField field, XContentBuilder builder) throws IOException {
if (value != null) {
builder.field(field.getPreferredName(), value);
}
}
private static ObjectParser<Tuple<Set<String>, DirectCandidateGeneratorBuilder>, QueryParseContext> PARSER = new ObjectParser<>(TYPE);
static {
PARSER.declareString((tp, s) -> tp.v1().add(s), FIELDNAME_FIELD);
PARSER.declareString((tp, s) -> tp.v2().preFilter(s), PREFILTER_FIELD);
PARSER.declareString((tp, s) -> tp.v2().postFilter(s), POSTFILTER_FIELD);
PARSER.declareString((tp, s) -> tp.v2().suggestMode(s), SUGGESTMODE_FIELD);
PARSER.declareFloat((tp, f) -> tp.v2().minDocFreq(f), MIN_DOC_FREQ_FIELD);
PARSER.declareFloat((tp, f) -> tp.v2().accuracy(f), ACCURACY_FIELD);
PARSER.declareInt((tp, i) -> tp.v2().size(i), SIZE_FIELD);
PARSER.declareString((tp, s) -> tp.v2().sort(s), SORT_FIELD);
PARSER.declareString((tp, s) -> tp.v2().stringDistance(s), STRING_DISTANCE_FIELD);
PARSER.declareInt((tp, i) -> tp.v2().maxInspections(i), MAX_INSPECTIONS_FIELD);
PARSER.declareFloat((tp, f) -> tp.v2().maxTermFreq(f), MAX_TERM_FREQ_FIELD);
PARSER.declareInt((tp, i) -> tp.v2().maxEdits(i), MAX_EDITS_FIELD);
PARSER.declareInt((tp, i) -> tp.v2().minWordLength(i), MIN_WORD_LENGTH_FIELD);
PARSER.declareInt((tp, i) -> tp.v2().prefixLength(i), PREFIX_LENGTH_FIELD);
}
@Override
public DirectCandidateGeneratorBuilder fromXContent(QueryParseContext parseContext) throws IOException {
DirectCandidateGeneratorBuilder tempGenerator = new DirectCandidateGeneratorBuilder("_na_");
        Set<String> tmpFieldName = new HashSet<>(1); // bucket for the field name, needed as constructor arg later
PARSER.parse(parseContext.parser(),
new Tuple<Set<String>, DirectCandidateGeneratorBuilder>(tmpFieldName, tempGenerator));
if (tmpFieldName.size() != 1) {
throw new IllegalArgumentException("[" + TYPE + "] expects exactly one field parameter, but found " + tmpFieldName);
}
return replaceField(tmpFieldName.iterator().next(), tempGenerator);
}
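    // Illustrative sketch, assuming the ParseField preferred names match the old string keys
    // ("field", "min_word_length"): an incoming object such as
    //   { "field" : "title", "min_word_length" : 3 }
    // fills the Tuple with tmpFieldName = {"title"} and a temporary builder carrying
    // minWordLength = 3; replaceField("title", tempGenerator) then yields the final builder.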
public PhraseSuggestionContext.DirectCandidateGenerator build(QueryShardContext context) throws IOException {
MapperService mapperService = context.getMapperService();
PhraseSuggestionContext.DirectCandidateGenerator generator = new PhraseSuggestionContext.DirectCandidateGenerator();
generator.setField(this.field);
transferIfNotNull(this.size, generator::size);
if (this.preFilter != null) {
generator.preFilter(mapperService.analysisService().analyzer(this.preFilter));
if (generator.preFilter() == null) {
throw new IllegalArgumentException("Analyzer [" + this.preFilter + "] doesn't exists");
}
}
if (this.postFilter != null) {
generator.postFilter(mapperService.analysisService().analyzer(this.postFilter));
if (generator.postFilter() == null) {
throw new IllegalArgumentException("Analyzer [" + this.postFilter + "] doesn't exists");
}
}
transferIfNotNull(this.accuracy, generator::accuracy);
if (this.suggestMode != null) {
generator.suggestMode(SuggestUtils.resolveSuggestMode(this.suggestMode));
}
if (this.sort != null) {
generator.sort(SuggestUtils.resolveSort(this.sort));
}
if (this.stringDistance != null) {
generator.stringDistance(SuggestUtils.resolveDistance(this.stringDistance));
}
transferIfNotNull(this.maxEdits, generator::maxEdits);
if (generator.maxEdits() < 1 || generator.maxEdits() > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
throw new IllegalArgumentException("Illegal max_edits value " + generator.maxEdits());
}
transferIfNotNull(this.maxInspections, generator::maxInspections);
transferIfNotNull(this.maxTermFreq, generator::maxTermFreq);
transferIfNotNull(this.prefixLength, generator::prefixLength);
transferIfNotNull(this.minWordLength, generator::minQueryLength);
transferIfNotNull(this.minDocFreq, generator::minDocFreq);
return generator;
}
private static <T> void transferIfNotNull(T value, Consumer<T> consumer) {
if (value != null) {
consumer.accept(value);
}
}
@Override
public final String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.prettyPrint();
toXContent(builder, EMPTY_PARAMS);
return builder.string();
} catch (Exception e) {
return "{ \"error\" : \"" + ExceptionsHelper.detailedMessage(e) + "\"}";
}
}
@Override
public DirectCandidateGeneratorBuilder readFrom(StreamInput in) throws IOException {
DirectCandidateGeneratorBuilder cg = new DirectCandidateGeneratorBuilder(in.readString());
cg.suggestMode = in.readOptionalString();
if (in.readBoolean()) {
cg.accuracy = in.readFloat();
}
cg.size = in.readOptionalVInt();
cg.sort = in.readOptionalString();
cg.stringDistance = in.readOptionalString();
cg.maxEdits = in.readOptionalVInt();
cg.maxInspections = in.readOptionalVInt();
if (in.readBoolean()) {
cg.maxTermFreq = in.readFloat();
}
cg.prefixLength = in.readOptionalVInt();
cg.minWordLength = in.readOptionalVInt();
if (in.readBoolean()) {
cg.minDocFreq = in.readFloat();
}
cg.preFilter = in.readOptionalString();
cg.postFilter = in.readOptionalString();
return cg;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(field);
out.writeOptionalString(suggestMode);
out.writeBoolean(accuracy != null);
if (accuracy != null) {
out.writeFloat(accuracy);
}
out.writeOptionalVInt(size);
out.writeOptionalString(sort);
out.writeOptionalString(stringDistance);
out.writeOptionalVInt(maxEdits);
out.writeOptionalVInt(maxInspections);
out.writeBoolean(maxTermFreq != null);
if (maxTermFreq != null) {
out.writeFloat(maxTermFreq);
}
out.writeOptionalVInt(prefixLength);
out.writeOptionalVInt(minWordLength);
out.writeBoolean(minDocFreq != null);
if (minDocFreq != null) {
out.writeFloat(minDocFreq);
}
out.writeOptionalString(preFilter);
out.writeOptionalString(postFilter);
}
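    // Hypothetical round-trip sketch (the test-style wiring below is an assumption, not part of
    // this class): every optional value is written with a presence flag or an optional primitive,
    // so a builder read back from the stream equals the original.
    //   BytesStreamOutput out = new BytesStreamOutput();
    //   new DirectCandidateGeneratorBuilder("title").accuracy(0.7f).writeTo(out);
    //   DirectCandidateGeneratorBuilder copy =
    //           new DirectCandidateGeneratorBuilder("_na_").readFrom(StreamInput.wrap(out.bytes()));
    //   assert copy.equals(new DirectCandidateGeneratorBuilder("title").accuracy(0.7f));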
@Override
public final int hashCode() {
return Objects.hash(field, preFilter, postFilter, suggestMode, accuracy,
size, sort, stringDistance, maxEdits, maxInspections,
maxTermFreq, prefixLength, minWordLength, minDocFreq);
}
@Override
public final boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
DirectCandidateGeneratorBuilder other = (DirectCandidateGeneratorBuilder) obj;
return Objects.equals(field, other.field) &&
Objects.equals(preFilter, other.preFilter) &&
Objects.equals(postFilter, other.postFilter) &&
Objects.equals(suggestMode, other.suggestMode) &&
Objects.equals(accuracy, other.accuracy) &&
Objects.equals(size, other.size) &&
Objects.equals(sort, other.sort) &&
Objects.equals(stringDistance, other.stringDistance) &&
Objects.equals(maxEdits, other.maxEdits) &&
Objects.equals(maxInspections, other.maxInspections) &&
Objects.equals(maxTermFreq, other.maxTermFreq) &&
Objects.equals(prefixLength, other.prefixLength) &&
Objects.equals(minWordLength, other.minWordLength) &&
Objects.equals(minDocFreq, other.minDocFreq);
}
}
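For illustration only, a minimal sketch of how the new builder's fluent API might be used; the field name and parameter values are invented for the example, and the rendered JSON assumes the ParseField preferred names used in toXContent above:

    DirectCandidateGeneratorBuilder generator = new DirectCandidateGeneratorBuilder("title")
            .suggestMode("missing")   // only suggest terms absent from the index (the default)
            .accuracy(0.7f)           // stricter than the 0.5 default
            .maxEdits(2)              // limited to 1 or 2 by LevenshteinAutomata
            .size(5);                 // candidates returned per suggest text term
    // toXContent would then render roughly:
    // {"field":"title","accuracy":0.7,"max_edits":2,"suggest_mode":"missing","size":5}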

View File

@ -98,18 +98,10 @@ public final class PhraseSuggestParser implements SuggestContextParser {
}
}
} else if (token == Token.START_ARRAY) {
if ("direct_generator".equals(fieldName) || "directGenerator".equals(fieldName)) {
if (parseFieldMatcher.match(fieldName, DirectCandidateGeneratorBuilder.DIRECT_GENERATOR_FIELD)) {
// for now we only have a single type of generator
while ((token = parser.nextToken()) == Token.START_OBJECT) {
PhraseSuggestionContext.DirectCandidateGenerator generator = new PhraseSuggestionContext.DirectCandidateGenerator();
while ((token = parser.nextToken()) != Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
fieldName = parser.currentName();
}
if (token.isValue()) {
parseCandidateGenerator(parser, mapperService, fieldName, generator, parseFieldMatcher);
}
}
PhraseSuggestionContext.DirectCandidateGenerator generator = parseCandidateGenerator(parser, mapperService, parseFieldMatcher);
verifyGenerator(generator);
suggestion.addGenerator(generator);
}
@ -323,34 +315,44 @@ public final class PhraseSuggestParser implements SuggestContextParser {
}
}
private void parseCandidateGenerator(XContentParser parser, MapperService mapperService, String fieldName,
PhraseSuggestionContext.DirectCandidateGenerator generator, ParseFieldMatcher parseFieldMatcher) throws IOException {
if (!SuggestUtils.parseDirectSpellcheckerSettings(parser, fieldName, generator, parseFieldMatcher)) {
if ("field".equals(fieldName)) {
generator.setField(parser.text());
if (mapperService.fullName(generator.field()) == null) {
throw new IllegalArgumentException("No mapping found for field [" + generator.field() + "]");
static PhraseSuggestionContext.DirectCandidateGenerator parseCandidateGenerator(XContentParser parser, MapperService mapperService,
ParseFieldMatcher parseFieldMatcher) throws IOException {
XContentParser.Token token;
String fieldName = null;
PhraseSuggestionContext.DirectCandidateGenerator generator = new PhraseSuggestionContext.DirectCandidateGenerator();
while ((token = parser.nextToken()) != Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
fieldName = parser.currentName();
}
if (token.isValue()) {
if (!SuggestUtils.parseDirectSpellcheckerSettings(parser, fieldName, generator, parseFieldMatcher)) {
if ("field".equals(fieldName)) {
generator.setField(parser.text());
if (mapperService.fullName(generator.field()) == null) {
throw new IllegalArgumentException("No mapping found for field [" + generator.field() + "]");
}
} else if ("size".equals(fieldName)) {
generator.size(parser.intValue());
} else if ("pre_filter".equals(fieldName) || "preFilter".equals(fieldName)) {
String analyzerName = parser.text();
Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
if (analyzer == null) {
throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists");
}
generator.preFilter(analyzer);
} else if ("post_filter".equals(fieldName) || "postFilter".equals(fieldName)) {
String analyzerName = parser.text();
Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
if (analyzer == null) {
throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists");
}
generator.postFilter(analyzer);
} else {
throw new IllegalArgumentException("CandidateGenerator doesn't support [" + fieldName + "]");
}
}
} else if ("size".equals(fieldName)) {
generator.size(parser.intValue());
} else if ("pre_filter".equals(fieldName) || "preFilter".equals(fieldName)) {
String analyzerName = parser.text();
Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
if (analyzer == null) {
throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists");
}
generator.preFilter(analyzer);
} else if ("post_filter".equals(fieldName) || "postFilter".equals(fieldName)) {
String analyzerName = parser.text();
Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
if (analyzer == null) {
throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists");
}
generator.postFilter(analyzer);
} else {
throw new IllegalArgumentException("CandidateGenerator doesn't support [" + fieldName + "]");
}
}
return generator;
}
}

View File

@ -278,13 +278,13 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSugge
}
/**
* Creates a new {@link DirectCandidateGenerator}
* Creates a new {@link DirectCandidateGeneratorBuilder}
*
* @param field
* the field this candidate generator operates on.
*/
public static DirectCandidateGenerator candidateGenerator(String field) {
return new DirectCandidateGenerator(field);
public static DirectCandidateGeneratorBuilder candidateGenerator(String field) {
return new DirectCandidateGeneratorBuilder(field);
}
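    // Hypothetical caller-side sketch: with this change the factory hands out the serializable
    // builder instead of the old inner DirectCandidateGenerator class, e.g.
    //   DirectCandidateGeneratorBuilder generator =
    //           PhraseSuggestionBuilder.candidateGenerator("body").suggestMode("popular");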
/**
@ -644,267 +644,11 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder<PhraseSugge
}
/**
* {@link CandidateGenerator} base class.
* {@link CandidateGenerator} interface.
*/
public static abstract class CandidateGenerator implements ToXContent {
private final String type;
public CandidateGenerator(String type) {
this.type = type;
}
public String getType() {
return type;
}
public interface CandidateGenerator extends ToXContent {
String getType();
CandidateGenerator fromXContent(QueryParseContext parseContext) throws IOException;
}
/**
*
*
*/
public static final class DirectCandidateGenerator extends CandidateGenerator {
private final String field;
private String preFilter;
private String postFilter;
private String suggestMode;
private Float accuracy;
private Integer size;
private String sort;
private String stringDistance;
private Integer maxEdits;
private Integer maxInspections;
private Float maxTermFreq;
private Integer prefixLength;
private Integer minWordLength;
private Float minDocFreq;
/**
* @param field Sets from what field to fetch the candidate suggestions from.
*/
public DirectCandidateGenerator(String field) {
super("direct_generator");
this.field = field;
}
/**
 * The global suggest mode controls which suggested terms are included, or
 * for which suggest text tokens terms should be suggested.
* Three possible values can be specified:
* <ol>
* <li><code>missing</code> - Only suggest terms in the suggest text
* that aren't in the index. This is the default.
* <li><code>popular</code> - Only suggest terms that occur in more docs
 * than the original suggest text term.
* <li><code>always</code> - Suggest any matching suggest terms based on
* tokens in the suggest text.
* </ol>
*/
public DirectCandidateGenerator suggestMode(String suggestMode) {
this.suggestMode = suggestMode;
return this;
}
/**
* Sets how similar the suggested terms at least need to be compared to
* the original suggest text tokens. A value between 0 and 1 can be
* specified. This value will be compared to the string distance result
* of each candidate spelling correction.
* <p>
* Default is <tt>0.5</tt>
*/
public DirectCandidateGenerator accuracy(float accuracy) {
this.accuracy = accuracy;
return this;
}
/**
* Sets the maximum suggestions to be returned per suggest text term.
*/
public DirectCandidateGenerator size(int size) {
if (size <= 0) {
throw new IllegalArgumentException("Size must be positive");
}
this.size = size;
return this;
}
/**
* Sets how to sort the suggest terms per suggest text token. Two
* possible values:
* <ol>
* <li><code>score</code> - Sort should first be based on score, then
* document frequency and then the term itself.
* <li><code>frequency</code> - Sort should first be based on document
 * frequency, then score and then the term itself.
* </ol>
* <p>
* What the score is depends on the suggester being used.
*/
public DirectCandidateGenerator sort(String sort) {
this.sort = sort;
return this;
}
/**
* Sets what string distance implementation to use for comparing how
* similar suggested terms are. Four possible values can be specified:
* <ol>
* <li><code>internal</code> - This is the default and is based on
* <code>damerau_levenshtein</code>, but highly optimized for comparing
* string distance for terms inside the index.
* <li><code>damerau_levenshtein</code> - String distance algorithm
* based on Damerau-Levenshtein algorithm.
* <li><code>levenstein</code> - String distance algorithm based on
 * the Levenshtein edit distance algorithm.
* <li><code>jarowinkler</code> - String distance algorithm based on
* Jaro-Winkler algorithm.
* <li><code>ngram</code> - String distance algorithm based on character
* n-grams.
* </ol>
*/
public DirectCandidateGenerator stringDistance(String stringDistance) {
this.stringDistance = stringDistance;
return this;
}
/**
* Sets the maximum edit distance candidate suggestions can have in
 * order to be considered a suggestion. Can only be a value between 1
 * and 2. Any other value results in a bad request error being thrown.
* Defaults to <tt>2</tt>.
*/
public DirectCandidateGenerator maxEdits(Integer maxEdits) {
this.maxEdits = maxEdits;
return this;
}
/**
* A factor that is used to multiply with the size in order to inspect
* more candidate suggestions. Can improve accuracy at the cost of
* performance. Defaults to <tt>5</tt>.
*/
public DirectCandidateGenerator maxInspections(Integer maxInspections) {
this.maxInspections = maxInspections;
return this;
}
/**
* Sets a maximum threshold in number of documents a suggest text token
* can exist in order to be corrected. Can be a relative percentage
 * number (e.g. 0.4) or an absolute number to represent document
 * frequencies. If a value higher than 1 is specified then the value
 * cannot be fractional. Defaults to <tt>0.01</tt>.
* <p>
* This can be used to exclude high frequency terms from being
 * suggested. High frequency terms are usually spelled correctly; on top
 * of that, this also improves the suggest performance.
*/
public DirectCandidateGenerator maxTermFreq(float maxTermFreq) {
this.maxTermFreq = maxTermFreq;
return this;
}
/**
 * Sets the minimum number of prefix characters that must match in order
 * to be a candidate suggestion. Defaults to 1. Increasing this number
* improves suggest performance. Usually misspellings don't occur in the
* beginning of terms.
*/
public DirectCandidateGenerator prefixLength(int prefixLength) {
this.prefixLength = prefixLength;
return this;
}
/**
* The minimum length a suggest text term must have in order to be
* corrected. Defaults to <tt>4</tt>.
*/
public DirectCandidateGenerator minWordLength(int minWordLength) {
this.minWordLength = minWordLength;
return this;
}
/**
* Sets a minimal threshold in number of documents a suggested term
* should appear in. This can be specified as an absolute number or as a
* relative percentage of number of documents. This can improve quality
* by only suggesting high frequency terms. Defaults to 0f and is not
* enabled. If a value higher than 1 is specified then the number cannot
* be fractional.
*/
public DirectCandidateGenerator minDocFreq(float minDocFreq) {
this.minDocFreq = minDocFreq;
return this;
}
/**
* Sets a filter (analyzer) that is applied to each of the tokens passed to this candidate generator.
* This filter is applied to the original token before candidates are generated.
*/
public DirectCandidateGenerator preFilter(String preFilter) {
this.preFilter = preFilter;
return this;
}
/**
* Sets a filter (analyzer) that is applied to each of the generated tokens
* before they are passed to the actual phrase scorer.
*/
public DirectCandidateGenerator postFilter(String postFilter) {
this.postFilter = postFilter;
return this;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (field != null) {
builder.field("field", field);
}
if (suggestMode != null) {
builder.field("suggest_mode", suggestMode);
}
if (accuracy != null) {
builder.field("accuracy", accuracy);
}
if (size != null) {
builder.field("size", size);
}
if (sort != null) {
builder.field("sort", sort);
}
if (stringDistance != null) {
builder.field("string_distance", stringDistance);
}
if (maxEdits != null) {
builder.field("max_edits", maxEdits);
}
if (maxInspections != null) {
builder.field("max_inspections", maxInspections);
}
if (maxTermFreq != null) {
builder.field("max_term_freq", maxTermFreq);
}
if (prefixLength != null) {
builder.field("prefix_length", prefixLength);
}
if (minWordLength != null) {
builder.field("min_word_length", minWordLength);
}
if (minDocFreq != null) {
builder.field("min_doc_freq", minDocFreq);
}
if (preFilter != null) {
builder.field("pre_filter", preFilter);
}
if (postFilter != null) {
builder.field("post_filter", postFilter);
}
builder.endObject();
return builder;
}
}
}

View File

@ -21,13 +21,21 @@ package org.elasticsearch.transport;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import java.util.List;
import static java.util.Collections.emptyList;
/**
 * A collection of settings related to transport components, which are also needed in org.elasticsearch.bootstrap.Security
* This class should only contain static code which is *safe* to load before the security manager is enforced.
*/
final public class TransportSettings {
public static final Setting<List<String>> HOST = Setting.listSetting("transport.host", emptyList(), s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> PUBLISH_HOST = Setting.listSetting("transport.publish_host", HOST, s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> BIND_HOST = Setting.listSetting("transport.bind_host", HOST, s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<String> PORT = new Setting<>("transport.tcp.port", "9300-9400", s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> PUBLISH_PORT = Setting.intSetting("transport.publish_port", -1, -1, false, Setting.Scope.CLUSTER);
public static final String DEFAULT_PROFILE = "default";
public static final Setting<Settings> TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", true, Setting.Scope.CLUSTER);
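A minimal sketch of reading these typed settings, using only the constants defined above; the concrete values are invented for the example:

    Settings settings = Settings.builder()
            .putArray("transport.host", "127.0.0.1")
            .put("transport.tcp.port", "9500-9600")
            .build();
    List<String> bindHosts = TransportSettings.BIND_HOST.get(settings); // falls back to transport.host
    String portRange = TransportSettings.PORT.get(settings);            // "9500-9600"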

View File

@ -119,12 +119,6 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static java.util.Collections.unmodifiableMap;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_BLOCKING_SERVER;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_KEEP_ALIVE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_NO_DELAY;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_RECEIVE_BUFFER_SIZE;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_REUSE_ADDRESS;
import static org.elasticsearch.common.network.NetworkService.TcpSettings.TCP_SEND_BUFFER_SIZE;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseConnectionException;
import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException;
@ -158,8 +152,16 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
public static final Setting<Integer> CONNECTIONS_PER_NODE_PING = Setting.intSetting("transport.connections_per_node.ping", 1, 1, false, Setting.Scope.CLUSTER);
// the scheduled internal ping interval setting, defaults to disabled (-1)
public static final Setting<TimeValue> PING_SCHEDULE = Setting.timeSetting("transport.ping_schedule", TimeValue.timeValueSeconds(-1), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_CLIENT = Setting.boolSetting("transport." + TcpSettings.TCP_BLOCKING_CLIENT.getKey(), TcpSettings.TCP_BLOCKING_CLIENT, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = Setting.timeSetting("transport." + TcpSettings.TCP_CONNECT_TIMEOUT.getKey(), TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_CLIENT = Setting.boolSetting("transport.tcp.blocking_client", TcpSettings.TCP_BLOCKING_CLIENT, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = Setting.timeSetting("transport.tcp.connect_timeout", TcpSettings.TCP_CONNECT_TIMEOUT, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_NO_DELAY = Setting.boolSetting("transport.tcp_no_delay", TcpSettings.TCP_NO_DELAY, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_KEEP_ALIVE = Setting.boolSetting("transport.tcp.keep_alive", TcpSettings.TCP_KEEP_ALIVE, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_SERVER = Setting.boolSetting("transport.tcp.blocking_server", TcpSettings.TCP_BLOCKING_SERVER, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_REUSE_ADDRESS = Setting.boolSetting("transport.tcp.reuse_address", TcpSettings.TCP_REUSE_ADDRESS, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("transport.tcp.receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> NETTY_MAX_CUMULATION_BUFFER_CAPACITY = Setting.byteSizeSetting("transport.netty.max_cumulation_buffer_capacity", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
public static final Setting<Integer> NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS = Setting.intSetting("transport.netty.max_composite_buffer_components", -1, -1, false, Setting.Scope.CLUSTER);
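For context, a before/after sketch of the lookups these typed constants replace later in this file (the "before" line mirrors the removed string-key form):

    // before: boolean tcpNoDelay = settings.getAsBoolean("transport.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
    // after:  boolean tcpNoDelay = TCP_NO_DELAY.get(settings);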
@ -179,7 +181,6 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
public static final Setting<ByteSizeValue> NETTY_RECEIVE_PREDICTOR_MAX = Setting.byteSizeSetting("transport.netty.receive_predictor_max", NETTY_RECEIVE_PREDICTOR_SIZE, false, Setting.Scope.CLUSTER);
public static final Setting<Integer> NETTY_BOSS_COUNT = Setting.intSetting("transport.netty.boss_count", 1, 1, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> NETWORK_SERVER = Setting.boolSetting("network.server", true, false, Setting.Scope.CLUSTER);
protected final NetworkService networkService;
protected final Version version;
@ -284,7 +285,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
boolean success = false;
try {
clientBootstrap = createClientBootstrap();
if (NETWORK_SERVER.get(settings)) {
if (NetworkService.NETWORK_SERVER.get(settings)) {
final OpenChannelsHandler openChannels = new OpenChannelsHandler(logger);
this.serverOpenChannels = openChannels;
@ -356,25 +357,25 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
clientBootstrap.setPipelineFactory(configureClientChannelPipelineFactory());
clientBootstrap.setOption("connectTimeoutMillis", connectTimeout.millis());
boolean tcpNoDelay = settings.getAsBoolean("transport.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
boolean tcpNoDelay = TCP_NO_DELAY.get(settings);
clientBootstrap.setOption("tcpNoDelay", tcpNoDelay);
boolean tcpKeepAlive = settings.getAsBoolean("transport.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
boolean tcpKeepAlive = TCP_KEEP_ALIVE.get(settings);
clientBootstrap.setOption("keepAlive", tcpKeepAlive);
ByteSizeValue tcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.get(settings);
if (tcpSendBufferSize.bytes() > 0) {
clientBootstrap.setOption("sendBufferSize", tcpSendBufferSize.bytes());
}
ByteSizeValue tcpReceiveBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.get(settings);
if (tcpReceiveBufferSize.bytes() > 0) {
clientBootstrap.setOption("receiveBufferSize", tcpReceiveBufferSize.bytes());
}
clientBootstrap.setOption("receiveBufferSizePredictorFactory", receiveBufferSizePredictorFactory);
boolean reuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
boolean reuseAddress = TCP_REUSE_ADDRESS.get(settings);
clientBootstrap.setOption("reuseAddress", reuseAddress);
return clientBootstrap;
@ -383,31 +384,31 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
private Settings createFallbackSettings() {
Settings.Builder fallbackSettingsBuilder = settingsBuilder();
String fallbackBindHost = settings.get("transport.netty.bind_host", settings.get("transport.bind_host", settings.get("transport.host")));
if (fallbackBindHost != null) {
fallbackSettingsBuilder.put("bind_host", fallbackBindHost);
List<String> fallbackBindHost = TransportSettings.BIND_HOST.get(settings);
if (fallbackBindHost.isEmpty() == false) {
fallbackSettingsBuilder.putArray("bind_host", fallbackBindHost);
}
String fallbackPublishHost = settings.get("transport.netty.publish_host", settings.get("transport.publish_host", settings.get("transport.host")));
if (fallbackPublishHost != null) {
fallbackSettingsBuilder.put("publish_host", fallbackPublishHost);
List<String> fallbackPublishHost = TransportSettings.PUBLISH_HOST.get(settings);
if (fallbackPublishHost.isEmpty() == false) {
fallbackSettingsBuilder.putArray("publish_host", fallbackPublishHost);
}
boolean fallbackTcpNoDelay = settings.getAsBoolean("transport.netty.tcp_no_delay", TCP_NO_DELAY.get(settings));
boolean fallbackTcpNoDelay = settings.getAsBoolean("transport.netty.tcp_no_delay", TcpSettings.TCP_NO_DELAY.get(settings));
fallbackSettingsBuilder.put("tcp_no_delay", fallbackTcpNoDelay);
boolean fallbackTcpKeepAlive = settings.getAsBoolean("transport.netty.tcp_keep_alive", TCP_KEEP_ALIVE.get(settings));
boolean fallbackTcpKeepAlive = settings.getAsBoolean("transport.netty.tcp_keep_alive", TcpSettings.TCP_KEEP_ALIVE.get(settings));
fallbackSettingsBuilder.put("tcp_keep_alive", fallbackTcpKeepAlive);
boolean fallbackReuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TCP_REUSE_ADDRESS.get(settings));
boolean fallbackReuseAddress = settings.getAsBoolean("transport.netty.reuse_address", TcpSettings.TCP_REUSE_ADDRESS.get(settings));
fallbackSettingsBuilder.put("reuse_address", fallbackReuseAddress);
ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.get(settings));
ByteSizeValue fallbackTcpSendBufferSize = settings.getAsBytesSize("transport.netty.tcp_send_buffer_size", TcpSettings.TCP_SEND_BUFFER_SIZE.get(settings));
if (fallbackTcpSendBufferSize.bytes() >= 0) {
fallbackSettingsBuilder.put("tcp_send_buffer_size", fallbackTcpSendBufferSize);
}
ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.get(settings));
ByteSizeValue fallbackTcpBufferSize = settings.getAsBytesSize("transport.netty.tcp_receive_buffer_size", TcpSettings.TCP_RECEIVE_BUFFER_SIZE.get(settings));
if (fallbackTcpBufferSize.bytes() >= 0) {
fallbackSettingsBuilder.put("tcp_receive_buffer_size", fallbackTcpBufferSize);
}
@ -495,7 +496,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
final String[] publishHosts;
if (TransportSettings.DEFAULT_PROFILE.equals(name)) {
publishHosts = settings.getAsArray("transport.netty.publish_host", settings.getAsArray("transport.publish_host", settings.getAsArray("transport.host", null)));
publishHosts = TransportSettings.PUBLISH_HOST.get(settings).toArray(Strings.EMPTY_ARRAY);
} else {
publishHosts = profileSettings.getAsArray("publish_host", boundAddressesHostStrings);
}
@ -507,15 +508,15 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
throw new BindTransportException("Failed to resolve publish address", e);
}
Integer publishPort;
int publishPort;
if (TransportSettings.DEFAULT_PROFILE.equals(name)) {
publishPort = settings.getAsInt("transport.netty.publish_port", settings.getAsInt("transport.publish_port", null));
publishPort = TransportSettings.PUBLISH_PORT.get(settings);
} else {
publishPort = profileSettings.getAsInt("publish_port", null);
publishPort = profileSettings.getAsInt("publish_port", -1);
}
// if port not explicitly provided, search for port of address in boundAddresses that matches publishInetAddress
if (publishPort == null) {
if (publishPort < 0) {
for (InetSocketAddress boundAddress : boundAddresses) {
InetAddress boundInetAddress = boundAddress.getAddress();
if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) {
@ -526,7 +527,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
}
// if port still not matches, just take port of first bound address
if (publishPort == null) {
if (publishPort < 0) {
// TODO: In case of DEFAULT_PROFILE we should probably fail here, as publish address does not match any bound address
// In case of a custom profile, we might use the publish address of the default profile
publishPort = boundAddresses.get(0).getPort();
@ -538,15 +539,15 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
}
private void createServerBootstrap(String name, Settings settings) {
boolean blockingServer = settings.getAsBoolean("transport.tcp.blocking_server", TCP_BLOCKING_SERVER.get(settings));
boolean blockingServer = TCP_BLOCKING_SERVER.get(settings);
String port = settings.get("port");
String bindHost = settings.get("bind_host");
String publishHost = settings.get("publish_host");
String tcpNoDelay = settings.get("tcp_no_delay");
String tcpKeepAlive = settings.get("tcp_keep_alive");
boolean reuseAddress = settings.getAsBoolean("reuse_address", NetworkUtils.defaultReuseAddress());
ByteSizeValue tcpSendBufferSize = settings.getAsBytesSize("tcp_send_buffer_size", TCP_SEND_BUFFER_SIZE.getDefault(settings));
ByteSizeValue tcpReceiveBufferSize = settings.getAsBytesSize("tcp_receive_buffer_size", TCP_RECEIVE_BUFFER_SIZE.getDefault(settings));
ByteSizeValue tcpSendBufferSize = TCP_SEND_BUFFER_SIZE.getDefault(settings);
ByteSizeValue tcpReceiveBufferSize = TCP_RECEIVE_BUFFER_SIZE.getDefault(settings);
logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], connect_timeout[{}], connections_per_node[{}/{}/{}/{}/{}], receive_predictor[{}->{}]",
name, workerCount, port, bindHost, publishHost, compress, connectTimeout, connectionsPerNodeRecovery, connectionsPerNodeBulk, connectionsPerNodeReg, connectionsPerNodeState, connectionsPerNodePing, receivePredictorMin, receivePredictorMax);

View File

@ -13,16 +13,11 @@ DESCRIPTION
Officially supported or commercial plugins require just the plugin name:
plugin install analysis-icu
plugin install shield
plugin install x-pack
Plugins from GitHub require 'username/repository' or 'username/repository/version':
Plugins from Maven Central require 'groupId:artifactId:version':
plugin install lmenezes/elasticsearch-kopf
plugin install lmenezes/elasticsearch-kopf/1.5.7
Plugins from Maven Central or Sonatype require 'groupId/artifactId/version':
plugin install org.elasticsearch/elasticsearch-mapper-attachments/2.6.0
plugin install org.elasticsearch:mapper-attachments:3.0.0
Plugins can be installed from a custom URL or file location as follows:
@ -57,8 +52,6 @@ OFFICIAL PLUGINS
OPTIONS
-t,--timeout Timeout until the plugin download is aborted
-v,--verbose Verbose output
-h,--help Shows this message

View File

@ -286,12 +286,12 @@ public class ESExceptionTests extends ESTestCase {
public void testSerializeUnknownException() throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
ParsingException ParsingException = new ParsingException(1, 2, "foobar", null);
Throwable ex = new Throwable("wtf", ParsingException);
Throwable ex = new Throwable("eggplant", ParsingException);
out.writeThrowable(ex);
StreamInput in = StreamInput.wrap(out.bytes());
Throwable throwable = in.readThrowable();
assertEquals("wtf", throwable.getMessage());
assertEquals("throwable: eggplant", throwable.getMessage());
assertTrue(throwable instanceof ElasticsearchException);
ParsingException e = (ParsingException)throwable.getCause();
assertEquals(ParsingException.getIndex(), e.getIndex());
@ -329,7 +329,9 @@ public class ESExceptionTests extends ESTestCase {
StreamInput in = StreamInput.wrap(out.bytes());
ElasticsearchException e = in.readThrowable();
assertEquals(e.getMessage(), ex.getMessage());
assertEquals(ex.getCause().getClass().getName(), e.getCause().getMessage(), ex.getCause().getMessage());
assertTrue("Expected: " + e.getCause().getMessage() + " to contain: " +
ex.getCause().getClass().getName() + " but it didn't",
e.getCause().getMessage().contains(ex.getCause().getMessage()));
if (ex.getCause().getClass() != Throwable.class) { // throwable is not directly mapped
assertEquals(e.getCause().getClass(), ex.getCause().getClass());
} else {

View File

@ -543,9 +543,9 @@ public class ExceptionSerializationTests extends ESTestCase {
public void testNotSerializableExceptionWrapper() throws IOException {
NotSerializableExceptionWrapper ex = serialize(new NotSerializableExceptionWrapper(new NullPointerException()));
assertEquals("{\"type\":\"null_pointer_exception\",\"reason\":null}", toXContent(ex));
assertEquals("{\"type\":\"null_pointer_exception\",\"reason\":\"null_pointer_exception: null\"}", toXContent(ex));
ex = serialize(new NotSerializableExceptionWrapper(new IllegalArgumentException("nono!")));
assertEquals("{\"type\":\"illegal_argument_exception\",\"reason\":\"nono!\"}", toXContent(ex));
assertEquals("{\"type\":\"illegal_argument_exception\",\"reason\":\"illegal_argument_exception: nono!\"}", toXContent(ex));
Throwable[] unknowns = new Throwable[]{
new Exception("foobar"),
@ -586,7 +586,7 @@ public class ExceptionSerializationTests extends ESTestCase {
ElasticsearchException serialize = serialize((ElasticsearchException) uhe);
assertTrue(serialize instanceof NotSerializableExceptionWrapper);
NotSerializableExceptionWrapper e = (NotSerializableExceptionWrapper) serialize;
assertEquals("msg", e.getMessage());
assertEquals("unknown_header_exception: msg", e.getMessage());
assertEquals(2, e.getHeader("foo").size());
assertEquals("foo", e.getHeader("foo").get(0));
assertEquals("bar", e.getHeader("foo").get(1));

View File

@ -119,7 +119,7 @@ public class HotThreadsIT extends ESIntegTestCase {
.setQuery(matchAllQuery())
.setPostFilter(boolQuery().must(matchAllQuery()).mustNot(boolQuery().must(termQuery("field1", "value1")).must(termQuery("field1", "value2"))))
.get(),
3l);
3L);
}
latch.await();
assertThat(hasErrors.get(), is(false));

View File

@ -93,7 +93,7 @@ public class ClusterStatsIT extends ESIntegTestCase {
ensureYellow();
response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.YELLOW));
assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0l));
assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0L));
assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(1));
assertShardStats(response.getIndicesStats().getShards(), 1, 2, 2, 0.0);
@ -104,7 +104,7 @@ public class ClusterStatsIT extends ESIntegTestCase {
refresh(); // make the doc visible
response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.getStatus(), Matchers.equalTo(ClusterHealthStatus.GREEN));
assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1l));
assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1L));
assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0);
prepareCreate("test2").setSettings("number_of_shards", 3, "number_of_replicas", 0).get();
@ -141,10 +141,10 @@ public class ClusterStatsIT extends ESIntegTestCase {
ensureYellow("test1");
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
String msg = response.toString();
assertThat(msg, response.getTimestamp(), Matchers.greaterThan(946681200000l)); // 1 Jan 2000
assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0l));
assertThat(msg, response.getTimestamp(), Matchers.greaterThan(946681200000L)); // 1 Jan 2000
assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0L));
assertThat(msg, response.nodesStats.getFs().getTotal().bytes(), Matchers.greaterThan(0l));
assertThat(msg, response.nodesStats.getFs().getTotal().bytes(), Matchers.greaterThan(0L));
assertThat(msg, response.nodesStats.getJvm().getVersions().size(), Matchers.greaterThan(0));
assertThat(msg, response.nodesStats.getVersions().size(), Matchers.greaterThan(0));

View File

@ -54,7 +54,7 @@ import static org.hamcrest.core.IsNull.notNullValue;
public class CreateIndexIT extends ESIntegTestCase {
public void testCreationDateGivenFails() {
try {
prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 4l)).get();
prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 4L)).get();
fail();
} catch (IllegalArgumentException ex) {
assertEquals("unknown setting [index.creation_date]", ex.getMessage());

View File

@ -93,7 +93,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
assertThat(shardStores.values().size(), equalTo(2));
for (ObjectCursor<List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : shardStores.values()) {
for (IndicesShardStoresResponse.StoreStatus storeStatus : shardStoreStatuses.value) {
assertThat(storeStatus.getVersion(), greaterThan(-1l));
assertThat(storeStatus.getVersion(), greaterThan(-1L));
assertThat(storeStatus.getAllocationId(), notNullValue());
assertThat(storeStatus.getNode(), notNullValue());
assertThat(storeStatus.getStoreException(), nullValue());
@ -191,10 +191,10 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
for (IndicesShardStoresResponse.StoreStatus status : shardStatus.value) {
if (corruptedShardIDMap.containsKey(shardStatus.key)
&& corruptedShardIDMap.get(shardStatus.key).contains(status.getNode().name())) {
assertThat(status.getVersion(), greaterThanOrEqualTo(0l));
assertThat(status.getVersion(), greaterThanOrEqualTo(0L));
assertThat(status.getStoreException(), notNullValue());
} else {
assertThat(status.getVersion(), greaterThanOrEqualTo(0l));
assertThat(status.getVersion(), greaterThanOrEqualTo(0L));
assertNull(status.getStoreException());
}
}

View File

@ -66,11 +66,11 @@ public class IndicesStatsTests extends ESSingleNodeTestCase {
IndicesStatsResponse rsp = client().admin().indices().prepareStats("test").get();
SegmentsStats stats = rsp.getIndex("test").getTotal().getSegments();
assertThat(stats.getTermsMemoryInBytes(), greaterThan(0l));
assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0l));
assertThat(stats.getTermVectorsMemoryInBytes(), greaterThan(0l));
assertThat(stats.getNormsMemoryInBytes(), greaterThan(0l));
assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0l));
assertThat(stats.getTermsMemoryInBytes(), greaterThan(0L));
assertThat(stats.getStoredFieldsMemoryInBytes(), greaterThan(0L));
assertThat(stats.getTermVectorsMemoryInBytes(), greaterThan(0L));
assertThat(stats.getNormsMemoryInBytes(), greaterThan(0L));
assertThat(stats.getDocValuesMemoryInBytes(), greaterThan(0L));
// now check multiple segments stats are merged together
client().prepareIndex("test", "doc", "2").setSource("foo", "bar").get();
@ -93,7 +93,7 @@ public class IndicesStatsTests extends ESSingleNodeTestCase {
for (ShardStats shardStats : rsp.getIndex("test").getShards()) {
final CommitStats commitStats = shardStats.getCommitStats();
assertNotNull(commitStats);
assertThat(commitStats.getGeneration(), greaterThan(0l));
assertThat(commitStats.getGeneration(), greaterThan(0L));
assertThat(commitStats.getId(), notNullValue());
assertThat(commitStats.getUserData(), hasKey(Translog.TRANSLOG_GENERATION_KEY));
assertThat(commitStats.getUserData(), hasKey(Translog.TRANSLOG_UUID_KEY));

View File

@ -99,7 +99,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase {
Fields fields = response.getFields();
assertThat(fields.size(), equalTo(1));
Terms terms = fields.terms("field");
assertThat(terms.size(), equalTo(8l));
assertThat(terms.size(), equalTo(8L));
assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) -1));
assertThat(terms.getDocCount(), Matchers.equalTo(-1));
assertThat(terms.getSumDocFreq(), equalTo((long) -1));
@ -158,7 +158,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase {
Fields fields = response.getFields();
assertThat(fields.size(), equalTo(1));
Terms terms = fields.terms("field");
assertThat(terms.size(), equalTo(8l));
assertThat(terms.size(), equalTo(8L));
assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));
@ -214,7 +214,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase {
Fields fields = response.getFields();
assertThat(fields.size(), equalTo(1));
Terms terms = fields.terms("field");
assertThat(terms.size(), equalTo(8l));
assertThat(terms.size(), equalTo(8L));
assertThat(terms.getSumTotalTermFreq(), Matchers.equalTo((long) (9 * numDocs)));
assertThat(terms.getDocCount(), Matchers.equalTo(numDocs));
assertThat(terms.getSumDocFreq(), equalTo((long) numDocs * values.length));

View File

@ -317,7 +317,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(fields.size(), equalTo(ft.storeTermVectors() ? 1 : 0));
if (ft.storeTermVectors()) {
Terms terms = fields.terms("field");
assertThat(terms.size(), equalTo(8l));
assertThat(terms.size(), equalTo(8L));
TermsEnum iterator = terms.iterator();
for (int j = 0; j < values.length; j++) {
String string = values[j];
@ -637,7 +637,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
int[][] endOffset = {{15}, {43}, {19}, {25}, {39}, {30}, {9}, {3, 34}};
Terms terms = fields.terms(fieldName);
assertThat(terms.size(), equalTo(8l));
assertThat(terms.size(), equalTo(8L));
TermsEnum iterator = terms.iterator();
for (int j = 0; j < values.length; j++) {
String string = values[j];
@ -1087,12 +1087,12 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(Versions.MATCH_ANY).get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getVersion(), equalTo(1l));
assertThat(response.getVersion(), equalTo(1L));
response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(1).get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getVersion(), equalTo(1l));
assertThat(response.getVersion(), equalTo(1L));
try {
client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).get();
@ -1109,13 +1109,13 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getIndex(), equalTo("test"));
assertThat(response.getVersion(), equalTo(1l));
assertThat(response.getVersion(), equalTo(1L));
response = client().prepareTermVectors(indexOrAlias(), "type1", "1").setVersion(1).setRealtime(false).get();
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getIndex(), equalTo("test"));
assertThat(response.getVersion(), equalTo(1l));
assertThat(response.getVersion(), equalTo(1L));
try {
client().prepareGet(indexOrAlias(), "type1", "1").setVersion(2).setRealtime(false).get();
@ -1134,7 +1134,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getIndex(), equalTo("test"));
assertThat(response.getVersion(), equalTo(2l));
assertThat(response.getVersion(), equalTo(2L));
try {
client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).get();
@ -1147,7 +1147,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getIndex(), equalTo("test"));
assertThat(response.getVersion(), equalTo(2l));
assertThat(response.getVersion(), equalTo(2L));
// From Lucene index:
refresh();
@ -1157,7 +1157,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getIndex(), equalTo("test"));
assertThat(response.getVersion(), equalTo(2l));
assertThat(response.getVersion(), equalTo(2L));
try {
client().prepareGet(indexOrAlias(), "type1", "1").setVersion(1).setRealtime(false).get();
@ -1170,7 +1170,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase {
assertThat(response.isExists(), equalTo(true));
assertThat(response.getId(), equalTo("1"));
assertThat(response.getIndex(), equalTo("test"));
assertThat(response.getVersion(), equalTo(2l));
assertThat(response.getVersion(), equalTo(2L));
}
public void testFilterLength() throws ExecutionException, InterruptedException, IOException {

View File

@ -58,7 +58,7 @@ public class BroadcastActionsIT extends ESIntegTestCase {
SearchResponse countResponse = client().prepareSearch("test").setSize(0)
.setQuery(termQuery("_type", "type1"))
.get();
assertThat(countResponse.getHits().totalHits(), equalTo(2l));
assertThat(countResponse.getHits().totalHits(), equalTo(2L));
assertThat(countResponse.getTotalShards(), equalTo(numShards.numPrimaries));
assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
assertThat(countResponse.getFailedShards(), equalTo(0));

View File

@ -122,11 +122,11 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase {
assertThat(id, client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).get().isCreated(), is(true));
GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(1).get();
assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
assertThat(get.getVersion(), equalTo(1l));
assertThat(get.getVersion(), equalTo(1L));
client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).execute().actionGet();
get = client().prepareGet("test", "type1", id).setRouting(routingKey).setVersion(2).get();
assertThat("Document with ID " + id + " should exist but doesn't", get.isExists(), is(true));
assertThat(get.getVersion(), equalTo(2l));
assertThat(get.getVersion(), equalTo(2L));
}
assertVersionCreated(compatibilityVersion(), "test");
@ -416,30 +416,30 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase {
client().prepareIndex(indexName, "type1", "4").setSource(jsonBuilder().startObject().startObject("obj2").field("obj2_val", "1").endObject().field("y2", "y_2").field("field3", "value3_4").endObject()));
SearchResponse countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("field1")).get();
assertHitCount(countResponse, 2l);
assertHitCount(countResponse, 2L);
countResponse = client().prepareSearch().setSize(0).setQuery(constantScoreQuery(existsQuery("field1"))).get();
assertHitCount(countResponse, 2l);
assertHitCount(countResponse, 2L);
countResponse = client().prepareSearch().setSize(0).setQuery(queryStringQuery("_exists_:field1")).get();
assertHitCount(countResponse, 2l);
assertHitCount(countResponse, 2L);
countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("field2")).get();
assertHitCount(countResponse, 2l);
assertHitCount(countResponse, 2L);
countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("field3")).get();
assertHitCount(countResponse, 1l);
assertHitCount(countResponse, 1L);
// wildcard check
countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("x*")).get();
assertHitCount(countResponse, 2l);
assertHitCount(countResponse, 2L);
// object check
countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("obj1")).get();
assertHitCount(countResponse, 2l);
assertHitCount(countResponse, 2L);
countResponse = client().prepareSearch().setSize(0).setQuery(queryStringQuery("_missing_:field1")).get();
assertHitCount(countResponse, 2l);
assertHitCount(countResponse, 2L);
if (!backwardsCluster().upgradeOneNode()) {
break;
@ -598,7 +598,7 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase {
assertThat(termVectorsResponse.isExists(), equalTo(true));
Fields fields = termVectorsResponse.getFields();
assertThat(fields.size(), equalTo(1));
assertThat(fields.terms("field").size(), equalTo(8l));
assertThat(fields.terms("field").size(), equalTo(8L));
}
public void testIndicesStats() {

View File

@ -332,7 +332,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
}
}
SearchResponse test = client().prepareSearch(indexName).get();
assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1l));
assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1L));
}
void assertBasicSearchWorks(String indexName) {

View File

@ -49,7 +49,7 @@ public class StaticIndexBackwardCompatibilityIT extends ESIntegTestCase {
assertEquals(index, getIndexResponse.indices()[0]);
ensureYellow(index);
SearchResponse test = client().prepareSearch(index).get();
assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1l));
assertThat(test.getHits().getTotalHits(), greaterThanOrEqualTo(1L));
}
}

View File

@ -32,6 +32,8 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
@ -48,7 +50,7 @@ public class TransportClientIT extends ESIntegTestCase {
}
public void testNodeVersionIsUpdated() {
public void testNodeVersionIsUpdated() throws IOException {
TransportClient client = (TransportClient) internalCluster().client();
TransportClientNodesService nodeService = client.nodeService();
Node node = new Node(Settings.builder()

View File

@ -632,7 +632,7 @@ public class ClusterServiceIT extends ESIntegTestCase {
controlSources = new HashSet<>(Arrays.asList("1", "2", "3", "4", "5"));
for (PendingClusterTask task : response) {
if (controlSources.remove(task.getSource().string())) {
assertThat(task.getTimeInQueueInMillis(), greaterThan(0l));
assertThat(task.getTimeInQueueInMillis(), greaterThan(0L));
}
}
assertTrue(controlSources.isEmpty());

View File

@ -115,8 +115,8 @@ public class DiskUsageTests extends ESTestCase {
assertEquals(2, shardSizes.size());
assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_0)));
assertTrue(shardSizes.containsKey(ClusterInfo.shardIdentifierFromRouting(test_1)));
assertEquals(100l, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(test_0)).longValue());
assertEquals(1000l, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(test_1)).longValue());
assertEquals(100L, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(test_0)).longValue());
assertEquals(1000L, shardSizes.get(ClusterInfo.shardIdentifierFromRouting(test_1)).longValue());
assertEquals(2, routingToPath.size());
assertTrue(routingToPath.containsKey(test_0));

View File

@ -117,7 +117,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
logger.info("--> verify we the data back");
for (int i = 0; i < 10; i++) {
assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(100l));
assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(100L));
}
internalCluster().stopCurrentMasterNode();
@ -279,7 +279,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
setMinimumMasterNodes(2);
// make sure it has been processed on all nodes (master node spawns a secondary cluster state update task)
for (Client client : internalCluster()) {
for (Client client : internalCluster().getClients()) {
assertThat(client.admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setLocal(true).get().isTimedOut(),
equalTo(false));
}
@ -303,7 +303,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase {
assertTrue(awaitBusy(
() -> {
boolean success = true;
for (Client client : internalCluster()) {
for (Client client : internalCluster().getClients()) {
boolean clientHasNoMasterBlock = hasNoMasterBlock.test(client);
if (logger.isDebugEnabled()) {
logger.debug("Checking for NO_MASTER_BLOCK on client: {} NO_MASTER_BLOCK: [{}]", client, clientHasNoMasterBlock);

View File

@ -248,10 +248,10 @@ public class NoMasterNodeIT extends ESIntegTestCase {
assertExists(getResponse);
SearchResponse countResponse = client().prepareSearch("test1").setSize(0).get();
assertHitCount(countResponse, 1l);
assertHitCount(countResponse, 1L);
SearchResponse searchResponse = client().prepareSearch("test1").get();
assertHitCount(searchResponse, 1l);
assertHitCount(searchResponse, 1L);
countResponse = client().prepareSearch("test2").setSize(0).get();
assertThat(countResponse.getTotalShards(), equalTo(2));

View File

@ -58,7 +58,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
}
client().admin().indices().prepareRefresh().execute().actionGet();
assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(100l));
assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(100L));
logger.info("--> decommission the second node");
client().admin().cluster().prepareUpdateSettings()
@ -77,7 +77,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
}
client().admin().indices().prepareRefresh().execute().actionGet();
assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(100l));
assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(100L));
}
public void testDisablingAllocationFiltering() throws Exception {
@ -99,7 +99,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
}
client().admin().indices().prepareRefresh().execute().actionGet();
assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(100l));
assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().totalHits(), equalTo(100L));
ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
IndexRoutingTable indexRoutingTable = clusterState.routingTable().index("test");
int numShardsOnNode1 = 0;

View File

@ -55,7 +55,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
.settings(settings(Version.CURRENT))
.numberOfShards(1)
.numberOfReplicas(2)
.creationDate(2l))
.creationDate(2L))
.put(IndexMetaData.builder("test5")
.settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
.numberOfShards(1)
@ -66,12 +66,12 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
.settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
.numberOfShards(1)
.numberOfReplicas(2)
.creationDate(2l))
.creationDate(2L))
.put(IndexMetaData.builder("test7")
.settings(settings(Version.CURRENT))
.numberOfShards(1)
.numberOfReplicas(2)
.creationDate(2l)
.creationDate(2L)
.putMapping("mapping1", MAPPING_SOURCE1)
.putMapping("mapping2", MAPPING_SOURCE2))
.put(IndexMetaData.builder("test8")
@ -84,7 +84,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
.putAlias(newAliasMetaDataBuilder("alias2")))
.put(IndexMetaData.builder("test9")
.settings(settings(Version.CURRENT).put("setting1", "value1").put("setting2", "value2"))
.creationDate(2l)
.creationDate(2L)
.numberOfShards(1)
.numberOfReplicas(2)
.putMapping("mapping1", MAPPING_SOURCE1)
@ -125,7 +125,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
.settings(settings(Version.CURRENT)
.put("setting1", "value1")
.put("setting2", "value2"))
.creationDate(2l)
.creationDate(2L)
.numberOfShards(1)
.numberOfReplicas(2)
.putMapping("mapping1", MAPPING_SOURCE1)
@ -152,14 +152,14 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
IndexMetaData indexMetaData = parsedMetaData.index("test1");
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
assertThat(indexMetaData.getCreationDate(), equalTo(-1L));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(3));
assertThat(indexMetaData.getMappings().size(), equalTo(0));
indexMetaData = parsedMetaData.index("test2");
assertThat(indexMetaData.getNumberOfShards(), equalTo(2));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(3));
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
assertThat(indexMetaData.getCreationDate(), equalTo(-1L));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(5));
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
@ -168,13 +168,13 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
indexMetaData = parsedMetaData.index("test3");
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
assertThat(indexMetaData.getCreationDate(), equalTo(-1L));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(3));
assertThat(indexMetaData.getMappings().size(), equalTo(1));
assertThat(indexMetaData.getMappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
indexMetaData = parsedMetaData.index("test4");
assertThat(indexMetaData.getCreationDate(), equalTo(2l));
assertThat(indexMetaData.getCreationDate(), equalTo(2L));
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(4));
@ -183,7 +183,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
indexMetaData = parsedMetaData.index("test5");
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
assertThat(indexMetaData.getCreationDate(), equalTo(-1L));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(5));
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
@ -194,7 +194,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
indexMetaData = parsedMetaData.index("test6");
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
assertThat(indexMetaData.getCreationDate(), equalTo(2l));
assertThat(indexMetaData.getCreationDate(), equalTo(2L));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(6));
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
@ -203,7 +203,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
indexMetaData = parsedMetaData.index("test7");
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
assertThat(indexMetaData.getCreationDate(), equalTo(2l));
assertThat(indexMetaData.getCreationDate(), equalTo(2L));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(4));
assertThat(indexMetaData.getMappings().size(), equalTo(2));
assertThat(indexMetaData.getMappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
@ -212,7 +212,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
indexMetaData = parsedMetaData.index("test8");
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
assertThat(indexMetaData.getCreationDate(), equalTo(-1L));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(5));
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
@ -226,7 +226,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
indexMetaData = parsedMetaData.index("test9");
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
assertThat(indexMetaData.getCreationDate(), equalTo(2l));
assertThat(indexMetaData.getCreationDate(), equalTo(2L));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(6));
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
@ -240,7 +240,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
indexMetaData = parsedMetaData.index("test10");
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
assertThat(indexMetaData.getCreationDate(), equalTo(-1L));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(5));
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
@ -254,7 +254,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
indexMetaData = parsedMetaData.index("test11");
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
assertThat(indexMetaData.getCreationDate(), equalTo(-1L));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(5));
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
@ -272,7 +272,7 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
indexMetaData = parsedMetaData.index("test12");
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
assertThat(indexMetaData.getCreationDate(), equalTo(2l));
assertThat(indexMetaData.getCreationDate(), equalTo(2L));
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(6));
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));

View File

@ -122,7 +122,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
logger.info("--> check that the up-to-date primary shard gets promoted and that documents are available");
ensureYellow("test");
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2l);
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2L);
}
public void testFailedAllocationOfStalePrimaryToDataNodeWithNoData() throws Exception {
@ -171,7 +171,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
logger.info("--> check that the stale primary shard gets allocated and that documents are available");
ensureYellow("test");
assertHitCount(client().prepareSearch("test").setSize(0).setQuery(matchAllQuery()).get(), useStaleReplica ? 1l : 0l);
assertHitCount(client().prepareSearch("test").setSize(0).setQuery(matchAllQuery()).get(), useStaleReplica ? 1L : 0L);
}
public void testForcePrimaryShardIfAllocationDecidersSayNoAfterIndexCreation() throws ExecutionException, InterruptedException {
@ -200,6 +200,6 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
internalCluster().fullRestart();
logger.info("--> checking that index still gets allocated with only 1 shard copy being available");
ensureYellow("test");
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 1l);
assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 1L);
}
}

View File

@ -221,7 +221,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo(), notNullValue());
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NODE_LEFT));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getUnassignedTimeInMillis(), greaterThan(0l));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getUnassignedTimeInMillis(), greaterThan(0L));
}
/**
@ -252,7 +252,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getMessage(), equalTo("test fail"));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getDetails(), equalTo("test fail"));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getUnassignedTimeInMillis(), greaterThan(0l));
assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getUnassignedTimeInMillis(), greaterThan(0L));
}
/**
@ -276,9 +276,9 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
UnassignedInfo unassignedInfo = new UnassignedInfo(RandomPicks.randomFrom(getRandom(), reasons), null);
long delay = unassignedInfo.updateDelay(unassignedInfo.getUnassignedTimeInNanos() + 1, // add 1 tick delay
Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "10h").build(), Settings.EMPTY);
assertThat(delay, equalTo(0l));
assertThat(delay, equalTo(0L));
delay = unassignedInfo.getLastComputedLeftDelayNanos();
assertThat(delay, equalTo(0l));
assertThat(delay, equalTo(0L));
}
/**

View File

@ -64,14 +64,14 @@ public class ShardVersioningTests extends ESAllocationTestCase {
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test1").shard(i).primaryShard().version(), equalTo(1l));
assertThat(routingTable.index("test1").shard(i).primaryShard().version(), equalTo(1L));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).primaryShard().version(), equalTo(1l));
assertThat(routingTable.index("test2").shard(i).primaryShard().version(), equalTo(1L));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
}
@ -84,17 +84,17 @@ public class ShardVersioningTests extends ESAllocationTestCase {
for (int i = 0; i < routingTable.index("test1").shards().size(); i++) {
assertThat(routingTable.index("test1").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test1").shard(i).primaryShard().state(), equalTo(STARTED));
assertThat(routingTable.index("test1").shard(i).primaryShard().version(), equalTo(2l));
assertThat(routingTable.index("test1").shard(i).primaryShard().version(), equalTo(2L));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).version(), equalTo(2l));
assertThat(routingTable.index("test1").shard(i).replicaShards().get(0).version(), equalTo(2L));
}
for (int i = 0; i < routingTable.index("test2").shards().size(); i++) {
assertThat(routingTable.index("test2").shard(i).shards().size(), equalTo(2));
assertThat(routingTable.index("test2").shard(i).primaryShard().state(), equalTo(INITIALIZING));
assertThat(routingTable.index("test2").shard(i).primaryShard().version(), equalTo(1l));
assertThat(routingTable.index("test2").shard(i).primaryShard().version(), equalTo(1L));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).state(), equalTo(UNASSIGNED));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).version(), equalTo(1l));
assertThat(routingTable.index("test2").shard(i).replicaShards().get(0).version(), equalTo(1L));
}
}
}

View File

@ -239,20 +239,20 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase {
ShardRoutingHelper.initialize(test_2, "node1");
ShardRoutingHelper.moveToStarted(test_2);
assertEquals(1000l, DiskThresholdDecider.getShardSize(test_2, info));
assertEquals(100l, DiskThresholdDecider.getShardSize(test_1, info));
assertEquals(10l, DiskThresholdDecider.getShardSize(test_0, info));
assertEquals(1000L, DiskThresholdDecider.getShardSize(test_2, info));
assertEquals(100L, DiskThresholdDecider.getShardSize(test_1, info));
assertEquals(10L, DiskThresholdDecider.getShardSize(test_0, info));
RoutingNode node = new RoutingNode("node1", new DiscoveryNode("node1", LocalTransportAddress.PROTO, Version.CURRENT), Arrays.asList(test_0, test_1.buildTargetRelocatingShard(), test_2));
assertEquals(100l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false, "/dev/null"));
assertEquals(90l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/null"));
assertEquals(0l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/some/other/dev"));
assertEquals(0l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/some/other/dev"));
assertEquals(100L, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false, "/dev/null"));
assertEquals(90L, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/null"));
assertEquals(0L, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/some/other/dev"));
assertEquals(0L, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/some/other/dev"));
ShardRouting test_3 = ShardRouting.newUnassigned(index, 3, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
ShardRoutingHelper.initialize(test_3, "node1");
ShardRoutingHelper.moveToStarted(test_3);
assertEquals(0l, DiskThresholdDecider.getShardSize(test_3, info));
assertEquals(0L, DiskThresholdDecider.getShardSize(test_3, info));
ShardRouting other_0 = ShardRouting.newUnassigned(new Index("other", "_NA_"), 0, null, randomBoolean(), new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
@ -263,11 +263,11 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase {
node = new RoutingNode("node1", new DiscoveryNode("node1", LocalTransportAddress.PROTO, Version.CURRENT), Arrays.asList(test_0, test_1.buildTargetRelocatingShard(), test_2, other_0.buildTargetRelocatingShard()));
if (other_0.primary()) {
assertEquals(10100l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false, "/dev/null"));
assertEquals(10090l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/null"));
assertEquals(10100L, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false, "/dev/null"));
assertEquals(10090L, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/null"));
} else {
assertEquals(100l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false, "/dev/null"));
assertEquals(90l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/null"));
assertEquals(100L, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false, "/dev/null"));
assertEquals(90L, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true, "/dev/null"));
}
}

View File

@ -95,7 +95,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
assertAcked(response);
assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s"));
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l));
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L));
assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY)));
@ -118,7 +118,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
assertAcked(response);
assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s"));
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l));
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L));
assertFalse(discoverySettings.getPublishDiff());
response = client().admin().cluster()
.prepareUpdateSettings()
@ -138,7 +138,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
assertAcked(response);
assertThat(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s"));
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l));
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L));
assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY)));
@ -162,7 +162,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
assertAcked(response);
assertThat(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s"));
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l));
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L));
assertFalse(discoverySettings.getPublishDiff());
response = client().admin().cluster()
.prepareUpdateSettings()
@ -254,7 +254,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
assertAcked(response);
assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s"));
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l));
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L));
try {
client().admin().cluster()
@ -266,7 +266,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
assertEquals(ex.getMessage(), "Failed to parse setting [discovery.zen.publish_timeout] with value [whatever] as a time value: unit is missing or unrecognized");
}
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l));
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L));
try {
client().admin().cluster()
@ -278,7 +278,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
assertEquals(ex.getMessage(), "Failed to parse value [-1] for setting [discovery.zen.publish_timeout] must be >= 0s");
}
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l));
assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L));
}
public void testClusterUpdateSettingsWithBlocks() {

View File

@ -48,91 +48,6 @@ public class FileSystemUtilsTests extends ESTestCase {
dst = createTempDir();
Files.createDirectories(src);
Files.createDirectories(dst);
// We first copy sources test files from src/test/resources
// Because after when the test runs, src files are moved to their destination
final Path path = getDataPath("/org/elasticsearch/common/io/copyappend");
FileSystemUtils.copyDirectoryRecursively(path, src);
}
public void testMoveOverExistingFileAndAppend() throws IOException {
FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v1"), dst, ".new");
assertFileContent(dst, "file1.txt", "version1");
assertFileContent(dst, "dir/file2.txt", "version1");
FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v2"), dst, ".new");
assertFileContent(dst, "file1.txt", "version1");
assertFileContent(dst, "dir/file2.txt", "version1");
assertFileContent(dst, "file1.txt.new", "version2");
assertFileContent(dst, "dir/file2.txt.new", "version2");
assertFileContent(dst, "file3.txt", "version1");
assertFileContent(dst, "dir/subdir/file4.txt", "version1");
FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v3"), dst, ".new");
assertFileContent(dst, "file1.txt", "version1");
assertFileContent(dst, "dir/file2.txt", "version1");
assertFileContent(dst, "file1.txt.new", "version3");
assertFileContent(dst, "dir/file2.txt.new", "version3");
assertFileContent(dst, "file3.txt", "version1");
assertFileContent(dst, "dir/subdir/file4.txt", "version1");
assertFileContent(dst, "file3.txt.new", "version2");
assertFileContent(dst, "dir/subdir/file4.txt.new", "version2");
assertFileContent(dst, "dir/subdir/file5.txt", "version1");
}
public void testMoveOverExistingFileAndIgnore() throws IOException {
Path dest = createTempDir();
FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v1"), dest, null);
assertFileContent(dest, "file1.txt", "version1");
assertFileContent(dest, "dir/file2.txt", "version1");
FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v2"), dest, null);
assertFileContent(dest, "file1.txt", "version1");
assertFileContent(dest, "dir/file2.txt", "version1");
assertFileContent(dest, "file1.txt.new", null);
assertFileContent(dest, "dir/file2.txt.new", null);
assertFileContent(dest, "file3.txt", "version1");
assertFileContent(dest, "dir/subdir/file4.txt", "version1");
FileSystemUtils.moveFilesWithoutOverwriting(src.resolve("v3"), dest, null);
assertFileContent(dest, "file1.txt", "version1");
assertFileContent(dest, "dir/file2.txt", "version1");
assertFileContent(dest, "file1.txt.new", null);
assertFileContent(dest, "dir/file2.txt.new", null);
assertFileContent(dest, "file3.txt", "version1");
assertFileContent(dest, "dir/subdir/file4.txt", "version1");
assertFileContent(dest, "file3.txt.new", null);
assertFileContent(dest, "dir/subdir/file4.txt.new", null);
assertFileContent(dest, "dir/subdir/file5.txt", "version1");
}
public void testMoveFilesDoesNotCreateSameFileWithSuffix() throws Exception {
Path[] dirs = new Path[] { createTempDir(), createTempDir(), createTempDir()};
for (Path dir : dirs) {
Files.write(dir.resolve("file1.txt"), "file1".getBytes(StandardCharsets.UTF_8));
Files.createDirectory(dir.resolve("dir"));
Files.write(dir.resolve("dir").resolve("file2.txt"), "file2".getBytes(StandardCharsets.UTF_8));
}
FileSystemUtils.moveFilesWithoutOverwriting(dirs[0], dst, ".new");
assertFileContent(dst, "file1.txt", "file1");
assertFileContent(dst, "dir/file2.txt", "file2");
// do the same operation again, make sure, no .new files have been added
FileSystemUtils.moveFilesWithoutOverwriting(dirs[1], dst, ".new");
assertFileContent(dst, "file1.txt", "file1");
assertFileContent(dst, "dir/file2.txt", "file2");
assertFileNotExists(dst.resolve("file1.txt.new"));
assertFileNotExists(dst.resolve("dir").resolve("file2.txt.new"));
// change file content, make sure it gets updated
Files.write(dirs[2].resolve("dir").resolve("file2.txt"), "UPDATED".getBytes(StandardCharsets.UTF_8));
FileSystemUtils.moveFilesWithoutOverwriting(dirs[2], dst, ".new");
assertFileContent(dst, "file1.txt", "file1");
assertFileContent(dst, "dir/file2.txt", "file2");
assertFileContent(dst, "dir/file2.txt.new", "UPDATED");
}
public void testAppend() {

View File

@ -62,7 +62,7 @@ public class MoreLikeThisQueryTests extends ESTestCase {
mltQuery.setMinTermFrequency(1);
mltQuery.setMinDocFreq(1);
long count = searcher.count(mltQuery);
assertThat(count, equalTo(2l));
assertThat(count, equalTo(2L));
reader.close();
indexWriter.close();

View File

@ -51,7 +51,7 @@ public class InputStreamIndexInputTests extends ESTestCase {
for (int i = 0; i < 3; i++) {
InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
assertThat(input.getFilePointer(), lessThan(input.length()));
assertThat(is.actualSizeToRead(), equalTo(1l));
assertThat(is.actualSizeToRead(), equalTo(1L));
assertThat(is.read(), equalTo(1));
assertThat(is.read(), equalTo(-1));
}
@ -59,14 +59,14 @@ public class InputStreamIndexInputTests extends ESTestCase {
for (int i = 0; i < 3; i++) {
InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
assertThat(input.getFilePointer(), lessThan(input.length()));
assertThat(is.actualSizeToRead(), equalTo(1l));
assertThat(is.actualSizeToRead(), equalTo(1L));
assertThat(is.read(), equalTo(2));
assertThat(is.read(), equalTo(-1));
}
assertThat(input.getFilePointer(), equalTo(input.length()));
InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
assertThat(is.actualSizeToRead(), equalTo(0l));
assertThat(is.actualSizeToRead(), equalTo(0L));
assertThat(is.read(), equalTo(-1));
}
@ -89,7 +89,7 @@ public class InputStreamIndexInputTests extends ESTestCase {
for (int i = 0; i < 3; i++) {
assertThat(input.getFilePointer(), lessThan(input.length()));
InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
assertThat(is.actualSizeToRead(), equalTo(1l));
assertThat(is.actualSizeToRead(), equalTo(1L));
assertThat(is.read(read), equalTo(1));
assertThat(read[0], equalTo((byte) 1));
}
@ -97,14 +97,14 @@ public class InputStreamIndexInputTests extends ESTestCase {
for (int i = 0; i < 3; i++) {
assertThat(input.getFilePointer(), lessThan(input.length()));
InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
assertThat(is.actualSizeToRead(), equalTo(1l));
assertThat(is.actualSizeToRead(), equalTo(1L));
assertThat(is.read(read), equalTo(1));
assertThat(read[0], equalTo((byte) 2));
}
assertThat(input.getFilePointer(), equalTo(input.length()));
InputStreamIndexInput is = new InputStreamIndexInput(input, 1);
assertThat(is.actualSizeToRead(), equalTo(0l));
assertThat(is.actualSizeToRead(), equalTo(0L));
assertThat(is.read(read), equalTo(-1));
}
@ -124,28 +124,28 @@ public class InputStreamIndexInputTests extends ESTestCase {
assertThat(input.getFilePointer(), lessThan(input.length()));
InputStreamIndexInput is = new InputStreamIndexInput(input, 2);
assertThat(is.actualSizeToRead(), equalTo(2l));
assertThat(is.actualSizeToRead(), equalTo(2L));
assertThat(is.read(), equalTo(1));
assertThat(is.read(), equalTo(1));
assertThat(is.read(), equalTo(-1));
assertThat(input.getFilePointer(), lessThan(input.length()));
is = new InputStreamIndexInput(input, 2);
assertThat(is.actualSizeToRead(), equalTo(2l));
assertThat(is.actualSizeToRead(), equalTo(2L));
assertThat(is.read(), equalTo(1));
assertThat(is.read(), equalTo(2));
assertThat(is.read(), equalTo(-1));
assertThat(input.getFilePointer(), lessThan(input.length()));
is = new InputStreamIndexInput(input, 2);
assertThat(is.actualSizeToRead(), equalTo(2l));
assertThat(is.actualSizeToRead(), equalTo(2L));
assertThat(is.read(), equalTo(2));
assertThat(is.read(), equalTo(2));
assertThat(is.read(), equalTo(-1));
assertThat(input.getFilePointer(), equalTo(input.length()));
is = new InputStreamIndexInput(input, 2);
assertThat(is.actualSizeToRead(), equalTo(0l));
assertThat(is.actualSizeToRead(), equalTo(0L));
assertThat(is.read(), equalTo(-1));
}
@ -167,28 +167,28 @@ public class InputStreamIndexInputTests extends ESTestCase {
assertThat(input.getFilePointer(), lessThan(input.length()));
InputStreamIndexInput is = new InputStreamIndexInput(input, 2);
assertThat(is.actualSizeToRead(), equalTo(2l));
assertThat(is.actualSizeToRead(), equalTo(2L));
assertThat(is.read(read), equalTo(2));
assertThat(read[0], equalTo((byte) 1));
assertThat(read[1], equalTo((byte) 1));
assertThat(input.getFilePointer(), lessThan(input.length()));
is = new InputStreamIndexInput(input, 2);
assertThat(is.actualSizeToRead(), equalTo(2l));
assertThat(is.actualSizeToRead(), equalTo(2L));
assertThat(is.read(read), equalTo(2));
assertThat(read[0], equalTo((byte) 1));
assertThat(read[1], equalTo((byte) 2));
assertThat(input.getFilePointer(), lessThan(input.length()));
is = new InputStreamIndexInput(input, 2);
assertThat(is.actualSizeToRead(), equalTo(2l));
assertThat(is.actualSizeToRead(), equalTo(2L));
assertThat(is.read(read), equalTo(2));
assertThat(read[0], equalTo((byte) 2));
assertThat(read[1], equalTo((byte) 2));
assertThat(input.getFilePointer(), equalTo(input.length()));
is = new InputStreamIndexInput(input, 2);
assertThat(is.actualSizeToRead(), equalTo(0l));
assertThat(is.actualSizeToRead(), equalTo(0L));
assertThat(is.read(read), equalTo(-1));
}
@ -210,7 +210,7 @@ public class InputStreamIndexInputTests extends ESTestCase {
assertThat(input.getFilePointer(), lessThan(input.length()));
InputStreamIndexInput is = new InputStreamIndexInput(input, 4);
assertThat(is.actualSizeToRead(), equalTo(4l));
assertThat(is.actualSizeToRead(), equalTo(4L));
assertThat(is.read(read), equalTo(4));
assertThat(read[0], equalTo((byte) 1));
assertThat(read[1], equalTo((byte) 1));
@ -219,14 +219,14 @@ public class InputStreamIndexInputTests extends ESTestCase {
assertThat(input.getFilePointer(), lessThan(input.length()));
is = new InputStreamIndexInput(input, 4);
assertThat(is.actualSizeToRead(), equalTo(2l));
assertThat(is.actualSizeToRead(), equalTo(2L));
assertThat(is.read(read), equalTo(2));
assertThat(read[0], equalTo((byte) 2));
assertThat(read[1], equalTo((byte) 2));
assertThat(input.getFilePointer(), equalTo(input.length()));
is = new InputStreamIndexInput(input, 4);
assertThat(is.actualSizeToRead(), equalTo(0l));
assertThat(is.actualSizeToRead(), equalTo(0L));
assertThat(is.read(read), equalTo(-1));
}

View File

@ -93,8 +93,8 @@ public class VersionsTests extends ESTestCase {
doc.add(new NumericDocValuesField(VersionFieldMapper.NAME, 1));
writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1l));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(1l));
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1L));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(1L));
doc = new Document();
Field uid = new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE);
@ -103,8 +103,8 @@ public class VersionsTests extends ESTestCase {
doc.add(version);
writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(2l));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(2l));
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(2L));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(2L));
// test reuse of uid field
doc = new Document();
@ -114,8 +114,8 @@ public class VersionsTests extends ESTestCase {
writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(3l));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(3l));
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(3L));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(3L));
writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
directoryReader = reopen(directoryReader);
@ -146,16 +146,16 @@ public class VersionsTests extends ESTestCase {
writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), new ShardId("foo", "_na_", 1));
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(5l));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(5l));
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(5L));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(5L));
version.setLongValue(6L);
writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
version.setLongValue(7L);
writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(7l));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(7l));
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(7L));
assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(7L));
writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
directoryReader = reopen(directoryReader);
@ -184,8 +184,8 @@ public class VersionsTests extends ESTestCase {
writer.commit();
directoryReader = reopen(directoryReader);
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1l));
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "2")), equalTo(2l));
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1L));
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "2")), equalTo(2L));
assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "3")), equalTo(Versions.NOT_FOUND));
directoryReader.close();
writer.close();

View File

@ -101,8 +101,8 @@ public class TimeZoneRoundingTests extends ESTestCase {
int timezoneOffset = -2;
Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forOffsetHours(timezoneOffset))
.build();
assertThat(tzRounding.round(0), equalTo(0l - TimeValue.timeValueHours(24 + timezoneOffset).millis()));
assertThat(tzRounding.nextRoundingValue(0l - TimeValue.timeValueHours(24 + timezoneOffset).millis()), equalTo(0l - TimeValue
assertThat(tzRounding.round(0), equalTo(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis()));
assertThat(tzRounding.nextRoundingValue(0L - TimeValue.timeValueHours(24 + timezoneOffset).millis()), equalTo(0L - TimeValue
.timeValueHours(timezoneOffset).millis()));
tzRounding = TimeZoneRounding.builder(DateTimeUnit.DAY_OF_MONTH).timeZone(DateTimeZone.forID("-08:00")).build();
@ -135,8 +135,8 @@ public class TimeZoneRoundingTests extends ESTestCase {
public void testTimeTimeZoneRounding() {
// hour unit
Rounding tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forOffsetHours(-2)).build();
assertThat(tzRounding.round(0), equalTo(0l));
assertThat(tzRounding.nextRoundingValue(0l), equalTo(TimeValue.timeValueHours(1l).getMillis()));
assertThat(tzRounding.round(0), equalTo(0L));
assertThat(tzRounding.nextRoundingValue(0L), equalTo(TimeValue.timeValueHours(1L).getMillis()));
tzRounding = TimeZoneRounding.builder(DateTimeUnit.HOUR_OF_DAY).timeZone(DateTimeZone.forOffsetHours(-2)).build();
assertThat(tzRounding.round(utc("2009-02-03T01:01:01")), equalTo(utc("2009-02-03T01:00:00")));

View File

@ -34,47 +34,47 @@ import static org.hamcrest.Matchers.equalTo;
*/
public class ByteSizeUnitTests extends ESTestCase {
public void testBytes() {
assertThat(BYTES.toBytes(1), equalTo(1l));
assertThat(BYTES.toKB(1024), equalTo(1l));
assertThat(BYTES.toMB(1024 * 1024), equalTo(1l));
assertThat(BYTES.toGB(1024 * 1024 * 1024), equalTo(1l));
assertThat(BYTES.toBytes(1), equalTo(1L));
assertThat(BYTES.toKB(1024), equalTo(1L));
assertThat(BYTES.toMB(1024 * 1024), equalTo(1L));
assertThat(BYTES.toGB(1024 * 1024 * 1024), equalTo(1L));
}
public void testKB() {
assertThat(KB.toBytes(1), equalTo(1024l));
assertThat(KB.toKB(1), equalTo(1l));
assertThat(KB.toMB(1024), equalTo(1l));
assertThat(KB.toGB(1024 * 1024), equalTo(1l));
assertThat(KB.toBytes(1), equalTo(1024L));
assertThat(KB.toKB(1), equalTo(1L));
assertThat(KB.toMB(1024), equalTo(1L));
assertThat(KB.toGB(1024 * 1024), equalTo(1L));
}
public void testMB() {
assertThat(MB.toBytes(1), equalTo(1024l * 1024));
assertThat(MB.toKB(1), equalTo(1024l));
assertThat(MB.toMB(1), equalTo(1l));
assertThat(MB.toGB(1024), equalTo(1l));
assertThat(MB.toBytes(1), equalTo(1024L * 1024));
assertThat(MB.toKB(1), equalTo(1024L));
assertThat(MB.toMB(1), equalTo(1L));
assertThat(MB.toGB(1024), equalTo(1L));
}
public void testGB() {
assertThat(GB.toBytes(1), equalTo(1024l * 1024 * 1024));
assertThat(GB.toKB(1), equalTo(1024l * 1024));
assertThat(GB.toMB(1), equalTo(1024l));
assertThat(GB.toGB(1), equalTo(1l));
assertThat(GB.toBytes(1), equalTo(1024L * 1024 * 1024));
assertThat(GB.toKB(1), equalTo(1024L * 1024));
assertThat(GB.toMB(1), equalTo(1024L));
assertThat(GB.toGB(1), equalTo(1L));
}
public void testTB() {
assertThat(TB.toBytes(1), equalTo(1024l * 1024 * 1024 * 1024));
assertThat(TB.toKB(1), equalTo(1024l * 1024 * 1024));
assertThat(TB.toMB(1), equalTo(1024l * 1024));
assertThat(TB.toGB(1), equalTo(1024l));
assertThat(TB.toTB(1), equalTo(1l));
assertThat(TB.toBytes(1), equalTo(1024L * 1024 * 1024 * 1024));
assertThat(TB.toKB(1), equalTo(1024L * 1024 * 1024));
assertThat(TB.toMB(1), equalTo(1024L * 1024));
assertThat(TB.toGB(1), equalTo(1024L));
assertThat(TB.toTB(1), equalTo(1L));
}
public void testPB() {
assertThat(PB.toBytes(1), equalTo(1024l * 1024 * 1024 * 1024 * 1024));
assertThat(PB.toKB(1), equalTo(1024l * 1024 * 1024 * 1024));
assertThat(PB.toMB(1), equalTo(1024l * 1024 * 1024));
assertThat(PB.toGB(1), equalTo(1024l * 1024));
assertThat(PB.toTB(1), equalTo(1024l));
assertThat(PB.toPB(1), equalTo(1l));
assertThat(PB.toBytes(1), equalTo(1024L * 1024 * 1024 * 1024 * 1024));
assertThat(PB.toKB(1), equalTo(1024L * 1024 * 1024 * 1024));
assertThat(PB.toMB(1), equalTo(1024L * 1024 * 1024));
assertThat(PB.toGB(1), equalTo(1024L * 1024));
assertThat(PB.toTB(1), equalTo(1024L));
assertThat(PB.toPB(1), equalTo(1L));
}
}

View File

@ -32,15 +32,15 @@ import static org.hamcrest.Matchers.is;
*/
public class ByteSizeValueTests extends ESTestCase {
public void testActualPeta() {
MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.PB).bytes(), equalTo(4503599627370496l));
MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.PB).bytes(), equalTo(4503599627370496L));
}
public void testActualTera() {
MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.TB).bytes(), equalTo(4398046511104l));
MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.TB).bytes(), equalTo(4398046511104L));
}
public void testActual() {
MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.GB).bytes(), equalTo(4294967296l));
MatcherAssert.assertThat(new ByteSizeValue(4, ByteSizeUnit.GB).bytes(), equalTo(4294967296L));
}
public void testSimple() {

View File

@ -40,7 +40,7 @@ public class FuzzinessTests extends ESTestCase {
assertThat(Fuzziness.build(randomFrom(options)).asInt(), equalTo(1));
assertThat(Fuzziness.build(randomFrom(options)).asFloat(), equalTo(1f));
assertThat(Fuzziness.build(randomFrom(options)).asDouble(), equalTo(1d));
assertThat(Fuzziness.build(randomFrom(options)).asLong(), equalTo(1l));
assertThat(Fuzziness.build(randomFrom(options)).asLong(), equalTo(1L));
assertThat(Fuzziness.build(randomFrom(options)).asShort(), equalTo((short) 1));
}
@ -143,7 +143,7 @@ public class FuzzinessTests extends ESTestCase {
assertThat(Fuzziness.AUTO.asInt(), equalTo(1));
assertThat(Fuzziness.AUTO.asFloat(), equalTo(1f));
assertThat(Fuzziness.AUTO.asDouble(), equalTo(1d));
assertThat(Fuzziness.AUTO.asLong(), equalTo(1l));
assertThat(Fuzziness.AUTO.asLong(), equalTo(1L));
assertThat(Fuzziness.AUTO.asShort(), equalTo((short) 1));
assertThat(Fuzziness.AUTO.asTimeValue(), equalTo(TimeValue.parseTimeValue("1ms", TimeValue.timeValueMillis(1), "fuzziness")));

View File

@ -29,7 +29,7 @@ import static org.hamcrest.Matchers.is;
public class SizeValueTests extends ESTestCase {
public void testThatConversionWorks() {
SizeValue sizeValue = new SizeValue(1000);
assertThat(sizeValue.kilo(), is(1l));
assertThat(sizeValue.kilo(), is(1L));
assertThat(sizeValue.toString(), is("1k"));
sizeValue = new SizeValue(1000, SizeUnit.KILO);

View File

@ -60,7 +60,7 @@ public class TimeValueTests extends ESTestCase {
}
public void testMinusOne() {
assertThat(new TimeValue(-1).nanos(), lessThan(0l));
assertThat(new TimeValue(-1).nanos(), lessThan(0L));
}
public void testParseTimeValue() {

View File

@ -315,8 +315,8 @@ public class ObjectParserTests extends ESTestCase {
assertArrayEquals(parse.double_array_field.toArray(), Arrays.asList(2.1d).toArray());
assertEquals(parse.double_field, 2.1d, 0.0d);
assertArrayEquals(parse.long_array_field.toArray(), Arrays.asList(4l).toArray());
assertEquals(parse.long_field, 4l);
assertArrayEquals(parse.long_array_field.toArray(), Arrays.asList(4L).toArray());
assertEquals(parse.long_field, 4L);
assertArrayEquals(parse.string_array_field.toArray(), Arrays.asList("5").toArray());
assertEquals(parse.string_field, "5");

View File

@ -63,7 +63,7 @@ public class SimpleJodaTests extends ESTestCase {
DateTimeFormatter formatter = ISODateTimeFormat.dateTimeNoMillis().withZone(DateTimeZone.UTC);
long millis = formatter.parseMillis("1970-01-01T00:00:00Z");
assertThat(millis, equalTo(0l));
assertThat(millis, equalTo(0L));
}
public void testUpperBound() {
@ -79,20 +79,20 @@ public class SimpleJodaTests extends ESTestCase {
public void testIsoDateFormatDateOptionalTimeUTC() {
DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
long millis = formatter.parseMillis("1970-01-01T00:00:00Z");
assertThat(millis, equalTo(0l));
assertThat(millis, equalTo(0L));
millis = formatter.parseMillis("1970-01-01T00:00:00.001Z");
assertThat(millis, equalTo(1l));
assertThat(millis, equalTo(1L));
millis = formatter.parseMillis("1970-01-01T00:00:00.1Z");
assertThat(millis, equalTo(100l));
assertThat(millis, equalTo(100L));
millis = formatter.parseMillis("1970-01-01T00:00:00.1");
assertThat(millis, equalTo(100l));
assertThat(millis, equalTo(100L));
millis = formatter.parseMillis("1970-01-01T00:00:00");
assertThat(millis, equalTo(0l));
assertThat(millis, equalTo(0L));
millis = formatter.parseMillis("1970-01-01");
assertThat(millis, equalTo(0l));
assertThat(millis, equalTo(0L));
millis = formatter.parseMillis("1970");
assertThat(millis, equalTo(0l));
assertThat(millis, equalTo(0L));
try {
formatter.parseMillis("1970 kuku");
@ -109,15 +109,15 @@ public class SimpleJodaTests extends ESTestCase {
public void testIsoVsCustom() {
DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC);
long millis = formatter.parseMillis("1970-01-01T00:00:00");
assertThat(millis, equalTo(0l));
assertThat(millis, equalTo(0L));
formatter = DateTimeFormat.forPattern("yyyy/MM/dd HH:mm:ss").withZone(DateTimeZone.UTC);
millis = formatter.parseMillis("1970/01/01 00:00:00");
assertThat(millis, equalTo(0l));
assertThat(millis, equalTo(0L));
FormatDateTimeFormatter formatter2 = Joda.forPattern("yyyy/MM/dd HH:mm:ss");
millis = formatter2.parser().parseMillis("1970/01/01 00:00:00");
assertThat(millis, equalTo(0l));
assertThat(millis, equalTo(0L));
}
public void testWriteAndParse() {
@ -345,19 +345,19 @@ public class SimpleJodaTests extends ESTestCase {
public void testThatEpochParserIsIdempotent() {
FormatDateTimeFormatter formatter = Joda.forPattern("epoch_millis");
DateTime dateTime = formatter.parser().parseDateTime("1234567890123");
assertThat(dateTime.getMillis(), is(1234567890123l));
assertThat(dateTime.getMillis(), is(1234567890123L));
dateTime = formatter.printer().parseDateTime("1234567890456");
assertThat(dateTime.getMillis(), is(1234567890456l));
assertThat(dateTime.getMillis(), is(1234567890456L));
dateTime = formatter.parser().parseDateTime("1234567890789");
assertThat(dateTime.getMillis(), is(1234567890789l));
assertThat(dateTime.getMillis(), is(1234567890789L));
FormatDateTimeFormatter secondsFormatter = Joda.forPattern("epoch_second");
DateTime secondsDateTime = secondsFormatter.parser().parseDateTime("1234567890");
assertThat(secondsDateTime.getMillis(), is(1234567890000l));
assertThat(secondsDateTime.getMillis(), is(1234567890000L));
secondsDateTime = secondsFormatter.printer().parseDateTime("1234567890");
assertThat(secondsDateTime.getMillis(), is(1234567890000l));
assertThat(secondsDateTime.getMillis(), is(1234567890000L));
secondsDateTime = secondsFormatter.parser().parseDateTime("1234567890");
assertThat(secondsDateTime.getMillis(), is(1234567890000l));
assertThat(secondsDateTime.getMillis(), is(1234567890000L));
}
public void testThatDefaultFormatterChecksForCorrectYearLength() throws Exception {

View File

@ -469,7 +469,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
int shard = MathUtils.mod(Murmur3HashFunction.hash(id), numPrimaries);
logger.trace("[{}] indexing id [{}] through node [{}] targeting shard [{}]", name, id, node, shard);
IndexResponse response = client.prepareIndex("test", "type", id).setSource("{}").setTimeout("1s").get();
assertThat(response.getVersion(), equalTo(1l));
assertThat(response.getVersion(), equalTo(1L));
ackedDocs.put(id, node);
logger.trace("[{}] indexed id [{}] through node [{}]", name, id, node);
} catch (ElasticsearchException e) {
@ -728,14 +728,14 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
IndexResponse indexResponse = internalCluster().client(notIsolatedNode).prepareIndex("test", "type").setSource("field", "value").get();
assertThat(indexResponse.getVersion(), equalTo(1l));
assertThat(indexResponse.getVersion(), equalTo(1L));
logger.info("Verifying if document exists via node[" + notIsolatedNode + "]");
GetResponse getResponse = internalCluster().client(notIsolatedNode).prepareGet("test", "type", indexResponse.getId())
.setPreference("_local")
.get();
assertThat(getResponse.isExists(), is(true));
assertThat(getResponse.getVersion(), equalTo(1l));
assertThat(getResponse.getVersion(), equalTo(1L));
assertThat(getResponse.getId(), equalTo(indexResponse.getId()));
scheme.stopDisrupting();
@ -749,7 +749,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
.setPreference("_local")
.get();
assertThat(getResponse.isExists(), is(true));
assertThat(getResponse.getVersion(), equalTo(1l));
assertThat(getResponse.getVersion(), equalTo(1L));
assertThat(getResponse.getId(), equalTo(indexResponse.getId()));
}
}
@ -1049,7 +1049,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
// wait for relocation to finish
endRelocationLatch.await();
// now search for the documents and see if we get a reply
assertThat(client().prepareSearch().setSize(0).get().getHits().totalHits(), equalTo(100l));
assertThat(client().prepareSearch().setSize(0).get().getHits().totalHits(), equalTo(100L));
}
public void testIndexImportedFromDataOnlyNodesIfMasterLostDataFolder() throws Exception {

View File

@ -157,14 +157,14 @@ public class DocumentActionsIT extends ESIntegTestCase {
// test successful
SearchResponse countResponse = client().prepareSearch("test").setSize(0).setQuery(termQuery("_type", "type1")).execute().actionGet();
assertNoFailures(countResponse);
assertThat(countResponse.getHits().totalHits(), equalTo(2l));
assertThat(countResponse.getHits().totalHits(), equalTo(2L));
assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
assertThat(countResponse.getFailedShards(), equalTo(0));
// count with no query is a match all one
countResponse = client().prepareSearch("test").setSize(0).execute().actionGet();
assertThat("Failures " + countResponse.getShardFailures(), countResponse.getShardFailures() == null ? 0 : countResponse.getShardFailures().length, equalTo(0));
assertThat(countResponse.getHits().totalHits(), equalTo(2l));
assertThat(countResponse.getHits().totalHits(), equalTo(2L));
assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries));
assertThat(countResponse.getFailedShards(), equalTo(0));
}

View File

@ -144,32 +144,32 @@ public class FieldStatsIntegrationIT extends ESIntegTestCase {
// default:
FieldStatsResponse response = client().prepareFieldStats().setFields("value").get();
assertAllSuccessful(response);
assertThat(response.getAllFieldStats().get("value").getMinValue(), equalTo(-10l));
assertThat(response.getAllFieldStats().get("value").getMaxValue(), equalTo(300l));
assertThat(response.getAllFieldStats().get("value").getMinValue(), equalTo(-10L));
assertThat(response.getAllFieldStats().get("value").getMaxValue(), equalTo(300L));
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMinValue(), equalTo(-10l));
assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMaxValue(), equalTo(300l));
assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMinValue(), equalTo(-10L));
assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMaxValue(), equalTo(300L));
// Level: cluster
response = client().prepareFieldStats().setFields("value").setLevel("cluster").get();
assertAllSuccessful(response);
assertThat(response.getAllFieldStats().get("value").getMinValue(), equalTo(-10l));
assertThat(response.getAllFieldStats().get("value").getMaxValue(), equalTo(300l));
assertThat(response.getAllFieldStats().get("value").getMinValue(), equalTo(-10L));
assertThat(response.getAllFieldStats().get("value").getMaxValue(), equalTo(300L));
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMinValue(), equalTo(-10l));
assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMaxValue(), equalTo(300l));
assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMinValue(), equalTo(-10L));
assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMaxValue(), equalTo(300L));
// Level: indices
response = client().prepareFieldStats().setFields("value").setLevel("indices").get();
assertAllSuccessful(response);
assertThat(response.getAllFieldStats(), nullValue());
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(3));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(-10l));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMaxValue(), equalTo(100l));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(101l));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo(200l));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMinValue(), equalTo(201l));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMaxValue(), equalTo(300l));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(-10L));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMaxValue(), equalTo(100L));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(101L));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo(200L));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMinValue(), equalTo(201L));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMaxValue(), equalTo(300L));
// Illegal level option:
try {
@ -189,8 +189,8 @@ public class FieldStatsIntegrationIT extends ESIntegTestCase {
));
ensureGreen("test1", "test2");
client().prepareIndex("test1", "test").setSource("value", 1l).get();
client().prepareIndex("test1", "test").setSource("value", 2l).get();
client().prepareIndex("test1", "test").setSource("value", 1L).get();
client().prepareIndex("test1", "test").setSource("value", 2L).get();
client().prepareIndex("test2", "test").setSource("value", "a").get();
client().prepareIndex("test2", "test").setSource("value", "b").get();
refresh();
@ -205,8 +205,8 @@ public class FieldStatsIntegrationIT extends ESIntegTestCase {
FieldStatsResponse response = client().prepareFieldStats().setFields("value").setLevel("indices").get();
assertAllSuccessful(response);
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1l));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMaxValue(), equalTo(2l));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMaxValue(), equalTo(2L));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(new BytesRef("a")));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo(new BytesRef("b")));
}
@ -235,8 +235,8 @@ public class FieldStatsIntegrationIT extends ESIntegTestCase {
assertAllSuccessful(response);
assertThat(response.getAllFieldStats(), nullValue());
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMinValue(), equalTo(201l));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMaxValue(), equalTo(300l));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMinValue(), equalTo(201L));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMaxValue(), equalTo(300L));
response = client().prepareFieldStats()
.setFields("value")
@ -246,10 +246,10 @@ public class FieldStatsIntegrationIT extends ESIntegTestCase {
assertAllSuccessful(response);
assertThat(response.getAllFieldStats(), nullValue());
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(-10l));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMaxValue(), equalTo(100l));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(101l));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo(200l));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(-10L));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMaxValue(), equalTo(100L));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(101L));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo(200L));
response = client().prepareFieldStats()
.setFields("value")
@ -259,10 +259,10 @@ public class FieldStatsIntegrationIT extends ESIntegTestCase {
assertAllSuccessful(response);
assertThat(response.getAllFieldStats(), nullValue());
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(101l));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo(200l));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMinValue(), equalTo(201l));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMaxValue(), equalTo(300l));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(101L));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo(200L));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMinValue(), equalTo(201L));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMaxValue(), equalTo(300L));
response = client().prepareFieldStats()
.setFields("value")
@ -290,8 +290,8 @@ public class FieldStatsIntegrationIT extends ESIntegTestCase {
assertAllSuccessful(response);
assertThat(response.getAllFieldStats(), nullValue());
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(101l));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo(200l));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(101L));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo(200L));
response = client().prepareFieldStats()
.setFields("value")
@ -301,8 +301,8 @@ public class FieldStatsIntegrationIT extends ESIntegTestCase {
assertAllSuccessful(response);
assertThat(response.getAllFieldStats(), nullValue());
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMinValue(), equalTo(201l));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMaxValue(), equalTo(300l));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMinValue(), equalTo(201L));
assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMaxValue(), equalTo(300L));
}
public void testIncompatibleFilter() throws Exception {


@ -66,9 +66,9 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
}
public void testLong() {
testNumberRange("field1", "long", 312321312312412l, 312321312312422l);
testNumberRange("field1", "long", 312321312312412L, 312321312312422L);
testNumberRange("field1", "long", -5, 5);
testNumberRange("field1", "long", -312321312312422l, -312321312312412l);
testNumberRange("field1", "long", -312321312312422L, -312321312312412L);
}
public void testString() {
@ -79,8 +79,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
client().admin().indices().prepareRefresh().get();
FieldStatsResponse result = client().prepareFieldStats().setFields("field").get();
assertThat(result.getAllFieldStats().get("field").getMaxDoc(), equalTo(11l));
assertThat(result.getAllFieldStats().get("field").getDocCount(), equalTo(11l));
assertThat(result.getAllFieldStats().get("field").getMaxDoc(), equalTo(11L));
assertThat(result.getAllFieldStats().get("field").getDocCount(), equalTo(11L));
assertThat(result.getAllFieldStats().get("field").getDensity(), equalTo(100));
assertThat(result.getAllFieldStats().get("field").getMinValue(), equalTo(new BytesRef(String.format(Locale.ENGLISH, "%03d", 0))));
assertThat(result.getAllFieldStats().get("field").getMaxValue(), equalTo(new BytesRef(String.format(Locale.ENGLISH, "%03d", 10))));
@ -97,8 +97,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
client().admin().indices().prepareRefresh().get();
FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11l));
assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11l));
assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11L));
assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11L));
assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(-1d));
assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(9d));
@ -114,8 +114,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
client().admin().indices().prepareRefresh().get();
FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11l));
assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11l));
assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11L));
assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11L));
assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(-1f));
assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(9f));
@ -144,44 +144,44 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
public void testMerge() {
List<FieldStats> stats = new ArrayList<>();
stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
stats.add(new FieldStats.Long(1, 1L, 1L, 1L, 1L, 1L));
stats.add(new FieldStats.Long(1, 1L, 1L, 1L, 1L, 1L));
stats.add(new FieldStats.Long(1, 1L, 1L, 1L, 1L, 1L));
FieldStats stat = new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l);
FieldStats stat = new FieldStats.Long(1, 1L, 1L, 1L, 1L, 1L);
for (FieldStats otherStat : stats) {
stat.append(otherStat);
}
assertThat(stat.getMaxDoc(), equalTo(4l));
assertThat(stat.getDocCount(), equalTo(4l));
assertThat(stat.getSumDocFreq(), equalTo(4l));
assertThat(stat.getSumTotalTermFreq(), equalTo(4l));
assertThat(stat.getMaxDoc(), equalTo(4L));
assertThat(stat.getDocCount(), equalTo(4L));
assertThat(stat.getSumDocFreq(), equalTo(4L));
assertThat(stat.getSumTotalTermFreq(), equalTo(4L));
}
public void testMerge_notAvailable() {
List<FieldStats> stats = new ArrayList<>();
stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
stats.add(new FieldStats.Long(1, 1L, 1L, 1L, 1L, 1L));
stats.add(new FieldStats.Long(1, 1L, 1L, 1L, 1L, 1L));
stats.add(new FieldStats.Long(1, 1L, 1L, 1L, 1L, 1L));
FieldStats stat = new FieldStats.Long(1, -1l, -1l, -1l, 1l, 1l);
FieldStats stat = new FieldStats.Long(1, -1L, -1L, -1L, 1L, 1L);
for (FieldStats otherStat : stats) {
stat.append(otherStat);
}
assertThat(stat.getMaxDoc(), equalTo(4l));
assertThat(stat.getDocCount(), equalTo(-1l));
assertThat(stat.getSumDocFreq(), equalTo(-1l));
assertThat(stat.getSumTotalTermFreq(), equalTo(-1l));
assertThat(stat.getMaxDoc(), equalTo(4L));
assertThat(stat.getDocCount(), equalTo(-1L));
assertThat(stat.getSumDocFreq(), equalTo(-1L));
assertThat(stat.getSumTotalTermFreq(), equalTo(-1L));
stats.add(new FieldStats.Long(1, -1l, -1l, -1l, 1l, 1l));
stats.add(new FieldStats.Long(1, -1L, -1L, -1L, 1L, 1L));
stat = stats.remove(0);
for (FieldStats otherStat : stats) {
stat.append(otherStat);
}
assertThat(stat.getMaxDoc(), equalTo(4l));
assertThat(stat.getDocCount(), equalTo(-1l));
assertThat(stat.getSumDocFreq(), equalTo(-1l));
assertThat(stat.getSumTotalTermFreq(), equalTo(-1l));
assertThat(stat.getMaxDoc(), equalTo(4L));
assertThat(stat.getDocCount(), equalTo(-1L));
assertThat(stat.getSumDocFreq(), equalTo(-1L));
assertThat(stat.getSumTotalTermFreq(), equalTo(-1L));
}
public void testInvalidField() {
@ -213,9 +213,9 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
public void testNumberFiltering() {
createIndex("test1", Settings.EMPTY, "type", "value", "type=long");
client().prepareIndex("test1", "test").setSource("value", 1l).get();
client().prepareIndex("test1", "test").setSource("value", 1L).get();
createIndex("test2", Settings.EMPTY, "type", "value", "type=long");
client().prepareIndex("test2", "test").setSource("value", 3l).get();
client().prepareIndex("test2", "test").setSource("value", 3L).get();
client().admin().indices().prepareRefresh().get();
FieldStatsResponse response = client().prepareFieldStats()
@ -223,8 +223,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
.setLevel("indices")
.get();
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1l));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3l));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3L));
response = client().prepareFieldStats()
.setFields("value")
@ -246,7 +246,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
.setLevel("indices")
.get();
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1l));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
response = client().prepareFieldStats()
.setFields("value")
@ -254,7 +254,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
.setLevel("indices")
.get();
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1l));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
response = client().prepareFieldStats()
.setFields("value")
@ -269,7 +269,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
.setLevel("indices")
.get();
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3l));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3L));
response = client().prepareFieldStats()
.setFields("value")
@ -277,7 +277,7 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
.setLevel("indices")
.get();
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3l));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3L));
response = client().prepareFieldStats()
.setFields("value")
@ -292,8 +292,8 @@ public class FieldStatsTests extends ESSingleNodeTestCase {
.setLevel("indices")
.get();
assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1l));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3l));
assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(1L));
assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(3L));
response = client().prepareFieldStats()
.setFields("value")


@ -232,7 +232,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
logger.info("--> verify 1 doc in the index");
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
}
logger.info("--> closing test index...");
@ -250,9 +250,9 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
assertThat(health.isTimedOut(), equalTo(false));
logger.info("--> verify 1 doc in the index");
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
}
}
@ -268,7 +268,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
logger.info("--> verify 1 doc in the index");
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
}
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));
@ -328,7 +328,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
logger.info("--> verify 1 doc in the index");
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1l);
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 1L);
}
assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(true));

Some files were not shown because too many files have changed in this diff