Merge branch 'master' into feature/query-refactoring
Conflicts: core/src/main/java/org/elasticsearch/transport/local/LocalTransport.java
commit b763265f67
@@ -122,6 +122,7 @@ final class Security {
         addPath(policy, environment.libFile(), "read,readlink");
         addPath(policy, environment.pluginsFile(), "read,readlink");
         addPath(policy, environment.configFile(), "read,readlink");
+        addPath(policy, environment.scriptsFile(), "read,readlink");
         // read-write dirs
         addPath(policy, environment.tmpFile(), "read,readlink,write,delete");
         addPath(policy, environment.logsFile(), "read,readlink,write,delete");
@@ -19,9 +19,9 @@
 
 package org.elasticsearch.cluster;
 
-import com.google.common.collect.ImmutableMap;
 import org.elasticsearch.cluster.routing.ShardRouting;
 
+import java.util.Collections;
 import java.util.Map;
 
 /**
@@ -30,10 +30,16 @@ import java.util.Map;
  * <code>InternalClusterInfoService.shardIdentifierFromRouting(String)</code>
  * for the key used in the shardSizes map
  */
-public class ClusterInfo {
+public final class ClusterInfo {
 
     private final Map<String, DiskUsage> usages;
     final Map<String, Long> shardSizes;
+    public static final ClusterInfo EMPTY = new ClusterInfo();
 
+    private ClusterInfo() {
+        this.usages = Collections.emptyMap();
+        this.shardSizes = Collections.emptyMap();
+    }
 
     public ClusterInfo(Map<String, DiskUsage> usages, Map<String, Long> shardSizes) {
         this.usages = usages;
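Note: ClusterInfo.EMPTY above is a null-object style constant backed by Collections.emptyMap(), added so callers no longer need the interface-level EMPTY field that the next hunk deletes. A minimal sketch of the intended call-site shape (variable names here are illustrative, not from the diff):

    // Hypothetical caller: fall back to the shared immutable instance
    // instead of handling a null ClusterInfo.
    ClusterInfo info = clusterInfoService.getClusterInfo();
    if (info == null) {
        info = ClusterInfo.EMPTY;
    }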
@@ -25,8 +25,6 @@ package org.elasticsearch.cluster;
  */
 public interface ClusterInfoService {
 
-    public static ClusterInfoService EMPTY = EmptyClusterInfoService.getInstance();
-
     /** The latest cluster information */
     public ClusterInfo getClusterInfo();
 
@@ -27,24 +27,15 @@ import org.elasticsearch.common.settings.Settings;
  */
 public class EmptyClusterInfoService extends AbstractComponent implements ClusterInfoService {
 
-    private final static class Holder {
-        private final static EmptyClusterInfoService instance = new EmptyClusterInfoService();
-    }
-
-    private final ClusterInfo emptyClusterInfo;
+    public final static EmptyClusterInfoService INSTANCE = new EmptyClusterInfoService();
 
     private EmptyClusterInfoService() {
         super(Settings.EMPTY);
-        emptyClusterInfo = new ClusterInfo(ImmutableMap.<String, DiskUsage>of(), ImmutableMap.<String, Long>of());
     }
-
-    public static EmptyClusterInfoService getInstance() {
-        return Holder.instance;
-    }
 
     @Override
     public ClusterInfo getClusterInfo() {
-        return emptyClusterInfo;
+        return ClusterInfo.EMPTY;
     }
 
     @Override
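Note: this hunk trades the lazy initialization-on-demand holder idiom for an eagerly constructed public INSTANCE field. A side-by-side sketch of the two idioms (the class names are illustrative, not from the diff):

    // Before: instance is created lazily, on the first getInstance() call.
    class LazySingleton {
        private static class Holder {
            static final LazySingleton INSTANCE = new LazySingleton();
        }
        static LazySingleton getInstance() { return Holder.INSTANCE; }
    }

    // After: instance is created at class-load time; simpler, and fine here
    // because construction is trivial and the class is always used.
    class EagerSingleton {
        static final EagerSingleton INSTANCE = new EagerSingleton();
    }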
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.cluster;
 
-import com.google.common.collect.ImmutableMap;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.LatchedActionListener;
 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
@@ -32,7 +31,6 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats;
 import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
@@ -66,8 +64,8 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
 
     private volatile TimeValue updateFrequency;
 
-    private volatile ImmutableMap<String, DiskUsage> usages;
-    private volatile ImmutableMap<String, Long> shardSizes;
+    private volatile Map<String, DiskUsage> usages;
+    private volatile Map<String, Long> shardSizes;
     private volatile boolean isMaster = false;
     private volatile boolean enabled;
     private volatile TimeValue fetchTimeout;
@@ -83,8 +81,8 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
                                       TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService,
                                       ThreadPool threadPool) {
         super(settings);
-        this.usages = ImmutableMap.of();
-        this.shardSizes = ImmutableMap.of();
+        this.usages = Collections.emptyMap();
+        this.shardSizes = Collections.emptyMap();
         this.transportNodesStatsAction = transportNodesStatsAction;
         this.transportIndicesStatsAction = transportIndicesStatsAction;
         this.clusterService = clusterService;
@@ -201,7 +199,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
                 }
                 Map<String, DiskUsage> newUsages = new HashMap<>(usages);
                 newUsages.remove(removedNode.getId());
-                usages = ImmutableMap.copyOf(newUsages);
+                usages = Collections.unmodifiableMap(newUsages);
             }
         }
     }
@@ -332,7 +330,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
                 newUsages.put(nodeId, new DiskUsage(nodeId, nodeName, total, available));
             }
         }
-        usages = ImmutableMap.copyOf(newUsages);
+        usages = Collections.unmodifiableMap(newUsages);
     }
 
     @Override
@@ -348,7 +346,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
                     logger.warn("Failed to execute NodeStatsAction for ClusterInfoUpdateJob", e);
                 }
                 // we empty the usages list, to be safe - we don't know what's going on.
-                usages = ImmutableMap.of();
+                usages = Collections.emptyMap();
             }
         }
     });
@@ -366,7 +364,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
                     }
                     newShardSizes.put(sid, size);
                 }
-                shardSizes = ImmutableMap.copyOf(newShardSizes);
+                shardSizes = Collections.unmodifiableMap(newShardSizes);
             }
 
             @Override
@@ -382,7 +380,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
                     logger.warn("Failed to execute IndicesStatsAction for ClusterInfoUpdateJob", e);
                 }
                 // we empty the usages list, to be safe - we don't know what's going on.
-                shardSizes = ImmutableMap.of();
+                shardSizes = Collections.emptyMap();
             }
         }
     });
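Note: the hunks above swap Guava's ImmutableMap for plain java.util maps; writers build a fresh HashMap from the current snapshot, mutate the private copy, then publish a read-only view through the volatile field. A minimal self-contained sketch of that copy-then-publish pattern, under the same assumptions (single writer, many readers):

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    class UsagePublisher {
        private volatile Map<String, Long> usages = Collections.emptyMap();

        void removeNode(String nodeId) {
            Map<String, Long> copy = new HashMap<>(usages); // snapshot the current map
            copy.remove(nodeId);                            // mutate only the copy
            usages = Collections.unmodifiableMap(copy);     // publish a read-only view
        }
    }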
@@ -25,6 +25,7 @@ import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.DiskUsage;
+import org.elasticsearch.cluster.EmptyClusterInfoService;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
@@ -226,7 +227,7 @@ public class DiskThresholdDecider extends AllocationDecider {
         // It's okay the Client is null here, because the empty cluster info
         // service will never actually call the listener where the client is
         // needed. Also this constructor is only used for tests
-        this(settings, new NodeSettingsService(settings), ClusterInfoService.EMPTY, null);
+        this(settings, new NodeSettingsService(settings), EmptyClusterInfoService.INSTANCE, null);
     }
 
     @Inject
@@ -312,7 +313,7 @@ public class DiskThresholdDecider extends AllocationDecider {
      * If subtractShardsMovingAway is set then the size of shards moving away is subtracted from the total size
      * of all shards
      */
-    public long sizeOfRelocatingShards(RoutingNode node, ClusterInfo clusterInfo, boolean subtractShardsMovingAway) {
+    public static long sizeOfRelocatingShards(RoutingNode node, ClusterInfo clusterInfo, boolean subtractShardsMovingAway) {
        long totalSize = 0;
        for (ShardRouting routing : node.shardsWithState(ShardRoutingState.RELOCATING, ShardRoutingState.INITIALIZING)) {
            if (routing.initializing() && routing.relocatingNodeId() != null) {
@@ -324,7 +325,7 @@ public class DiskThresholdDecider extends AllocationDecider {
        return totalSize;
    }
 
-    private long getShardSize(ShardRouting routing, ClusterInfo clusterInfo) {
+    static long getShardSize(ShardRouting routing, ClusterInfo clusterInfo) {
        Long shardSize = clusterInfo.getShardSize(routing);
        return shardSize == null ? 0 : shardSize;
    }
@@ -419,8 +420,7 @@ public class DiskThresholdDecider extends AllocationDecider {
        }
 
        // Secondly, check that allocating the shard to this node doesn't put it above the high watermark
-        Long shardSize = allocation.clusterInfo().getShardSize(shardRouting);
-        shardSize = shardSize == null ? 0 : shardSize;
+        final long shardSize = getShardSize(shardRouting, allocation.clusterInfo());
        double freeSpaceAfterShard = freeDiskPercentageAfterShardAssigned(usage, shardSize);
        long freeBytesAfterShard = freeBytes - shardSize;
        if (freeBytesAfterShard < freeBytesThresholdHigh.bytes()) {
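Note: getShardSize becomes a static package-private helper that folds the null check (an unknown shard size counts as 0 bytes), which is what lets the @@ -419 hunk drop its own null handling. The equivalent logic, sketched against a plain map standing in for ClusterInfo:

    // Hypothetical stand-in for ClusterInfo.getShardSize(routing).
    static long shardSizeOrZero(Map<String, Long> shardSizes, String shardKey) {
        Long size = shardSizes.get(shardKey);
        return size == null ? 0 : size; // treat unknown sizes as zero bytes
    }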
@@ -154,7 +154,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
     protected void doStart() {
         add(localNodeMasterListeners);
         this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();
-        this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(daemonThreadFactory(settings, UPDATE_THREAD_NAME));
+        this.updateTasksExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME));
         this.reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ReconnectToNodes());
         Map<String, String> nodeAttributes = discoveryNodeService.buildAttributes();
         // note, we rely on the fact that its a new id each time we start, see FD and "kill -9" handling
@@ -19,12 +19,13 @@
 
 package org.elasticsearch.common.http.client;
 
+import com.google.common.base.Charsets;
+import com.google.common.base.Strings;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchTimeoutException;
 import org.elasticsearch.Version;
+import org.elasticsearch.common.Base64;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.cli.Terminal;
 import org.elasticsearch.common.unit.TimeValue;
 
 import java.io.*;
@@ -266,6 +267,17 @@ public class HttpDownloadHelper {
             connection.setIfModifiedSince(timestamp);
         }
 
+        // in case the plugin manager is its own project, this can become an authenticator
+        boolean isSecureProcotol = "https".equalsIgnoreCase(aSource.getProtocol());
+        boolean isAuthInfoSet = !Strings.isNullOrEmpty(aSource.getUserInfo());
+        if (isAuthInfoSet) {
+            if (!isSecureProcotol) {
+                throw new IOException("Basic auth is only supported for HTTPS!");
+            }
+            String basicAuth = Base64.encodeBytes(aSource.getUserInfo().getBytes(Charsets.UTF_8));
+            connection.setRequestProperty("Authorization", "Basic " + basicAuth);
+        }
+
         if (connection instanceof HttpURLConnection) {
             ((HttpURLConnection) connection).setInstanceFollowRedirects(false);
             ((HttpURLConnection) connection).setUseCaches(true);
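Note: the added block refuses URL user-info credentials over plain HTTP and otherwise sends them as a preemptive Basic Authorization header. A standalone sketch of the same idea using only JDK classes (the diff itself uses Elasticsearch's Base64 and Guava's Charsets/Strings):

    import java.io.IOException;
    import java.net.URL;
    import java.net.URLConnection;
    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    static void applyBasicAuth(URL source, URLConnection connection) throws IOException {
        String userInfo = source.getUserInfo(); // "user:password", or null if absent
        if (userInfo == null || userInfo.isEmpty()) {
            return; // nothing to do
        }
        if (!"https".equalsIgnoreCase(source.getProtocol())) {
            throw new IOException("Basic auth is only supported for HTTPS!");
        }
        String token = Base64.getEncoder().encodeToString(userInfo.getBytes(StandardCharsets.UTF_8));
        connection.setRequestProperty("Authorization", "Basic " + token);
    }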
@@ -27,9 +27,7 @@ import java.util.concurrent.ThreadPoolExecutor;
 /**
  */
 public class EsAbortPolicy implements XRejectedExecutionHandler {
 
     private final CounterMetric rejected = new CounterMetric();
-    public static final String SHUTTING_DOWN_KEY = "(shutting down)";
-
     @Override
     public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
@@ -49,16 +47,7 @@ public class EsAbortPolicy implements XRejectedExecutionHandler {
             }
         }
         rejected.inc();
-        StringBuilder sb = new StringBuilder("rejected execution ");
-        if (executor.isShutdown()) {
-            sb.append(SHUTTING_DOWN_KEY + " ");
-        } else {
-            if (executor.getQueue() instanceof SizeBlockingQueue) {
-                sb.append("(queue capacity ").append(((SizeBlockingQueue) executor.getQueue()).capacity()).append(") ");
-            }
-        }
-        sb.append("on ").append(r.toString());
-        throw new EsRejectedExecutionException(sb.toString());
+        throw new EsRejectedExecutionException("rejected execution of " + r + " on " + executor, executor.isShutdown());
     }
 
     @Override
@@ -54,30 +54,30 @@ public class EsExecutors {
         return settings.getAsInt(PROCESSORS, defaultValue);
     }
 
-    public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(ThreadFactory threadFactory) {
-        return new PrioritizedEsThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS, threadFactory);
+    public static PrioritizedEsThreadPoolExecutor newSinglePrioritizing(String name, ThreadFactory threadFactory) {
+        return new PrioritizedEsThreadPoolExecutor(name, 1, 1, 0L, TimeUnit.MILLISECONDS, threadFactory);
     }
 
-    public static EsThreadPoolExecutor newScaling(int min, int max, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory) {
+    public static EsThreadPoolExecutor newScaling(String name, int min, int max, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory) {
         ExecutorScalingQueue<Runnable> queue = new ExecutorScalingQueue<>();
         // we force the execution, since we might run into concurrency issues in offer for ScalingBlockingQueue
-        EsThreadPoolExecutor executor = new EsThreadPoolExecutor(min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy());
+        EsThreadPoolExecutor executor = new EsThreadPoolExecutor(name, min, max, keepAliveTime, unit, queue, threadFactory, new ForceQueuePolicy());
         queue.executor = executor;
         return executor;
     }
 
-    public static EsThreadPoolExecutor newCached(long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory) {
-        return new EsThreadPoolExecutor(0, Integer.MAX_VALUE, keepAliveTime, unit, new SynchronousQueue<Runnable>(), threadFactory, new EsAbortPolicy());
+    public static EsThreadPoolExecutor newCached(String name, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory) {
+        return new EsThreadPoolExecutor(name, 0, Integer.MAX_VALUE, keepAliveTime, unit, new SynchronousQueue<Runnable>(), threadFactory, new EsAbortPolicy());
     }
 
-    public static EsThreadPoolExecutor newFixed(int size, int queueCapacity, ThreadFactory threadFactory) {
+    public static EsThreadPoolExecutor newFixed(String name, int size, int queueCapacity, ThreadFactory threadFactory) {
         BlockingQueue<Runnable> queue;
         if (queueCapacity < 0) {
             queue = ConcurrentCollections.newBlockingQueue();
         } else {
             queue = new SizeBlockingQueue<>(ConcurrentCollections.<Runnable>newBlockingQueue(), queueCapacity);
         }
-        return new EsThreadPoolExecutor(size, size, 0, TimeUnit.MILLISECONDS, queue, threadFactory, new EsAbortPolicy());
+        return new EsThreadPoolExecutor(name, size, size, 0, TimeUnit.MILLISECONDS, queue, threadFactory, new EsAbortPolicy());
     }
 
     public static String threadName(Settings settings, String ... names) {
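Note: every EsExecutors factory now takes a leading pool name, which flows into EsThreadPoolExecutor.toString() and into rejection messages. A sketch of a call site after this change (the pool name and sizes are illustrative):

    // The name shows up in error reporting, e.g. when EsAbortPolicy throws
    // "rejected execution of <task> on <executor>".
    ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[my_pool]");
    EsThreadPoolExecutor workers = EsExecutors.newFixed("my_pool", 4, 100, threadFactory);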
@@ -21,6 +21,7 @@ package org.elasticsearch.common.util.concurrent;
 
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
@@ -28,17 +29,25 @@ import java.io.IOException;
 /**
  */
 public class EsRejectedExecutionException extends ElasticsearchException {
+    private final boolean isExecutorShutdown;
+
+    public EsRejectedExecutionException(String message, boolean isExecutorShutdown) {
+        super(message);
+        this.isExecutorShutdown = isExecutorShutdown;
+    }
 
     public EsRejectedExecutionException(String message) {
-        super(message);
+        this(message, false);
     }
 
     public EsRejectedExecutionException() {
         super((String)null);
+        this.isExecutorShutdown = false;
     }
 
     public EsRejectedExecutionException(Throwable e) {
         super(null, e);
+        this.isExecutorShutdown = false;
     }
 
     @Override
@@ -48,5 +57,24 @@ public class EsRejectedExecutionException extends ElasticsearchException {
 
     public EsRejectedExecutionException(StreamInput in) throws IOException{
         super(in);
+        isExecutorShutdown = in.readBoolean();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+        out.writeBoolean(isExecutorShutdown);
+    }
+
+    /**
+     * Checks if the thread pool that rejected the execution was terminated
+     * shortly after the rejection. Its possible that this returns false and the
+     * thread pool has since been terminated but if this returns false then the
+     * termination wasn't a factor in this rejection. Conversely if this returns
+     * true the shutdown was probably a factor in this rejection but might have
+     * been triggered just after the action rejection.
+     */
+    public boolean isExecutorShutdown() {
+        return isExecutorShutdown;
     }
 }
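Note: the exception now carries (and serializes via writeTo/readBoolean) whether the rejecting executor was shutting down, replacing the old string-matching on SHUTTING_DOWN_KEY. A sketch of how a caller can branch on the flag:

    try {
        executor.execute(task);
    } catch (EsRejectedExecutionException e) {
        if (e.isExecutorShutdown()) {
            // the node is stopping; the rejection is expected, log quietly
        } else {
            // genuine overload: the queue was full while the pool was running
            throw e;
        }
    }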
@@ -33,13 +33,18 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor {
     private volatile ShutdownListener listener;
 
     private final Object monitor = new Object();
+    /**
+     * Name used in error reporting.
+     */
+    private final String name;
 
-    EsThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory) {
-        this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, new EsAbortPolicy());
+    EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory) {
+        this(name, corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, new EsAbortPolicy());
     }
 
-    EsThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, XRejectedExecutionHandler handler) {
+    EsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory, XRejectedExecutionHandler handler) {
         super(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue, threadFactory, handler);
+        this.name = name;
     }
 
     public void shutdown(ShutdownListener listener) {
@@ -93,4 +98,22 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor {
             }
         }
     }
+
+    @Override
+    public String toString() {
+        StringBuilder b = new StringBuilder();
+        b.append(getClass().getSimpleName()).append('[');
+        b.append(name).append(", ");
+        if (getQueue() instanceof SizeBlockingQueue) {
+            @SuppressWarnings("rawtypes")
+            SizeBlockingQueue queue = (SizeBlockingQueue) getQueue();
+            b.append("queue capacity = ").append(queue.capacity()).append(", ");
+        }
+        /*
+         * ThreadPoolExecutor has some nice information in its toString but we
+         * can't get at it easily without just getting the toString.
+         */
+        b.append(super.toString()).append(']');
+        return b.toString();
+    }
 }
@@ -41,8 +41,8 @@ public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor {
     private AtomicLong insertionOrder = new AtomicLong();
     private Queue<Runnable> current = ConcurrentCollections.newQueue();
 
-    PrioritizedEsThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory) {
-        super(corePoolSize, maximumPoolSize, keepAliveTime, unit, new PriorityBlockingQueue<Runnable>(), threadFactory);
+    PrioritizedEsThreadPoolExecutor(String name, int corePoolSize, int maximumPoolSize, long keepAliveTime, TimeUnit unit, ThreadFactory threadFactory) {
+        super(name, corePoolSize, maximumPoolSize, keepAliveTime, unit, new PriorityBlockingQueue<Runnable>(), threadFactory);
     }
 
     public Pending[] getPending() {
@@ -136,7 +136,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
         transportService.registerRequestHandler(ACTION_NAME, UnicastPingRequest.class, ThreadPool.Names.SAME, new UnicastPingRequestHandler());
 
         ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_connect]");
-        unicastConnectExecutor = EsExecutors.newScaling(0, concurrentConnects, 60, TimeUnit.SECONDS, threadFactory);
+        unicastConnectExecutor = EsExecutors.newScaling("unicast_connect", 0, concurrentConnects, 60, TimeUnit.SECONDS, threadFactory);
     }
 
     @Override
@@ -53,6 +53,8 @@ public class Environment {
 
     private final Path configFile;
 
+    private final Path scriptsFile;
+
     private final Path pluginsFile;
 
     /** location of bin/, used by plugin manager */
@@ -100,6 +102,12 @@ public class Environment {
             configFile = homeFile.resolve("config");
         }
 
+        if (settings.get("path.scripts") != null) {
+            scriptsFile = PathUtils.get(cleanPath(settings.get("path.scripts")));
+        } else {
+            scriptsFile = configFile.resolve("scripts");
+        }
+
         if (settings.get("path.plugins") != null) {
             pluginsFile = PathUtils.get(cleanPath(settings.get("path.plugins")));
         } else {
@@ -233,6 +241,13 @@ public class Environment {
         return configFile;
     }
 
+    /**
+     * Location of on-disk scripts
+     */
+    public Path scriptsFile() {
+        return scriptsFile;
+    }
+
     public Path pluginsFile() {
         return pluginsFile;
     }
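Note: path.scripts becomes a first-class setting with a <config>/scripts default, mirroring path.plugins; ScriptService further down switches to it. A sketch of how the resolution behaves (paths are illustrative; the builder call mirrors the SecurityTests hunk near the end of this diff, and the Environment constructor taking Settings is an assumption):

    // If path.scripts is set it wins; otherwise scriptsFile() falls back
    // to configFile().resolve("scripts").
    Settings settings = Settings.builder()
            .put("path.home", "/opt/elasticsearch")
            .put("path.scripts", "/etc/elasticsearch/scripts") // optional override
            .build();
    Environment env = new Environment(settings);
    Path scriptsDir = env.scriptsFile();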
@@ -135,7 +135,7 @@ public final class EngineConfig {
 
     private static final String DEFAULT_CODEC_NAME = "default";
     private TranslogConfig translogConfig;
+    private boolean create = false;
 
     /**
      * Creates a new {@link org.elasticsearch.index.engine.EngineConfig}
@@ -433,4 +433,20 @@ public final class EngineConfig {
     public TranslogConfig getTranslogConfig() {
         return translogConfig;
     }
+
+    /**
+     * Iff set to <code>true</code> the engine will create a new lucene index when opening the engine.
+     * Otherwise the lucene index writer is opened in append mode. The default is <code>false</code>
+     */
+    public void setCreate(boolean create) {
+        this.create = create;
+    }
+
+    /**
+     * Iff <code>true</code> the engine should create a new lucene index when opening the engine.
+     * Otherwise the lucene index writer should be opened in append mode. The default is <code>false</code>
+     */
+    public boolean isCreate() {
+        return create;
+    }
 }
@@ -118,14 +118,11 @@ public class InternalEngine extends Engine {
         for (int i = 0; i < dirtyLocks.length; i++) {
             dirtyLocks[i] = new Object();
         }
 
         throttle = new IndexThrottle();
         this.searcherFactory = new SearchFactory(logger, isClosed, engineConfig);
         final Translog.TranslogGeneration translogGeneration;
         try {
-            // TODO: would be better if ES could tell us "from above" whether this shard was already here, instead of using Lucene's API
-            // (which relies on IO ops, directory listing, and has had scary bugs in the past):
-            boolean create = !Lucene.indexExists(store.directory());
+            final boolean create = engineConfig.isCreate();
             writer = createWriter(create);
             indexWriter = writer;
             translog = openTranslog(engineConfig, writer, create || skipInitialTranslogRecovery || engineConfig.forceNewTranslog());
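Note: InternalEngine no longer probes the directory via Lucene.indexExists; the create decision now arrives through EngineConfig.isCreate(), set by the recovery paths below, which already know whether the index exists. Roughly, the flag maps onto the IndexWriter open mode (a sketch assuming Lucene 5.x semantics; analyzer, store, and engineConfig stand for surrounding state):

    IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
    iwc.setOpenMode(engineConfig.isCreate()
            ? IndexWriterConfig.OpenMode.CREATE   // fresh shard: start a new index
            : IndexWriterConfig.OpenMode.APPEND); // existing shard: reuse what is on disk
    IndexWriter writer = new IndexWriter(store.directory(), iwc);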
@@ -828,14 +828,13 @@ public class IndexShard extends AbstractIndexShardComponent {
     /**
     * After the store has been recovered, we need to start the engine in order to apply operations
     */
-    public Map<String, Mapping> performTranslogRecovery() {
-        final Map<String, Mapping> recoveredTypes = internalPerformTranslogRecovery(false);
+    public Map<String, Mapping> performTranslogRecovery(boolean indexExists) {
+        final Map<String, Mapping> recoveredTypes = internalPerformTranslogRecovery(false, indexExists);
         assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage();
         return recoveredTypes;
-
     }
 
-    private Map<String, Mapping> internalPerformTranslogRecovery(boolean skipTranslogRecovery) {
+    private Map<String, Mapping> internalPerformTranslogRecovery(boolean skipTranslogRecovery, boolean indexExists) {
         if (state != IndexShardState.RECOVERING) {
             throw new IndexShardNotRecoveringException(shardId, state);
         }
@@ -852,6 +851,7 @@ public class IndexShard extends AbstractIndexShardComponent {
         // we disable deletes since we allow for operations to be executed against the shard while recovering
         // but we need to make sure we don't loose deletes until we are done recovering
         engineConfig.setEnableGcDeletes(false);
+        engineConfig.setCreate(indexExists == false);
         createNewEngine(skipTranslogRecovery, engineConfig);
         return engineConfig.getTranslogRecoveryPerformer().getRecoveredTypes();
     }
@@ -860,12 +860,10 @@ public class IndexShard extends AbstractIndexShardComponent {
     * After the store has been recovered, we need to start the engine. This method starts a new engine but skips
     * the replay of the transaction log which is required in cases where we restore a previous index or recover from
     * a remote peer.
-     *
-     * @param wipeTranslogs if set to <code>true</code> all skipped / uncommitted translogs are removed.
     */
-    public void skipTranslogRecovery(boolean wipeTranslogs) throws IOException {
+    public void skipTranslogRecovery() throws IOException {
         assert engineUnsafe() == null : "engine was already created";
-        Map<String, Mapping> recoveredTypes = internalPerformTranslogRecovery(true);
+        Map<String, Mapping> recoveredTypes = internalPerformTranslogRecovery(true, true);
         assert recoveredTypes.isEmpty();
         assert recoveryState.getTranslog().recoveredOperations() == 0;
     }
@@ -104,6 +104,7 @@ public final class ShadowIndexShard extends IndexShard {
     protected Engine newEngine(boolean skipInitialTranslogRecovery, EngineConfig config) {
         assert this.shardRouting.primary() == false;
         assert skipInitialTranslogRecovery : "can not recover from gateway";
+        config.setCreate(false); // hardcoded - we always expect an index to be present
         return engineFactory.newReadOnlyEngine(config);
     }
 
@@ -246,7 +246,7 @@ public class StoreRecoveryService extends AbstractIndexShardComponent implements
                     recoveryState.getTranslog().totalOperations(0);
                     recoveryState.getTranslog().totalOperationsOnStart(0);
                 }
-                typesToUpdate = indexShard.performTranslogRecovery();
+                typesToUpdate = indexShard.performTranslogRecovery(indexShouldExists);
 
                 indexShard.finalizeRecovery();
                 String indexName = indexShard.shardId().index().name();
@@ -318,7 +318,7 @@ public class StoreRecoveryService extends AbstractIndexShardComponent implements
                 snapshotShardId = new ShardId(restoreSource.index(), shardId.id());
             }
             indexShardRepository.restore(restoreSource.snapshotId(), restoreSource.version(), shardId, snapshotShardId, recoveryState);
-            indexShard.skipTranslogRecovery(true);
+            indexShard.skipTranslogRecovery();
             indexShard.finalizeRecovery();
             indexShard.postRecovery("restore done");
             restoreService.indexShardRestoreCompleted(restoreSource.snapshotId(), shardId);
@@ -209,7 +209,7 @@ public class TranslogRecoveryPerformer {
             query = queryParserService.parseQuery(source).query();
         } catch (QueryParsingException ex) {
             // for BWC we try to parse directly the query since pre 1.0.0.Beta2 we didn't require a top level query field
-            if ( queryParserService.getIndexCreatedVersion().onOrBefore(Version.V_1_0_0_Beta2)) {
+            if (queryParserService.getIndexCreatedVersion().onOrBefore(Version.V_1_0_0_Beta2)) {
                 try {
                     XContentParser parser = XContentHelper.createParser(source);
                     ParsedQuery parse = queryParserService.parse(parser);
@@ -125,9 +125,11 @@ public class RecoverySettings extends AbstractComponent implements Closeable {
 
 
         this.concurrentStreams = settings.getAsInt("indices.recovery.concurrent_streams", settings.getAsInt("index.shard.recovery.concurrent_streams", 3));
-        this.concurrentStreamPool = EsExecutors.newScaling(0, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[recovery_stream]"));
+        this.concurrentStreamPool = EsExecutors.newScaling("recovery_stream", 0, concurrentStreams, 60, TimeUnit.SECONDS,
+                EsExecutors.daemonThreadFactory(settings, "[recovery_stream]"));
         this.concurrentSmallFileStreams = settings.getAsInt("indices.recovery.concurrent_small_file_streams", settings.getAsInt("index.shard.recovery.concurrent_small_file_streams", 2));
-        this.concurrentSmallFileStreamPool = EsExecutors.newScaling(0, concurrentSmallFileStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[small_file_recovery_stream]"));
+        this.concurrentSmallFileStreamPool = EsExecutors.newScaling("small_file_recovery_stream", 0, concurrentSmallFileStreams, 60,
+                TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[small_file_recovery_stream]"));
 
         this.maxBytesPerSec = settings.getAsBytesSize("indices.recovery.max_bytes_per_sec", settings.getAsBytesSize("indices.recovery.max_size_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB)));
         if (maxBytesPerSec.bytes() <= 0) {
@@ -435,6 +435,7 @@ public class RecoverySourceHandler {
                     exception.addSuppressed(remoteException);
                     logger.warn("{} Remote file corruption during finalization on node {}, recovering {}. local checksum OK",
                             corruptIndexException, shard.shardId(), request.targetNode());
+                    throw exception;
                 } else {
                     throw remoteException;
                 }
@@ -274,7 +274,7 @@ public class RecoveryTarget extends AbstractComponent {
         try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
             final RecoveryStatus recoveryStatus = statusRef.status();
             recoveryStatus.state().getTranslog().totalOperations(request.totalTranslogOps());
-            recoveryStatus.indexShard().skipTranslogRecovery(false);
+            recoveryStatus.indexShard().skipTranslogRecovery();
         }
         channel.sendResponse(TransportResponse.Empty.INSTANCE);
     }
@@ -406,9 +406,13 @@ public class RecoveryTarget extends AbstractComponent {
                     logger.debug("Failed to clean lucene index", e);
                     ex.addSuppressed(e);
                 }
-                throw new RecoveryFailedException(recoveryStatus.state(), "failed to clean after recovery", ex);
+                RecoveryFailedException rfe = new RecoveryFailedException(recoveryStatus.state(), "failed to clean after recovery", ex);
+                recoveryStatus.fail(rfe, true);
+                throw rfe;
             } catch (Exception ex) {
-                throw new RecoveryFailedException(recoveryStatus.state(), "failed to clean after recovery", ex);
+                RecoveryFailedException rfe = new RecoveryFailedException(recoveryStatus.state(), "failed to clean after recovery", ex);
+                recoveryStatus.fail(rfe, true);
+                throw rfe;
             }
             channel.sendResponse(TransportResponse.Empty.INSTANCE);
         }
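Note: on clean-up failure the recovery is now marked failed before the exception propagates, so the RecoveryStatus is torn down even if the throw is swallowed upstream. The shape of the pattern (cleanLuceneIndex is a hypothetical stand-in for the guarded work):

    try {
        cleanLuceneIndex();
    } catch (Exception ex) {
        RecoveryFailedException rfe =
                new RecoveryFailedException(recoveryStatus.state(), "failed to clean after recovery", ex);
        recoveryStatus.fail(rfe, true); // release the status and notify listeners first
        throw rfe;                      // then propagate to the transport layer
    }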
@@ -22,7 +22,6 @@ package org.elasticsearch.plugins;
 import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterators;
-
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchTimeoutException;
 import org.elasticsearch.ExceptionsHelper;
@@ -132,6 +131,12 @@ public class PluginManager {
         // first, try directly from the URL provided
         if (url != null) {
             URL pluginUrl = new URL(url);
+            boolean isSecureProcotol = "https".equalsIgnoreCase(pluginUrl.getProtocol());
+            boolean isAuthInfoSet = !Strings.isNullOrEmpty(pluginUrl.getUserInfo());
+            if (isAuthInfoSet && !isSecureProcotol) {
+                throw new IOException("Basic auth is only supported for HTTPS!");
+            }
+
             terminal.println("Trying %s ...", pluginUrl.toExternalForm());
             try {
                 downloadHelper.download(pluginUrl, pluginFile, progress, this.timeout);
@@ -425,7 +430,10 @@ public class PluginManager {
             // Elasticsearch new download service uses groupId org.elasticsearch.plugins from 2.0.0
             if (user == null) {
                 // TODO Update to https
-                addUrl(urls, String.format(Locale.ROOT, "http://download.elastic.co/org.elasticsearch.plugins/%1$s/%1$s-%2$s.zip", repo, version));
+                if (Version.CURRENT.snapshot()) {
+                    addUrl(urls, String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/snapshot/org/elasticsearch/plugin/%s/%s-SNAPSHOT/%s-%s-SNAPSHOT.zip", repo, version, repo, version));
+                }
+                addUrl(urls, String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip", repo, version, repo, version));
             } else {
                 // Elasticsearch old download service
                 // TODO Update to https
@@ -171,7 +171,7 @@ public class ScriptService extends AbstractComponent implements Closeable {
         this.scriptModes = new ScriptModes(this.scriptEnginesByLang, scriptContextRegistry, settings);
 
         // add file watcher for static scripts
-        scriptsDirectory = env.configFile().resolve("scripts");
+        scriptsDirectory = env.scriptsFile();
         if (logger.isTraceEnabled()) {
             logger.trace("Using scripts directory [{}] ", scriptsDirectory);
         }
@@ -151,7 +151,9 @@ public final class DirectCandidateGenerator extends CandidateGenerator {
 
             if (posIncAttr.getPositionIncrement() > 0 && result.get().bytesEquals(candidate.term)) {
                 BytesRef term = result.toBytesRef();
-                long freq = frequency(term);
+                // We should not use frequency(term) here because it will analyze the term again
+                // If preFilter and postFilter are the same analyzer it would fail.
+                long freq = internalFrequency(term);
                 candidates.add(new Candidate(result.toBytesRef(), freq, candidate.stringDistance, score(candidate.frequency, candidate.stringDistance, dictSize), false));
             } else {
                 candidates.add(new Candidate(result.toBytesRef(), candidate.frequency, nonErrorLikelihood, score(candidate.frequency, candidate.stringDistance, dictSize), false));
@@ -336,7 +336,7 @@ public class ThreadPool extends AbstractComponent {
             } else {
                 logger.debug("creating thread_pool [{}], type [{}], keep_alive [{}]", name, type, keepAlive);
             }
-            Executor executor = EsExecutors.newCached(keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
+            Executor executor = EsExecutors.newCached(name, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
             return new ExecutorHolder(executor, new Info(name, type, -1, -1, keepAlive, null));
         } else if ("fixed".equals(type)) {
             int defaultSize = defaultSettings.getAsInt("size", EsExecutors.boundedNumberOfProcessors(settings));
@@ -371,7 +371,7 @@ public class ThreadPool extends AbstractComponent {
             int size = settings.getAsInt("size", defaultSize);
             SizeValue queueSize = getAsSizeOrUnbounded(settings, "capacity", getAsSizeOrUnbounded(settings, "queue", getAsSizeOrUnbounded(settings, "queue_size", defaultQueueSize)));
             logger.debug("creating thread_pool [{}], type [{}], size [{}], queue_size [{}]", name, type, size, queueSize);
-            Executor executor = EsExecutors.newFixed(size, queueSize == null ? -1 : (int) queueSize.singles(), threadFactory);
+            Executor executor = EsExecutors.newFixed(name, size, queueSize == null ? -1 : (int) queueSize.singles(), threadFactory);
             return new ExecutorHolder(executor, new Info(name, type, size, size, null, queueSize));
         } else if ("scaling".equals(type)) {
             TimeValue defaultKeepAlive = defaultSettings.getAsTime("keep_alive", timeValueMinutes(5));
@@ -415,7 +415,7 @@ public class ThreadPool extends AbstractComponent {
             } else {
                 logger.debug("creating thread_pool [{}], type [{}], min [{}], size [{}], keep_alive [{}]", name, type, min, size, keepAlive);
             }
-            Executor executor = EsExecutors.newScaling(min, size, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
+            Executor executor = EsExecutors.newScaling(name, min, size, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory);
             return new ExecutorHolder(executor, new Info(name, type, min, size, keepAlive, null));
         }
         throw new IllegalArgumentException("No type found [" + type + "], for [" + name + "]");
@@ -82,7 +82,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
         int queueSize = this.settings.getAsInt(TRANSPORT_LOCAL_QUEUE, -1);
         logger.debug("creating [{}] workers, queue_size [{}]", workerCount, queueSize);
         final ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(this.settings, LOCAL_TRANSPORT_THREAD_NAME_PREFIX);
-        this.workers = EsExecutors.newFixed(workerCount, queueSize, threadFactory);
+        this.workers = EsExecutors.newFixed(LOCAL_TRANSPORT_THREAD_NAME_PREFIX, workerCount, queueSize, threadFactory);
         this.namedWriteableRegistry = namedWriteableRegistry;
     }
 
@@ -74,6 +74,7 @@ public class SecurityTests extends ESTestCase {
         Settings.Builder settingsBuilder = Settings.builder();
         settingsBuilder.put("path.home", esHome.resolve("home").toString());
         settingsBuilder.put("path.conf", esHome.resolve("conf").toString());
+        settingsBuilder.put("path.scripts", esHome.resolve("scripts").toString());
         settingsBuilder.put("path.plugins", esHome.resolve("plugins").toString());
         settingsBuilder.putArray("path.data", esHome.resolve("data1").toString(), esHome.resolve("data2").toString());
         settingsBuilder.put("path.logs", esHome.resolve("logs").toString());
@@ -109,6 +110,8 @@ public class SecurityTests extends ESTestCase {
         assertExactPermissions(new FilePermission(environment.libFile().toString(), "read,readlink"), permissions);
         // config file: ro
         assertExactPermissions(new FilePermission(environment.configFile().toString(), "read,readlink"), permissions);
+        // scripts file: ro
+        assertExactPermissions(new FilePermission(environment.scriptsFile().toString(), "read,readlink"), permissions);
         // plugins: ro
         assertExactPermissions(new FilePermission(environment.pluginsFile().toString(), "read,readlink"), permissions);
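The test mirrors the policy change: the scripts directory gets the same read-only grant as conf and plugins. A self-contained sketch of what "read,readlink" on a directory tree means in practice (the path is hypothetical, and the recursive "/-" form is an assumption for illustration):

    import java.io.FilePermission;
    import java.security.Permissions;

    public class ScriptsPermissionSketch {
        public static void main(String[] args) {
            // Grant read-only access to everything under a scripts directory.
            Permissions policy = new Permissions();
            policy.add(new FilePermission("/es/config/scripts/-", "read,readlink"));

            System.out.println(policy.implies(new FilePermission("/es/config/scripts/calc.groovy", "read")));  // true
            System.out.println(policy.implies(new FilePermission("/es/config/scripts/calc.groovy", "write"))); // false
        }
    }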
@@ -0,0 +1,38 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing;
+
+/**
+ * A helper class that allows access to package private APIs for testing.
+ */
+public class ShardRoutingHelper {
+
+    public static void relocate(ShardRouting routing, String nodeId) {
+        routing.relocate(nodeId);
+    }
+
+    public static void moveToStarted(ShardRouting routing) {
+        routing.moveToStarted();
+    }
+
+    public static void initialize(ShardRouting routing, String nodeId) {
+        routing.initialize(nodeId);
+    }
+}
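Since relocate, moveToStarted, and initialize are package private on ShardRouting, tests outside the package drive a shard's lifecycle through this helper. The disk-threshold unit test added later in this commit uses it exactly like this (trimmed from that test):

    ShardRouting shard = ShardRouting.newUnassigned("test", 0, null, false,
            new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
    ShardRoutingHelper.initialize(shard, "node1");   // assign to a node
    ShardRoutingHelper.moveToStarted(shard);         // mark it active
    ShardRoutingHelper.relocate(shard, "node2");     // begin relocating it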
@@ -23,6 +23,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.EmptyClusterInfoService;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -408,7 +409,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
                 unassigned.clear();
                 return changed;
             }
-        }), ClusterInfoService.EMPTY);
+        }), EmptyClusterInfoService.INSTANCE);
         MetaData.Builder metaDataBuilder = MetaData.builder();
         RoutingTable.Builder routingTableBuilder = RoutingTable.builder();
         IndexMetaData.Builder indexMeta = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(1);
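With the EMPTY constant removed from the ClusterInfoService interface, callers reference the singleton on the class that actually implements the no-op behaviour. Every affected call site in this commit changes the same way, along the lines of (allocationDeciders and shardsAllocators stand in for the surrounding test setup):

    new AllocationService(settings, allocationDeciders, shardsAllocators, EmptyClusterInfoService.INSTANCE);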
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing.allocation;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.EmptyClusterInfoService;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.MetaData.Builder;
@@ -61,7 +62,7 @@ public class RandomAllocationDeciderTests extends ESAllocationTestCase {
         RandomAllocationDecider randomAllocationDecider = new RandomAllocationDecider(getRandom());
         AllocationService strategy = new AllocationService(settingsBuilder().build(), new AllocationDeciders(Settings.EMPTY,
                 new HashSet<>(Arrays.asList(new SameShardAllocationDecider(Settings.EMPTY),
-                        randomAllocationDecider))), new ShardsAllocators(NoopGatewayAllocator.INSTANCE), ClusterInfoService.EMPTY);
+                        randomAllocationDecider))), new ShardsAllocators(NoopGatewayAllocator.INSTANCE), EmptyClusterInfoService.INSTANCE);
         int indices = scaledRandomIntBetween(1, 20);
         Builder metaBuilder = MetaData.builder();
         int maxNumReplicas = 1;
@@ -20,7 +20,6 @@
 package org.elasticsearch.cluster.routing.allocation.decider;

 import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableMap;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.ClusterInfoService;
@@ -44,10 +43,7 @@ import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.index.shard.ShardId;
 import org.junit.Test;

-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
+import java.util.*;

 import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
 import static org.elasticsearch.common.settings.Settings.settingsBuilder;
@@ -77,7 +73,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
         Map<String, Long> shardSizes = new HashMap<>();
         shardSizes.put("[test][0][p]", 10L); // 10 bytes
         shardSizes.put("[test][0][r]", 10L);
-        final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+        final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes));

         AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
                 new HashSet<>(Arrays.asList(
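This and the following six hunks are one mechanical substitution, consistent with the Guava-to-JDK migration visible elsewhere in the commit: ClusterInfo is now fed JDK maps wrapped read-only instead of Guava ImmutableMap copies. One behavioural nuance worth a sketch: Collections.unmodifiableMap returns a view, not a copy, so the builder map stays live (harmless here because the tests populate the maps before wrapping):

    Map<String, DiskUsage> usages = new HashMap<>();
    Map<String, Long> shardSizes = new HashMap<>();
    shardSizes.put("[test][0][p]", 10L);
    // A read-only view rather than a defensive copy like ImmutableMap.copyOf:
    ClusterInfo info = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes));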
@@ -272,7 +268,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
         Map<String, Long> shardSizes = new HashMap<>();
         shardSizes.put("[test][0][p]", 10L); // 10 bytes
         shardSizes.put("[test][0][r]", 10L);
-        final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+        final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes));

         AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
                 new HashSet<>(Arrays.asList(
@@ -334,7 +330,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {

         // Make node without the primary now habitable to replicas
         usages.put(nodeWithoutPrimary, new DiskUsage(nodeWithoutPrimary, "", 100, 35)); // 65% used
-        final ClusterInfo clusterInfo2 = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+        final ClusterInfo clusterInfo2 = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes));
         cis = new ClusterInfoService() {
             @Override
             public ClusterInfo getClusterInfo() {
@@ -533,7 +529,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {

         Map<String, Long> shardSizes = new HashMap<>();
         shardSizes.put("[test][0][p]", 10L); // 10 bytes
-        final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+        final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes));

         AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
                 new HashSet<>(Arrays.asList(
@@ -600,7 +596,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
         Map<String, Long> shardSizes = new HashMap<>();
         shardSizes.put("[test][0][p]", 10L); // 10 bytes
         shardSizes.put("[test][0][r]", 10L); // 10 bytes
-        final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+        final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes));

         AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
                 new HashSet<>(Arrays.asList(
@@ -704,7 +700,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
         shardSizes.put("[test][0][r]", 14L);
         shardSizes.put("[test2][0][p]", 1L); // 1 bytes
         shardSizes.put("[test2][0][r]", 1L);
-        final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+        final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes));

         AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY,
                 new HashSet<>(Arrays.asList(
@@ -807,7 +803,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
         Map<String, Long> shardSizes = new HashMap<>();
         shardSizes.put("[test][0][p]", 40L);
         shardSizes.put("[test][1][p]", 40L);
-        final ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
+        final ClusterInfo clusterInfo = new ClusterInfo(Collections.unmodifiableMap(usages), Collections.unmodifiableMap(shardSizes));

         DiskThresholdDecider diskThresholdDecider = new DiskThresholdDecider(diskSettings);
         MetaData metaData = MetaData.builder()
@@ -19,17 +19,24 @@

 package org.elasticsearch.cluster.routing.allocation.decider;

-import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.ClusterInfoService;
-import org.elasticsearch.cluster.DiskUsage;
+import org.elasticsearch.cluster.EmptyClusterInfoService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingHelper;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.Test;

+import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -44,19 +51,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase {
     public void testDynamicSettings() {
         NodeSettingsService nss = new NodeSettingsService(Settings.EMPTY);

-        ClusterInfoService cis = new ClusterInfoService() {
-            @Override
-            public ClusterInfo getClusterInfo() {
-                Map<String, DiskUsage> usages = new HashMap<>();
-                Map<String, Long> shardSizes = new HashMap<>();
-                return new ClusterInfo(ImmutableMap.copyOf(usages), ImmutableMap.copyOf(shardSizes));
-            }
-
-            @Override
-            public void addListener(Listener listener) {
-                // noop
-            }
-        };
+        ClusterInfoService cis = EmptyClusterInfoService.INSTANCE;
         DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null);

         assertThat(decider.getFreeBytesThresholdHigh(), equalTo(ByteSizeValue.parseBytesSizeValue("0b", "test")));
@@ -94,4 +89,56 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase {
         assertFalse("relocations should now be disabled", decider.isIncludeRelocations());
     }

+    public void testShardSizeAndRelocatingSize() {
+        Map<String, Long> shardSizes = new HashMap<>();
+        shardSizes.put("[test][0][r]", 10L);
+        shardSizes.put("[test][1][r]", 100L);
+        shardSizes.put("[test][2][r]", 1000L);
+        shardSizes.put("[other][0][p]", 10000L);
+        ClusterInfo info = new ClusterInfo(Collections.EMPTY_MAP, shardSizes);
+        ShardRouting test_0 = ShardRouting.newUnassigned("test", 0, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+        ShardRoutingHelper.initialize(test_0, "node1");
+        ShardRoutingHelper.moveToStarted(test_0);
+        ShardRoutingHelper.relocate(test_0, "node2");
+
+        ShardRouting test_1 = ShardRouting.newUnassigned("test", 1, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+        ShardRoutingHelper.initialize(test_1, "node2");
+        ShardRoutingHelper.moveToStarted(test_1);
+        ShardRoutingHelper.relocate(test_1, "node1");
+
+        ShardRouting test_2 = ShardRouting.newUnassigned("test", 2, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+        ShardRoutingHelper.initialize(test_2, "node1");
+        ShardRoutingHelper.moveToStarted(test_2);
+
+        assertEquals(1000l, DiskThresholdDecider.getShardSize(test_2, info));
+        assertEquals(100l, DiskThresholdDecider.getShardSize(test_1, info));
+        assertEquals(10l, DiskThresholdDecider.getShardSize(test_0, info));
+
+        RoutingNode node = new RoutingNode("node1", new DiscoveryNode("node1", LocalTransportAddress.PROTO, Version.CURRENT), Arrays.asList(test_0, test_1.buildTargetRelocatingShard(), test_2));
+        assertEquals(100l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false));
+        assertEquals(90l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true));
+
+        ShardRouting test_3 = ShardRouting.newUnassigned("test", 3, null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+        ShardRoutingHelper.initialize(test_3, "node1");
+        ShardRoutingHelper.moveToStarted(test_3);
+        assertEquals(0l, DiskThresholdDecider.getShardSize(test_3, info));
+
+        ShardRouting other_0 = ShardRouting.newUnassigned("other", 0, null, randomBoolean(), new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
+        ShardRoutingHelper.initialize(other_0, "node2");
+        ShardRoutingHelper.moveToStarted(other_0);
+        ShardRoutingHelper.relocate(other_0, "node1");
+
+        node = new RoutingNode("node1", new DiscoveryNode("node1", LocalTransportAddress.PROTO, Version.CURRENT), Arrays.asList(test_0, test_1.buildTargetRelocatingShard(), test_2, other_0.buildTargetRelocatingShard()));
+        if (other_0.primary()) {
+            assertEquals(10100l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false));
+            assertEquals(10090l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true));
+        } else {
+            assertEquals(100l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, false));
+            assertEquals(90l, DiskThresholdDecider.sizeOfRelocatingShards(node, info, true));
+        }
+    }
 }
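Reading the assertions: node1 holds test_0 (10 bytes, relocating away to node2), the incoming relocation target of test_1 (100 bytes, arriving from node2), and the started test_2. The expected values suggest that sizeOfRelocatingShards(node, info, false) counts only incoming relocations (100), while passing true additionally subtracts shards moving away: 100 - 10 = 90. The primary branch follows the same arithmetic with the extra 10000-byte incoming shard: 10000 + 100 = 10100 and 10100 - 10 = 10090.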
@@ -19,6 +19,7 @@

 package org.elasticsearch.common.util.concurrent;

+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.Test;
@@ -27,10 +28,12 @@ import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;

+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.lessThan;

 /**
+ * Tests for EsExecutors and its components like EsAbortPolicy.
  */
 public class EsExecutorsTests extends ESTestCase {
@@ -38,9 +41,8 @@ public class EsExecutorsTests extends ESTestCase {
         return TimeUnit.values()[between(0, TimeUnit.values().length - 1)];
     }

-    @Test
     public void testFixedForcedExecution() throws Exception {
-        EsThreadPoolExecutor executor = EsExecutors.newFixed(1, 1, EsExecutors.daemonThreadFactory("test"));
+        EsThreadPoolExecutor executor = EsExecutors.newFixed(getTestName(), 1, 1, EsExecutors.daemonThreadFactory("test"));
         final CountDownLatch wait = new CountDownLatch(1);

         final CountDownLatch exec1Wait = new CountDownLatch(1);
@@ -101,9 +103,8 @@ public class EsExecutorsTests extends ESTestCase {
         executor.shutdownNow();
     }

-    @Test
     public void testFixedRejected() throws Exception {
-        EsThreadPoolExecutor executor = EsExecutors.newFixed(1, 1, EsExecutors.daemonThreadFactory("test"));
+        EsThreadPoolExecutor executor = EsExecutors.newFixed(getTestName(), 1, 1, EsExecutors.daemonThreadFactory("test"));
         final CountDownLatch wait = new CountDownLatch(1);

         final CountDownLatch exec1Wait = new CountDownLatch(1);
@@ -156,13 +157,12 @@ public class EsExecutorsTests extends ESTestCase {
         terminate(executor);
     }

-    @Test
     public void testScaleUp() throws Exception {
         final int min = between(1, 3);
         final int max = between(min + 1, 6);
         final ThreadBarrier barrier = new ThreadBarrier(max + 1);

-        ThreadPoolExecutor pool = EsExecutors.newScaling(min, max, between(1, 100), randomTimeUnit(), EsExecutors.daemonThreadFactory("test"));
+        ThreadPoolExecutor pool = EsExecutors.newScaling(getTestName(), min, max, between(1, 100), randomTimeUnit(), EsExecutors.daemonThreadFactory("test"));
         assertThat("Min property", pool.getCorePoolSize(), equalTo(min));
         assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max));
@@ -193,13 +193,12 @@ public class EsExecutorsTests extends ESTestCase {
         terminate(pool);
     }

-    @Test
     public void testScaleDown() throws Exception {
         final int min = between(1, 3);
         final int max = between(min + 1, 6);
         final ThreadBarrier barrier = new ThreadBarrier(max + 1);

-        final ThreadPoolExecutor pool = EsExecutors.newScaling(min, max, between(1, 100), TimeUnit.MILLISECONDS, EsExecutors.daemonThreadFactory("test"));
+        final ThreadPoolExecutor pool = EsExecutors.newScaling(getTestName(), min, max, between(1, 100), TimeUnit.MILLISECONDS, EsExecutors.daemonThreadFactory("test"));
         assertThat("Min property", pool.getCorePoolSize(), equalTo(min));
         assertThat("Max property", pool.getMaximumPoolSize(), equalTo(max));
@@ -236,4 +235,77 @@ public class EsExecutorsTests extends ESTestCase {
         });
         terminate(pool);
     }

+    public void testRejectionMessageAndShuttingDownFlag() throws InterruptedException {
+        int pool = between(1, 10);
+        int queue = between(0, 100);
+        int actions = queue + pool;
+        final CountDownLatch latch = new CountDownLatch(1);
+        EsThreadPoolExecutor executor = EsExecutors.newFixed(getTestName(), pool, queue, EsExecutors.daemonThreadFactory("dummy"));
+        try {
+            for (int i = 0; i < actions; i++) {
+                executor.execute(new Runnable() {
+                    @Override
+                    public void run() {
+                        try {
+                            latch.await();
+                        } catch (InterruptedException e) {
+                            throw new RuntimeException(e);
+                        }
+                    }
+                });
+            }
+            try {
+                executor.execute(new Runnable() {
+                    @Override
+                    public void run() {
+                        // Doesn't matter is going to be rejected
+                    }
+
+                    @Override
+                    public String toString() {
+                        return "dummy runnable";
+                    }
+                });
+                fail("Didn't get a rejection when we expected one.");
+            } catch (EsRejectedExecutionException e) {
+                assertFalse("Thread pool registering as terminated when it isn't", e.isExecutorShutdown());
+                String message = ExceptionsHelper.detailedMessage(e);
+                assertThat(message, containsString("of dummy runnable"));
+                assertThat(message, containsString("on EsThreadPoolExecutor[testRejectionMessage"));
+                assertThat(message, containsString("queue capacity = " + queue));
+                assertThat(message, containsString("[Running"));
+                assertThat(message, containsString("active threads = " + pool));
+                assertThat(message, containsString("queued tasks = " + queue));
+                assertThat(message, containsString("completed tasks = 0"));
+            }
+        } finally {
+            latch.countDown();
+            terminate(executor);
+        }
+        try {
+            executor.execute(new Runnable() {
+                @Override
+                public void run() {
+                    // Doesn't matter is going to be rejected
+                }
+
+                @Override
+                public String toString() {
+                    return "dummy runnable";
+                }
+            });
+            fail("Didn't get a rejection when we expected one.");
+        } catch (EsRejectedExecutionException e) {
+            assertTrue("Thread pool not registering as terminated when it is", e.isExecutorShutdown());
+            String message = ExceptionsHelper.detailedMessage(e);
+            assertThat(message, containsString("of dummy runnable"));
+            assertThat(message, containsString("on EsThreadPoolExecutor[" + getTestName()));
+            assertThat(message, containsString("queue capacity = " + queue));
+            assertThat(message, containsString("[Terminated"));
+            assertThat(message, containsString("active threads = 0"));
+            assertThat(message, containsString("queued tasks = 0"));
+            assertThat(message, containsString("completed tasks = " + actions));
+        }
+    }
 }
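Beyond the message format, the test pins down the contract that EsRejectedExecutionException now carries an isExecutorShutdown() flag distinguishing back-pressure from shutdown. A hedged sketch of how calling code can branch on it (task is illustrative):

    try {
        executor.execute(task);
    } catch (EsRejectedExecutionException e) {
        if (e.isExecutorShutdown()) {
            // Rejected because the pool is terminating; often safe to drop during node shutdown.
        } else {
            // Genuine back-pressure: the queue is full while the pool is still running.
            throw e;
        }
    }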
@@ -61,7 +61,7 @@ public class PrioritizedExecutorsTests extends ESTestCase {

     @Test
     public void testSubmitPrioritizedExecutorWithRunnables() throws Exception {
-        ExecutorService executor = EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory(getTestName()));
+        ExecutorService executor = EsExecutors.newSinglePrioritizing(getTestName(), EsExecutors.daemonThreadFactory(getTestName()));
         List<Integer> results = new ArrayList<>(8);
         CountDownLatch awaitingLatch = new CountDownLatch(1);
         CountDownLatch finishedLatch = new CountDownLatch(8);

@@ -91,7 +91,7 @@ public class PrioritizedExecutorsTests extends ESTestCase {

     @Test
     public void testExecutePrioritizedExecutorWithRunnables() throws Exception {
-        ExecutorService executor = EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory(getTestName()));
+        ExecutorService executor = EsExecutors.newSinglePrioritizing(getTestName(), EsExecutors.daemonThreadFactory(getTestName()));
         List<Integer> results = new ArrayList<>(8);
         CountDownLatch awaitingLatch = new CountDownLatch(1);
         CountDownLatch finishedLatch = new CountDownLatch(8);

@@ -121,7 +121,7 @@ public class PrioritizedExecutorsTests extends ESTestCase {

     @Test
     public void testSubmitPrioritizedExecutorWithCallables() throws Exception {
-        ExecutorService executor = EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory(getTestName()));
+        ExecutorService executor = EsExecutors.newSinglePrioritizing(getTestName(), EsExecutors.daemonThreadFactory(getTestName()));
         List<Integer> results = new ArrayList<>(8);
         CountDownLatch awaitingLatch = new CountDownLatch(1);
         CountDownLatch finishedLatch = new CountDownLatch(8);

@@ -151,7 +151,7 @@ public class PrioritizedExecutorsTests extends ESTestCase {

     @Test
     public void testSubmitPrioritizedExecutorWithMixed() throws Exception {
-        ExecutorService executor = EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory(getTestName()));
+        ExecutorService executor = EsExecutors.newSinglePrioritizing(getTestName(), EsExecutors.daemonThreadFactory(getTestName()));
         List<Integer> results = new ArrayList<>(8);
         CountDownLatch awaitingLatch = new CountDownLatch(1);
         CountDownLatch finishedLatch = new CountDownLatch(8);

@@ -182,7 +182,7 @@ public class PrioritizedExecutorsTests extends ESTestCase {
     @Test
     public void testTimeout() throws Exception {
         ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor(EsExecutors.daemonThreadFactory(getTestName()));
-        PrioritizedEsThreadPoolExecutor executor = EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory(getTestName()));
+        PrioritizedEsThreadPoolExecutor executor = EsExecutors.newSinglePrioritizing(getTestName(), EsExecutors.daemonThreadFactory(getTestName()));
         final CountDownLatch invoked = new CountDownLatch(1);
         final CountDownLatch block = new CountDownLatch(1);
         executor.execute(new Runnable() {

@@ -246,7 +246,7 @@ public class PrioritizedExecutorsTests extends ESTestCase {
         ThreadPool threadPool = new ThreadPool("test");
         final ScheduledThreadPoolExecutor timer = (ScheduledThreadPoolExecutor) threadPool.scheduler();
         final AtomicBoolean timeoutCalled = new AtomicBoolean();
-        PrioritizedEsThreadPoolExecutor executor = EsExecutors.newSinglePrioritizing(EsExecutors.daemonThreadFactory(getTestName()));
+        PrioritizedEsThreadPoolExecutor executor = EsExecutors.newSinglePrioritizing(getTestName(), EsExecutors.daemonThreadFactory(getTestName()));
         final CountDownLatch invoked = new CountDownLatch(1);
         executor.execute(new Runnable() {
             @Override
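All six call sites in this class change identically; like newFixed and newScaling, the single-threaded prioritized executor now takes its name first:

    PrioritizedEsThreadPoolExecutor executor =
            EsExecutors.newSinglePrioritizing(getTestName(), EsExecutors.daemonThreadFactory(getTestName()));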
@@ -39,6 +39,7 @@ import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.bwcompat.OldIndexBackwardsCompatibilityIT;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -256,7 +257,11 @@ public class InternalEngineTests extends ESTestCase {
                 // we don't need to notify anybody in this test
             }
         }, new TranslogHandler(shardId.index().getName()), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), new IndexSearcherWrappingService(new HashSet<>(Arrays.asList(wrappers))), translogConfig);
+        try {
+            config.setCreate(Lucene.indexExists(store.directory()) == false);
+        } catch (IOException e) {
+            throw new ElasticsearchException("can't find index?", e);
+        }
         return config;
     }
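As far as the test changes show, the rule being encoded is that an engine config should only be in create mode when no Lucene index exists in the store yet; the two follow-up hunks force setCreate(false) before reopening an existing engine, so the prior commit and its translog are reused rather than overwritten.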
@@ -775,6 +780,7 @@ public class InternalEngineTests extends ESTestCase {
             // this so we have to disable the check explicitly
             directory.setPreventDoubleWrite(false);
         }
+        config.setCreate(false);
         engine = new InternalEngine(config, false);
         assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID));
     }
@@ -1869,6 +1875,7 @@ public class InternalEngineTests extends ESTestCase {
         parser.mappingUpdate = dynamicUpdate();

         engine.close();
+        engine.config().setCreate(false);
         engine = new InternalEngine(engine.config(), false); // we need to reuse the engine config unless the parser.mappingModified won't work

         try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
@@ -29,6 +29,7 @@ import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Nullable;
@@ -226,6 +227,11 @@ public class ShadowEngineTests extends ESTestCase {
             public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) {
                 // we don't need to notify anybody in this test
             }}, null, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), new IndexSearcherWrappingService(), translogConfig);
+        try {
+            config.setCreate(Lucene.indexExists(store.directory()) == false);
+        } catch (IOException e) {
+            throw new ElasticsearchException("can't find index?", e);
+        }
         return config;
     }
@@ -19,7 +19,6 @@

 package org.elasticsearch.indices.state;

-import com.google.common.collect.ImmutableMap;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
@@ -92,9 +91,7 @@ public class RareClusterStateIT extends ESIntegTestCase {
                 .nodes(DiscoveryNodes.EMPTY_NODES)
                 .build()
         );
-        ClusterInfo clusterInfo = new ClusterInfo(ImmutableMap.<String, DiskUsage>of(), ImmutableMap.<String, Long>of());
-
-        RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current.nodes(), clusterInfo);
+        RoutingAllocation routingAllocation = new RoutingAllocation(allocationDeciders, routingNodes, current.nodes(), ClusterInfo.EMPTY);
         allocator.allocateUnassigned(routingAllocation);
     }
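ClusterInfo.EMPTY, an instance backed by empty maps, replaces the ad-hoc new ClusterInfo(ImmutableMap.of(), ImmutableMap.of()) pattern here, the same cleanup this commit applies to ClusterInfoService.EMPTY elsewhere.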
@@ -18,9 +18,12 @@
 */
 package org.elasticsearch.plugins;

+import com.google.common.base.Charsets;
 import org.apache.http.impl.client.HttpClients;
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.Version;
+import org.elasticsearch.common.Base64;
+import org.elasticsearch.common.cli.CliTool;
 import org.elasticsearch.common.cli.CliTool.ExitStatus;
 import org.elasticsearch.common.cli.CliToolTestCase.CaptureOutputTerminal;
 import org.elasticsearch.common.collect.Tuple;
@@ -32,11 +35,23 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.elasticsearch.test.junit.annotations.Network;
 import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
 import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.jboss.netty.bootstrap.ServerBootstrap;
+import org.jboss.netty.channel.*;
+import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+import org.jboss.netty.handler.codec.http.*;
+import org.jboss.netty.handler.ssl.SslContext;
+import org.jboss.netty.handler.ssl.SslHandler;
+import org.jboss.netty.handler.ssl.util.InsecureTrustManagerFactory;
+import org.jboss.netty.handler.ssl.util.SelfSignedCertificate;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLSocketFactory;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.FileVisitResult;
 import java.nio.file.Files;
@@ -46,6 +61,8 @@ import java.nio.file.attribute.BasicFileAttributes;
 import java.nio.file.attribute.PosixFileAttributeView;
 import java.nio.file.attribute.PosixFileAttributes;
 import java.nio.file.attribute.PosixFilePermission;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Locale;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipOutputStream;
@@ -59,6 +76,7 @@ import static org.elasticsearch.test.ESIntegTestCase.Scope;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertDirectoryExists;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists;
 import static org.hamcrest.Matchers.*;
+import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;

 @ClusterScope(scope = Scope.TEST, numDataNodes = 0, transportClientRatio = 0.0)
 @LuceneTestCase.SuppressFileSystems("*") // TODO: clean up this test to allow extra files
@@ -477,6 +495,77 @@ public class PluginManagerIT extends ESIntegTestCase {
         }
     }

+    @Test
+    public void testThatBasicAuthIsRejectedOnHttp() throws Exception {
+        assertStatus(String.format(Locale.ROOT, "install foo --url http://user:pass@localhost:12345/foo.zip --verbose"), CliTool.ExitStatus.IO_ERROR);
+        assertThat(terminal.getTerminalOutput(), hasItem(containsString("Basic auth is only supported for HTTPS!")));
+    }
+
+    @Test
+    public void testThatBasicAuthIsSupportedWithHttps() throws Exception {
+        assumeTrue("test requires security manager to be disabled", System.getSecurityManager() == null);
+
+        SSLSocketFactory defaultSocketFactory = HttpsURLConnection.getDefaultSSLSocketFactory();
+        ServerBootstrap serverBootstrap = new ServerBootstrap(new NioServerSocketChannelFactory());
+        SelfSignedCertificate ssc = new SelfSignedCertificate("localhost");
+
+        try {
+
+            // Create a trust manager that does not validate certificate chains:
+            SSLContext sc = SSLContext.getInstance("SSL");
+            sc.init(null, InsecureTrustManagerFactory.INSTANCE.getTrustManagers(), null);
+            HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory());
+
+            final List<HttpRequest> requests = new ArrayList<>();
+            final SslContext sslContext = SslContext.newServerContext(ssc.certificate(), ssc.privateKey());
+
+            serverBootstrap.setPipelineFactory(new ChannelPipelineFactory() {
+                @Override
+                public ChannelPipeline getPipeline() throws Exception {
+                    return Channels.pipeline(
+                            new SslHandler(sslContext.newEngine()),
+                            new HttpRequestDecoder(),
+                            new HttpResponseEncoder(),
+                            new LoggingServerHandler(requests)
+                    );
+                }
+            });
+
+            Channel channel = serverBootstrap.bind(new InetSocketAddress("localhost", 0));
+            int port = ((InetSocketAddress) channel.getLocalAddress()).getPort();
+            // IO_ERROR because there is no real file delivered...
+            assertStatus(String.format(Locale.ROOT, "install foo --url https://user:pass@localhost:%s/foo.zip --verbose --timeout 1s", port), ExitStatus.IO_ERROR);
+
+            assertThat(requests, hasSize(1));
+            String msg = String.format(Locale.ROOT, "Request header did not contain Authorization header, terminal output was: %s", terminal.getTerminalOutput());
+            assertThat(msg, requests.get(0).headers().contains("Authorization"), is(true));
+            assertThat(msg, requests.get(0).headers().get("Authorization"), is("Basic " + Base64.encodeBytes("user:pass".getBytes(Charsets.UTF_8))));
+        } finally {
+            HttpsURLConnection.setDefaultSSLSocketFactory(defaultSocketFactory);
+            serverBootstrap.releaseExternalResources();
+            ssc.delete();
+        }
+    }
+
+    private static class LoggingServerHandler extends SimpleChannelUpstreamHandler {
+
+        private List<HttpRequest> requests;
+
+        public LoggingServerHandler(List<HttpRequest> requests) {
+            this.requests = requests;
+        }
+
+        @Override
+        public void messageReceived(final ChannelHandlerContext ctx, final MessageEvent e) throws InterruptedException {
+            final HttpRequest request = (HttpRequest) e.getMessage();
+            requests.add(request);
+            final org.jboss.netty.handler.codec.http.HttpResponse response = new DefaultHttpResponse(HTTP_1_1, HttpResponseStatus.BAD_REQUEST);
+            ctx.getChannel().write(response);
+        }
+    }
+
     private Tuple<Settings, Environment> buildInitialSettings() throws IOException {
         Settings settings = settingsBuilder()
                 .put("discovery.zen.ping.multicast.enabled", false)
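The HTTPS test asserts the exact Authorization header the plugin manager must send when credentials are embedded in the download URL: standard HTTP Basic auth, i.e. the user:pass pair Base64-encoded. A sketch of the same computation using only the JDK (java.util.Base64 stands in for org.elasticsearch.common.Base64; the credentials are the test's dummy values):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public class BasicAuthHeaderSketch {
        public static void main(String[] args) {
            String credentials = "user:pass"; // dummy values from the test URL
            String header = "Basic " + Base64.getEncoder().encodeToString(credentials.getBytes(StandardCharsets.UTF_8));
            System.out.println(header); // prints: Basic dXNlcjpwYXNz
        }
    }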
@@ -29,8 +29,11 @@ import org.junit.Test;
 import java.io.IOException;
 import java.net.URL;
 import java.nio.file.Path;
+import java.util.Iterator;
+import java.util.Locale;

 import static org.elasticsearch.common.settings.Settings.settingsBuilder;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
@@ -62,22 +65,40 @@ public class PluginManagerUnitTests extends ESTestCase {
     public void testSimplifiedNaming() throws IOException {
         String pluginName = randomAsciiOfLength(10);
         PluginManager.PluginHandle handle = PluginManager.PluginHandle.parse(pluginName);
-        assertThat(handle.urls(), hasSize(1));
-        URL expected = new URL("http", "download.elastic.co", "/org.elasticsearch.plugins/" + pluginName + "/" +
+        assertThat(handle.urls(), hasSize(Version.CURRENT.snapshot() ? 2 : 1));
+
+        Iterator<URL> iterator = handle.urls().iterator();
+
+        if (Version.CURRENT.snapshot()) {
+            String expectedSnapshotUrl = String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/snapshot/org/elasticsearch/plugin/%s/%s-SNAPSHOT/%s-%s-SNAPSHOT.zip",
+                    pluginName, Version.CURRENT.number(), pluginName, Version.CURRENT.number());
+            assertThat(iterator.next(), is(new URL(expectedSnapshotUrl)));
+        }
+
+        URL expected = new URL("http", "download.elastic.co", "/elasticsearch/release/org/elasticsearch/plugin/" + pluginName + "/" + Version.CURRENT.number() + "/" +
                 pluginName + "-" + Version.CURRENT.number() + ".zip");
-        assertThat(handle.urls().get(0), is(expected));
+        assertThat(iterator.next(), is(expected));
     }

     @Test
     public void testTrimmingElasticsearchFromOfficialPluginName() throws IOException {
-        String randomName = randomAsciiOfLength(10);
-        String pluginName = randomFrom("elasticsearch-", "es-") + randomName;
-        PluginManager.PluginHandle handle = PluginManager.PluginHandle.parse(pluginName);
-        assertThat(handle.name, is(randomName));
-        assertThat(handle.urls(), hasSize(1));
-        URL expected = new URL("http", "download.elastic.co", "/org.elasticsearch.plugins/" + pluginName + "/" +
-                pluginName + "-" + Version.CURRENT.number() + ".zip");
-        assertThat(handle.urls().get(0), is(expected));
+        String randomPluginName = randomFrom(PluginManager.OFFICIAL_PLUGINS.asList());
+        PluginManager.PluginHandle handle = PluginManager.PluginHandle.parse(randomPluginName);
+        assertThat(handle.name, is(randomPluginName.replaceAll("^elasticsearch-", "")));
+        assertThat(handle.urls(), hasSize(Version.CURRENT.snapshot() ? 2 : 1));
+        Iterator<URL> iterator = handle.urls().iterator();
+
+        if (Version.CURRENT.snapshot()) {
+            String expectedSnapshotUrl = String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/snapshot/org/elasticsearch/plugin/%s/%s-SNAPSHOT/%s-%s-SNAPSHOT.zip",
+                    randomPluginName, Version.CURRENT.number(), randomPluginName, Version.CURRENT.number());
+            assertThat(iterator.next(), is(new URL(expectedSnapshotUrl)));
+        }
+
+        String releaseUrl = String.format(Locale.ROOT, "http://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/%s/%s/%s-%s.zip",
+                randomPluginName, Version.CURRENT.number(), randomPluginName, Version.CURRENT.number());
+        assertThat(iterator.next(), is(new URL(releaseUrl)));
     }

     @Test
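The format strings in both tests encode the new Maven-style layout on download.elastic.co, with a snapshot URL tried before the release URL on snapshot builds. For an illustrative plugin name and version (both hypothetical), the two candidates look like:

    http://download.elastic.co/elasticsearch/snapshot/org/elasticsearch/plugin/cloud-aws/2.0.0-SNAPSHOT/cloud-aws-2.0.0-SNAPSHOT.zip
    http://download.elastic.co/elasticsearch/release/org/elasticsearch/plugin/cloud-aws/2.0.0/cloud-aws-2.0.0.zip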
@@ -256,7 +256,6 @@ public class HDRPercentileRanksIT extends AbstractNumericTestCase {

     @Override
     @Test
-    @AwaitsFix(bugUrl="Fails with seed: B75FCDC119D90BBE, Colin to fix")
     public void testSingleValuedField_WithValueScript_WithParams() throws Exception {
         int sigDigits = randomSignificantDigits();
         Map<String, Object> params = new HashMap<>();
@@ -278,8 +278,12 @@ public class NoisyChannelSpellCheckerTests extends ESTestCase {
         assertThat(corrections.length, equalTo(1));
         assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("xorr the god jewel"));

+        // Test a special case where one of the suggest term is unchanged by the postFilter, 'II' here is unchanged by the reverse analyzer.
+        corrections = suggester.getCorrections(wrapper, new BytesRef("Quazar II"), generator, 1, 1, ir, "body", wordScorer, 1, 2).corrections;
+        assertThat(corrections.length, equalTo(1));
+        assertThat(corrections[0].join(new BytesRef(" ")).utf8ToString(), equalTo("quasar ii"));
     }

     @Test
     public void testMarvelHerosTrigram() throws IOException {
@@ -22,6 +22,7 @@ import com.google.common.collect.ImmutableSet;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.EmptyClusterInfoService;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
@ -66,7 +67,7 @@ public abstract class ESAllocationTestCase extends ESTestCase {
|
||||||
public static AllocationService createAllocationService(Settings settings, NodeSettingsService nodeSettingsService, Random random) {
|
public static AllocationService createAllocationService(Settings settings, NodeSettingsService nodeSettingsService, Random random) {
|
||||||
return new AllocationService(settings,
|
return new AllocationService(settings,
|
||||||
randomAllocationDeciders(settings, nodeSettingsService, random),
|
randomAllocationDeciders(settings, nodeSettingsService, random),
|
||||||
new ShardsAllocators(settings, NoopGatewayAllocator.INSTANCE), ClusterInfoService.EMPTY);
|
new ShardsAllocators(settings, NoopGatewayAllocator.INSTANCE), EmptyClusterInfoService.INSTANCE);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
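A minimal sketch of the pattern this hunk adopts, assuming only the names visible in the diff; the consumer class below is a hypothetical stand-in for illustration, not Elasticsearch code. The singleton now lives on the concrete EmptyClusterInfoService, so the ClusterInfoService interface no longer has to reference one of its own implementations:

    import org.elasticsearch.cluster.ClusterInfoService;
    import org.elasticsearch.cluster.EmptyClusterInfoService;

    class AllocationServiceFactory { // hypothetical consumer, for illustration
        // EmptyClusterInfoService.INSTANCE replaces the old ClusterInfoService.EMPTY constant
        static ClusterInfoService defaultInfoService() {
            return EmptyClusterInfoService.INSTANCE; // singleton backed by empty maps
        }
    }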
@ -537,7 +537,7 @@ public abstract class ESTestCase extends LuceneTestCase {
         @Override
         public void uncaughtException(Thread t, Throwable e) {
             if (e instanceof EsRejectedExecutionException) {
-                if (e.getMessage() != null && e.getMessage().contains(EsAbortPolicy.SHUTTING_DOWN_KEY)) {
+                if (e.getMessage() != null && ((EsRejectedExecutionException) e).isExecutorShutdown()) {
                     return; // ignore the EsRejectedExecutionException when a node shuts down
                 }
             } else if (e instanceof OutOfMemoryError) {
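The change above swaps message-text sniffing (looking for the SHUTTING_DOWN_KEY marker) for a dedicated flag on the exception. A sketch of that design choice, using a hypothetical stand-in class rather than the actual Elasticsearch types:

    // Sketch only: a boolean carried on the exception survives message rewording,
    // localization, and wrapping, where string matching silently breaks.
    class RejectedException extends RuntimeException { // hypothetical stand-in
        private final boolean executorShutdown;

        RejectedException(String message, boolean executorShutdown) {
            super(message);
            this.executorShutdown = executorShutdown; // set by the rejecting executor
        }

        boolean isExecutorShutdown() {
            return executorShutdown;
        }
    }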
@ -314,7 +314,7 @@ public final class InternalTestCluster extends TestCluster {
         // always reduce this - it can make tests really slow
         builder.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 20, 50)));
         defaultSettings = builder.build();
-        executor = EsExecutors.newCached(0, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory("test_" + clusterName));
+        executor = EsExecutors.newCached("test runner", 0, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory("test_" + clusterName));
     }

     public static String nodeMode() {
@ -1,172 +0,0 @@
-# Licensed to Elasticsearch under one or more contributor
-# license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright
-# ownership. Elasticsearch licenses this file to you under
-# the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on
-# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
-# either express or implied. See the License for the specific
-# language governing permissions and limitations under the License.
-
-import datetime
-import traceback
-import json
-import os
-import shutil
-import signal
-import socket
-import subprocess
-import tempfile
-import threading
-import time
-
-from http.client import HTTPConnection
-
-LOG = os.environ.get('ES_SMOKE_TEST_PLUGINS_LOG', '/tmp/elasticsearch_smoke_test_plugins.log')
-
-print('Logging to %s' % LOG)
-
-if os.path.exists(LOG):
-  raise RuntimeError('please remove old log %s first' % LOG)
-
-try:
-  JAVA_HOME = os.environ['JAVA7_HOME']
-except KeyError:
-  try:
-    JAVA_HOME = os.environ['JAVA_HOME']
-  except KeyError:
-    raise RuntimeError("""
-    Please set JAVA_HOME in the env before running release tool
-    On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""")
-
-JAVA_ENV = 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (JAVA_HOME, JAVA_HOME, JAVA_HOME)
-
-try:
-  # make sure mvn3 is used if mvn3 is available
-  # some systems use maven 2 as default
-  subprocess.check_output('mvn3 --version', shell=True, stderr=subprocess.STDOUT)
-  MVN = 'mvn3'
-except subprocess.CalledProcessError:
-  MVN = 'mvn'
-
-def log(msg):
-  f = open(LOG, mode='ab')
-  f.write(('\n'+msg).encode('utf-8'))
-  f.close()
-
-def run(command, quiet=False):
-  log('%s: RUN: %s\n' % (datetime.datetime.now(), command))
-  if os.system('%s >> %s 2>&1' % (command, LOG)):
-    msg = '    FAILED: %s [see log %s]' % (command, LOG)
-    if not quiet:
-      print(msg)
-    raise RuntimeError(msg)
-
-def readServerOutput(p, startupEvent, failureEvent):
-  try:
-    while True:
-      line = p.stdout.readline()
-      if len(line) == 0:
-        p.poll()
-        if not startupEvent.isSet():
-          failureEvent.set()
-          startupEvent.set()
-        print('ES: **process exit**\n')
-        break
-      line = line.decode('utf-8').rstrip()
-      if line.endswith('started') and not startupEvent.isSet():
-        startupEvent.set()
-      print('ES: %s' % line)
-  except:
-    print()
-    print('Exception reading Elasticsearch output:')
-    traceback.print_exc()
-    failureEvent.set()
-    startupEvent.set()
-
-if __name__ == '__main__':
-  print('Build release bits...')
-
-  run('%s; %s clean package -DskipTests' % (JAVA_ENV, MVN))
-
-  for f in os.listdir('distribution/tar/target/releases/'):
-    if f.endswith('.tar.gz'):
-      artifact = f
-      break
-  else:
-    raise RuntimeError('could not find elasticsearch release under distribution/tar/target/releases/')
-
-  tmp_dir = tempfile.mkdtemp()
-  p = None
-  try:
-    # Extract artifact:
-    run('tar -xzf distribution/tar/target/releases/%s -C %s' % (artifact, tmp_dir))
-    es_install_dir = os.path.join(tmp_dir, artifact[:-7])
-    es_plugin_path = os.path.join(es_install_dir, 'bin/plugin')
-    installed_plugin_names = set()
-    print('Find plugins:')
-    for name in os.listdir('plugins'):
-      if name not in ('target', 'pom.xml'):
-        url = 'file://%s/plugins/%s/target/releases/elasticsearch-%s-2.0.0-beta1-SNAPSHOT.zip' % (os.path.abspath('.'), name, name)
-        print('  install plugin %s...' % name)
-        run('%s; %s install %s --url %s' % (JAVA_ENV, es_plugin_path, name, url))
-        installed_plugin_names.add(name)
-
-    print('Start Elasticsearch')
-
-    env = os.environ.copy()
-    env['JAVA_HOME'] = JAVA_HOME
-    env['PATH'] = '%s/bin:%s' % (JAVA_HOME, env['PATH'])
-    env['JAVA_CMD'] = '%s/bin/java' % JAVA_HOME
-
-    startupEvent = threading.Event()
-    failureEvent = threading.Event()
-    p = subprocess.Popen(('%s/bin/elasticsearch' % es_install_dir,
-                          '-Des.node.name=smoke_tester',
-                          '-Des.cluster.name=smoke_tester_cluster'
-                          '-Des.discovery.zen.ping.multicast.enabled=false',
-                          '-Des.logger.level=debug',
-                          '-Des.script.inline=on',
-                          '-Des.script.indexed=on'),
-                         stdout = subprocess.PIPE,
-                         stderr = subprocess.STDOUT,
-                         env = env)
-    thread = threading.Thread(target=readServerOutput, args=(p, startupEvent, failureEvent))
-    thread.setDaemon(True)
-    thread.start()
-
-    startupEvent.wait(1200)
-    if failureEvent.isSet():
-      raise RuntimeError('ES failed to start')
-
-    print('Confirm plugins are installed')
-    conn = HTTPConnection('127.0.0.1', 9200, 20);
-    conn.request('GET', '/_nodes?plugin=true&pretty=true')
-    res = conn.getresponse()
-    if res.status == 200:
-      nodes = json.loads(res.read().decode("utf-8"))['nodes']
-      for _, node in nodes.items():
-        node_plugins = node['plugins']
-        for node_plugin in node_plugins:
-          plugin_name = node_plugin['name']
-          if plugin_name not in installed_plugin_names:
-            raise RuntimeError('Unexpeced plugin %s' % plugin_name)
-          installed_plugin_names.remove(plugin_name)
-      if len(installed_plugin_names) > 0:
-        raise RuntimeError('Plugins not loaded %s' % installed_plugin_names)
-    else:
-      raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
-  finally:
-    if p is not None:
-      try:
-        os.kill(p.pid, signal.SIGKILL)
-      except ProcessLookupError:
-        pass
-    shutil.rmtree(tmp_dir)
@ -1,11 +1,6 @@
 <?xml version="1.0"?>
 <project name="elasticsearch-integration-tests">

-  <!-- this is all to not run tests for 'pom' packaging. maven you fail -->
-  <condition property="shouldskip">
-    <istrue value="${skip.integ.tests}"/>
-  </condition>
-
   <!-- our pid file for easy cleanup -->
   <property name="integ.pidfile" location="${integ.scratch}/es.pid"/>
@ -15,13 +10,6 @@
   <!-- name of our cluster, maybe needs changing -->
   <property name="integ.cluster.name" value="prepare_release"/>

-  <!-- arguments passed to elasticsearch when running -->
-  <property name="integ.args"
-            value="-Des.node.name=smoke_tester -Des.cluster.name=${integ.cluster.name}
-                   -Des.discovery.zen.ping.multicast.enabled=false -Des.script.inline=on
-                   -Des.http.port=${integ.http.port} -Des.transport.tcp.port=${integ.transport.port}
-                   -Des.script.indexed=on -Des.pidfile=${integ.pidfile} -Des.repositories.url.allowed_urls=http://snapshot.test*"/>
-
   <!-- runs an OS script -->
   <macrodef name="run-script">
     <attribute name="script"/>
@ -37,14 +25,19 @@
       <!-- create a temp CWD, to enforce that commands don't rely on CWD -->
       <mkdir dir="${integ.temp}"/>

-      <exec executable="cmd" osfamily="winnt" dir="${integ.temp}" failonerror="${failonerror}" spawn="@{spawn}">
+      <!-- print commands we run -->
+      <local name="script.base"/>
+      <basename file="@{script}" property="script.base"/>
+      <echo>execute: ${script.base} @{args}</echo>
+
+      <exec executable="cmd" osfamily="winnt" dir="${integ.temp}" failonerror="${failonerror}" spawn="@{spawn}" taskname="${script.base}">
         <arg value="/c"/>
         <arg value="@{script}.bat"/>
         <arg line="@{args}"/>
         <nested/>
       </exec>

-      <exec executable="sh" osfamily="unix" dir="${integ.temp}" failonerror="${failonerror}" spawn="@{spawn}">
+      <exec executable="sh" osfamily="unix" dir="${integ.temp}" failonerror="${failonerror}" spawn="@{spawn}" taskname="${script.base}">
         <arg value="@{script}"/>
         <arg line="@{args}"/>
         <nested/>
@ -54,9 +47,10 @@

   <!-- extracts PID from file -->
   <macrodef name="extract-pid">
+    <attribute name="file"/>
     <attribute name="property"/>
     <sequential>
-      <loadfile srcFile="${integ.pidfile}" property="@{property}">
+      <loadfile srcFile="@{file}" property="@{property}">
         <filterchain>
           <striplinebreaks/>
         </filterchain>
@ -119,36 +113,65 @@
   <!-- start elasticsearch and wait until its ready -->
   <macrodef name="startup-elasticsearch">
     <attribute name="home" default="${integ.scratch}/elasticsearch-${elasticsearch.version}"/>
+    <attribute name="spawn" default="true"/>
     <attribute name="args" default="${integ.args}"/>
+    <attribute name="es.cluster.name" default="${integ.cluster.name}"/>
+    <attribute name="es.http.port" default="${integ.http.port}"/>
+    <attribute name="es.transport.tcp.port" default="${integ.transport.port}"/>
+    <attribute name="es.pidfile" default="${integ.pidfile}"/>
+    <attribute name="additional.args" default=""/>
+    <element name="nested.args" optional="true"/>
     <sequential>

+      <!-- build args to pass to es -->
+      <local name="integ.args"/>
+      <property name="integ.args" value="
+        -Des.cluster.name=@{es.cluster.name}
+        -Des.http.port=@{es.http.port}
+        -Des.transport.tcp.port=@{es.transport.tcp.port}
+        -Des.pidfile=@{es.pidfile}
+        -Des.path.repo=@{home}/repo
+        -Des.discovery.zen.ping.multicast.enabled=false
+        -Des.script.inline=on
+        -Des.script.indexed=on
+        -Des.repositories.url.allowed_urls=http://snapshot.test*
+        @{additional.args}"
+      />
+
+      <!-- run bin/elasticsearch with args -->
       <echo>Starting up external cluster...</echo>
-      <run-script script="@{home}/bin/elasticsearch" spawn="true"
-                  args="@{args} -Des.path.repo=@{home}/repo"/>
+      <run-script script="@{home}/bin/elasticsearch"
+                  spawn="@{spawn}"
+                  args="${integ.args}">
+        <nested>
+          <nested.args/>
+        </nested>
+      </run-script>

       <local name="failed.to.start"/>
       <waitfor maxwait="30" maxwaitunit="second"
                checkevery="500" checkeveryunit="millisecond"
                timeoutproperty="failed.to.start">
-        <http url="http://127.0.0.1:${integ.http.port}"/>
+        <http url="http://127.0.0.1:@{es.http.port}"/>
       </waitfor>

       <!-- best effort, print console log. useful if it fails especially -->
       <local name="log.contents"/>
-      <loadfile srcFile="@{home}/logs/${integ.cluster.name}.log"
+      <loadfile srcFile="@{home}/logs/@{es.cluster.name}.log"
                 property="log.contents"
                 failonerror="false"/>
-      <echo message="${log.contents}"/>
+      <echo message="${log.contents}" taskname="elasticsearch"/>

       <fail message="ES instance did not start" if="failed.to.start"/>

       <local name="integ.pid"/>
-      <extract-pid property="integ.pid"/>
+      <extract-pid file="@{es.pidfile}" property="integ.pid"/>
       <echo>External cluster started PID ${integ.pid}</echo>
     </sequential>
   </macrodef>

   <!-- unzip the elasticsearch zip -->
-  <target name="setup-workspace" depends="stop-external-cluster" unless="${shouldskip}">
+  <target name="setup-workspace" depends="stop-external-cluster">
     <sequential>
       <delete dir="${integ.scratch}"/>
       <unzip src="${integ.deps}/elasticsearch-${elasticsearch.version}.zip" dest="${integ.scratch}"/>
@ -162,16 +185,15 @@
     <unzip src="${project.build.directory}/releases/${project.artifactId}-${project.version}.zip" dest="${integ.scratch}"/>
     <local name="home"/>
     <property name="home" location="${integ.scratch}/${project.artifactId}-${elasticsearch.version}"/>
-    <run-script script="${home}/bin/elasticsearch" spawn="false"
-                args="${integ.args} -Des.path.repo=${home}/repo">
-      <nested>
+    <startup-elasticsearch spawn="false" home="${home}">
+      <nested.args>
         <env key="JAVA_OPTS" value="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=8000"/>
-      </nested>
-    </run-script>
+      </nested.args>
+    </startup-elasticsearch>
   </target>

   <!-- unzip core release artifact, install plugin, then start ES -->
-  <target name="start-external-cluster-with-plugin" depends="setup-workspace" unless="${shouldskip}">
+  <target name="start-external-cluster-with-plugin" depends="setup-workspace">
     <install-plugin name="${project.artifactId}" file="${project.build.directory}/releases/${project.artifactId}-${project.version}.zip"/>
     <startup-elasticsearch/>
   </target>
@ -181,7 +203,7 @@
   <target name="stop-external-cluster" if="integ.pidfile.exists">
     <local name="integ.pid"/>
-    <extract-pid property="integ.pid"/>
+    <extract-pid file="${integ.pidfile}" property="integ.pid"/>
     <echo>Shutting down external cluster PID ${integ.pid}</echo>

     <exec executable="taskkill" failonerror="true" osfamily="winnt">
@ -198,7 +220,7 @@

   <!-- distribution tests: .zip -->

-  <target name="setup-workspace-zip" depends="stop-external-cluster" unless="${shouldskip}">
+  <target name="setup-workspace-zip" depends="stop-external-cluster">
     <sequential>
       <delete dir="${integ.scratch}"/>
       <unzip src="${project.build.directory}/releases/${project.artifactId}-${project.version}.zip"
@ -206,14 +228,14 @@
     </sequential>
   </target>

-  <target name="start-external-cluster-zip" depends="setup-workspace-zip" unless="${shouldskip}">
+  <target name="start-external-cluster-zip" depends="setup-workspace-zip">
     <startup-elasticsearch/>
   </target>

   <!-- distribution tests: .tar.gz -->

-  <target name="setup-workspace-tar" depends="stop-external-cluster" unless="${shouldskip}">
+  <target name="setup-workspace-tar" depends="stop-external-cluster">
     <sequential>
       <delete dir="${integ.scratch}"/>
       <untar src="${project.build.directory}/releases/${project.artifactId}-${project.version}.tar.gz"
@ -222,13 +244,13 @@
     </sequential>
   </target>

-  <target name="start-external-cluster-tar" depends="setup-workspace-tar" unless="${shouldskip}">
+  <target name="start-external-cluster-tar" depends="setup-workspace-tar">
     <startup-elasticsearch/>
   </target>

   <!-- distribution tests: .deb -->

-  <target name="setup-workspace-deb" depends="stop-external-cluster" unless="${shouldskip}">
+  <target name="setup-workspace-deb" depends="stop-external-cluster">
     <sequential>
       <delete dir="${integ.scratch}"/>
       <mkdir dir="${integ.scratch}/deb-extracted"/>
@ -248,12 +270,12 @@
     </sequential>
   </target>

-  <target name="start-external-cluster-deb" depends="setup-workspace-deb" unless="${shouldskip}">
+  <target name="start-external-cluster-deb" depends="setup-workspace-deb">
     <startup-elasticsearch home="${integ.scratch}/deb-extracted/usr/share/elasticsearch/"/>
   </target>

   <!-- distribution tests: .rpm -->
-  <target name="setup-workspace-rpm" depends="stop-external-cluster" unless="${shouldskip}">
+  <target name="setup-workspace-rpm" depends="stop-external-cluster">
     <sequential>
       <delete dir="${integ.scratch}"/>
       <!-- use full paths with paranoia, we will be doing relocations -->
@ -288,7 +310,7 @@
     </sequential>
   </target>

-  <target name="start-external-cluster-rpm" depends="setup-workspace-rpm" unless="${shouldskip}">
+  <target name="start-external-cluster-rpm" depends="setup-workspace-rpm">
     <startup-elasticsearch home="${integ.scratch}/rpm-extracted/usr/share/elasticsearch/"/>
   </target>
@ -8,11 +8,22 @@ use lib "$RealBin/lib";
 use File::Spec();
 use File::Temp();
 use File::Find();
-use Digest::SHA qw(sha1);
 use File::Basename qw(basename);
 use Archive::Extract();
 $Archive::Extract::PREFER_BIN = 1;

+our $SHA_CLASS = 'Digest::SHA';
+if ( eval { require Digest::SHA } ) {
+    $SHA_CLASS = 'Digest::SHA';
+}
+else {
+
+    print STDERR "Digest::SHA not available. "
+        . "Falling back to Digest::SHA::PurePerl\n";
+    require Digest::SHA::PurePerl;
+    $SHA_CLASS = 'Digest::SHA::PurePerl';
+}
+
 my $mode = shift(@ARGV) || "";
 die usage() unless $mode =~ /^--(check|update)$/;
@ -230,7 +241,7 @@ sub calculate_shas {
 #===================================
 #===================================
     my %shas;
     while ( my $file = shift() ) {
-        my $digest = eval { Digest::SHA->new(1)->addfile($file) }
+        my $digest = eval { $SHA_CLASS->new(1)->addfile($file) }
            or die "Error calculating SHA1 for <$file>: $!\n";
        $shas{ basename($file) . ".sha1" } = $digest->hexdigest;
    }
@ -24,7 +24,7 @@
     </dependencies>

     <properties>
-        <skip.integ.tests>true</skip.integ.tests>
+        <skip.unit.tests>true</skip.unit.tests>
         <rpm.outputDirectory>${project.build.directory}/releases/</rpm.outputDirectory>
     </properties>
@ -74,7 +74,7 @@
       <shadeTestJar>false</shadeTestJar>
       <promoteTransitiveDependencies>true</promoteTransitiveDependencies>
       <createDependencyReducedPom>true</createDependencyReducedPom>
-      <dependencyReducedPomLocation>${build.directory}/dependency-reduced-pom.xml</dependencyReducedPomLocation>
+      <dependencyReducedPomLocation>${project.build.directory}/dependency-reduced-pom.xml</dependencyReducedPomLocation>
       <artifactSet>
         <excludes>
           <exclude>org.apache.lucene:*</exclude>
@ -85,10 +85,12 @@ supported scripting languages:
 To increase security, Elasticsearch does not allow you to specify scripts for
 non-sandboxed languages with a request. Instead, scripts must be placed in the
 `scripts` directory inside the configuration directory (the directory where
-elasticsearch.yml is). Scripts placed into this directory will automatically be
-picked up and be available to be used. Once a script has been placed in this
-directory, it can be referenced by name. For example, a script called
-`calculate-score.groovy` can be referenced in a request like this:
+elasticsearch.yml is). The default location of this `scripts` directory can be
+changed by setting `path.scripts` in elasticsearch.yml. Scripts placed into
+this directory will automatically be picked up and be available to be used.
+Once a script has been placed in this directory, it can be referenced by name.
+For example, a script called `calculate-score.groovy` can be referenced in a
+request like this:

 [source,sh]
 --------------------------------------------------
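For the same idea from Java, a minimal sketch; this is not taken from the commit and assumes the 2.x-era scripting API (org.elasticsearch.script.Script and ScriptService.ScriptType). If that assumption does not hold for your version, the REST form in the docs above is the reference:

    import java.util.Collections;
    import org.elasticsearch.script.Script;
    import org.elasticsearch.script.ScriptService;

    class FileScriptExample { // hypothetical class name, for illustration
        static Script calculateScore() {
            // file scripts are referenced by file name without the extension;
            // the .groovy suffix on disk tells Elasticsearch the language
            return new Script("calculate-score", ScriptService.ScriptType.FILE,
                    "groovy", Collections.<String, Object>emptyMap());
        }
    }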
@ -309,6 +309,7 @@
               <goal>run</goal>
             </goals>
             <configuration>
+              <skip>${skip.integ.tests}</skip>
               <target>
                 <ant antfile="${elasticsearch.integ.antfile}" target="start-external-cluster-with-plugin"/>
               </target>
@ -322,6 +323,7 @@
               <goal>run</goal>
             </goals>
             <configuration>
+              <skip>${skip.integ.tests}</skip>
               <target>
                 <ant antfile="${elasticsearch.integ.antfile}" target="stop-external-cluster"/>
               </target>
pom.xml
@ -1463,5 +1463,6 @@
         <module>core</module>
         <module>distribution</module>
         <module>plugins</module>
+        <module>qa</module>
     </modules>
 </project>
@ -0,0 +1,320 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>org.elasticsearch.qa</groupId>
+    <artifactId>elasticsearch-qa</artifactId>
+    <version>2.0.0-beta1-SNAPSHOT</version>
+    <packaging>pom</packaging>
+    <name>QA: Parent POM</name>
+    <inceptionYear>2015</inceptionYear>
+
+    <parent>
+        <groupId>org.elasticsearch</groupId>
+        <artifactId>elasticsearch-parent</artifactId>
+        <version>2.0.0-beta1-SNAPSHOT</version>
+    </parent>
+
+    <properties>
+    </properties>
+
+    <dependencies>
+        <!-- elasticsearch and its test framework -->
+        <dependency>
+            <groupId>org.hamcrest</groupId>
+            <artifactId>hamcrest-all</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-test-framework</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.elasticsearch</groupId>
+            <artifactId>elasticsearch</artifactId>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+
+        <!-- Provided dependencies by elasticsearch itself -->
+        <dependency>
+            <groupId>org.elasticsearch</groupId>
+            <artifactId>elasticsearch</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-core</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-backward-codecs</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-analyzers-common</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-queries</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-memory</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-highlighter</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-queryparser</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-suggest</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-join</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-spatial</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.lucene</groupId>
+            <artifactId>lucene-expressions</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.spatial4j</groupId>
+            <artifactId>spatial4j</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.vividsolutions</groupId>
+            <artifactId>jts</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.github.spullara.mustache.java</groupId>
+            <artifactId>compiler</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.carrotsearch</groupId>
+            <artifactId>hppc</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>joda-time</groupId>
+            <artifactId>joda-time</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.joda</groupId>
+            <artifactId>joda-convert</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-core</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.dataformat</groupId>
+            <artifactId>jackson-dataformat-smile</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.dataformat</groupId>
+            <artifactId>jackson-dataformat-yaml</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.dataformat</groupId>
+            <artifactId>jackson-dataformat-cbor</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>io.netty</groupId>
+            <artifactId>netty</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.ning</groupId>
+            <artifactId>compress-lzf</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.tdunning</groupId>
+            <artifactId>t-digest</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>commons-cli</groupId>
+            <artifactId>commons-cli</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.codehaus.groovy</groupId>
+            <artifactId>groovy-all</artifactId>
+            <classifier>indy</classifier>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>log4j</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>log4j</groupId>
+            <artifactId>apache-log4j-extras</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>net.java.dev.jna</groupId>
+            <artifactId>jna</artifactId>
+            <scope>provided</scope>
+        </dependency>
+
+        <!-- Required by the REST test framework -->
+        <!-- TODO: remove this dependency when we will have a REST Test module -->
+        <dependency>
+            <groupId>org.apache.httpcomponents</groupId>
+            <artifactId>httpclient</artifactId>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <!-- typical layout -->
+    <build>
+        <resources>
+            <resource>
+                <directory>src/main/resources</directory>
+                <filtering>true</filtering>
+                <includes>
+                    <include>**/*.properties</include>
+                </includes>
+            </resource>
+        </resources>
+
+        <testResources>
+            <testResource>
+                <directory>src/test/java</directory>
+                <includes>
+                    <include>**/*.json</include>
+                    <include>**/*.txt</include>
+                </includes>
+            </testResource>
+            <testResource>
+                <directory>src/test/resources</directory>
+                <excludes>
+                    <exclude>elasticsearch.yml</exclude>
+                    <exclude>**/*.properties</exclude>
+                </excludes>
+            </testResource>
+            <testResource>
+                <directory>src/test/resources</directory>
+                <filtering>true</filtering>
+                <includes>
+                    <include>elasticsearch.yml</include>
+                    <include>**/*.properties</include>
+                </includes>
+            </testResource>
+            <!-- REST API specification and test suites -->
+            <testResource>
+                <directory>${project.basedir}/rest-api-spec</directory>
+                <filtering>true</filtering>
+                <targetPath>rest-api-spec</targetPath>
+                <includes>
+                    <include>api/*.json</include>
+                    <include>test/**/*.yaml</include>
+                </includes>
+            </testResource>
+            <!-- REST API specifications copied from main Elasticsearch specs
+                 because they are required to execute the REST tests in here -->
+            <testResource>
+                <directory>${elasticsearch.tools.directory}/rest-api-spec</directory>
+                <targetPath>rest-api-spec</targetPath>
+                <includes>
+                    <!-- required by the test framework -->
+                    <include>api/info.json</include>
+                    <include>api/cluster.health.json</include>
+                    <include>api/cluster.state.json</include>
+                    <!-- used in plugin REST tests -->
+                    <include>api/index.json</include>
+                    <include>api/get.json</include>
+                    <include>api/update.json</include>
+                    <include>api/search.json</include>
+                    <include>api/indices.analyze.json</include>
+                    <include>api/indices.create.json</include>
+                    <include>api/indices.refresh.json</include>
+                    <include>api/nodes.info.json</include>
+                    <include>api/count.json</include>
+                </includes>
+            </testResource>
+            <!-- shared test resources like log4j.properties -->
+            <testResource>
+                <directory>${elasticsearch.tools.directory}/shared-test-resources</directory>
+                <filtering>false</filtering>
+            </testResource>
+        </testResources>
+
+        <pluginManagement>
+            <plugins>
+                <plugin>
+                    <groupId>com.carrotsearch.randomizedtesting</groupId>
+                    <artifactId>junit4-maven-plugin</artifactId>
+                    <executions>
+                        <execution>
+                            <id>integ-tests</id>
+                            <configuration>
+                                <!-- currently only 1 cpu works, because integ tests don't make "unique" test directories? -->
+                                <parallelism>1</parallelism>
+                                <systemProperties>
+                                    <!-- use external cluster -->
+                                    <tests.cluster>127.0.0.1:${integ.transport.port}</tests.cluster>
+                                </systemProperties>
+                            </configuration>
+                        </execution>
+                    </executions>
+                </plugin>
+            </plugins>
+        </pluginManagement>
+    </build>
+
+    <modules>
+        <module>smoke-test-plugins</module>
+    </modules>
+</project>
@ -0,0 +1,42 @@
+<?xml version="1.0"?>
+<project name="smoke-test-plugins"
+         xmlns:ac="antlib:net.sf.antcontrib">
+
+  <import file="${elasticsearch.integ.antfile.default}"/>
+
+  <macrodef name="convert-plugin-name">
+    <attribute name="file"/>
+    <attribute name="outputproperty"/>
+    <sequential>
+      <local name="file.base"/>
+      <basename file="@{file}" property="file.base"/>
+      <filter-property src="file.base" dest="@{outputproperty}">
+        <chain>
+          <replaceregex pattern="^elasticsearch-" replace=""/>
+          <replacestring from="-${elasticsearch.version}.zip" to=""/>
+        </chain>
+      </filter-property>
+    </sequential>
+  </macrodef>
+
+  <target name="start-external-cluster-with-plugins" depends="setup-workspace" unless="${shouldskip}">
+    <fail message="Expected ${expected.plugin.count} dependencies, are plugins missing from this pom.xml?">
+      <condition>
+        <resourcecount count="${expected.plugin.count}" when="ne">
+          <fileset dir="${integ.deps}/plugins"/>
+        </resourcecount>
+      </condition>
+    </fail>
+    <ac:for param="file">
+      <path>
+        <fileset dir="${integ.deps}/plugins"/>
+      </path>
+      <sequential>
+        <local name="plugin.name"/>
+        <convert-plugin-name file="@{file}" outputproperty="plugin.name"/>
+        <install-plugin name="${plugin.name}" file="@{file}"/>
+      </sequential>
+    </ac:for>
+    <startup-elasticsearch/>
+  </target>
+</project>
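The <convert-plugin-name> macro above derives each plugin's name from its zip filename. A sketch of the same transformation in plain Java, for clarity; the helper class is hypothetical and "version" stands in for ${elasticsearch.version}:

    class PluginNames { // hypothetical helper, for illustration
        static String fromZip(String fileName, String version) {
            return fileName
                    .replaceFirst("^elasticsearch-", "")  // drop the artifact prefix
                    .replace("-" + version + ".zip", ""); // drop version and extension
        }
        // e.g. fromZip("elasticsearch-analysis-icu-2.0.0-beta1-SNAPSHOT.zip",
        //              "2.0.0-beta1-SNAPSHOT") returns "analysis-icu"
    }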
@ -0,0 +1,240 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.elasticsearch.qa</groupId>
+        <artifactId>elasticsearch-qa</artifactId>
+        <version>2.0.0-beta1-SNAPSHOT</version>
+    </parent>
+
+    <!--
+      This test unzips elasticsearch, installs each plugin,
+      starts elasticsearch, verifies loaded plugin count.
+
+      "expected plugin count" is computed from plugins/,
+      currently any folder having a pom.xml file.
+
+      our yaml file uses property filtering to populate it.
+    -->
+
+    <artifactId>smoke-test-plugins</artifactId>
+    <name>QA: Smoke Test Plugins</name>
+    <description>Loads up all of our plugins</description>
+
+    <properties>
+        <skip.unit.tests>true</skip.unit.tests>
+        <elasticsearch.integ.antfile>${project.basedir}/integration-tests.xml</elasticsearch.integ.antfile>
+        <tests.rest.suite>smoke_test_plugins</tests.rest.suite>
+        <tests.rest.load_packaged>false</tests.rest.load_packaged>
+    </properties>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-dependency-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>integ-setup-dependencies</id>
+                        <phase>pre-integration-test</phase>
+                        <goals>
+                            <goal>copy</goal>
+                        </goals>
+                        <configuration>
+                            <skip>${skip.integ.tests}</skip>
+                            <useBaseVersion>true</useBaseVersion>
+                            <outputDirectory>${integ.deps}/plugins</outputDirectory>
+
+                            <artifactItems>
+                                <!-- elasticsearch distribution -->
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.distribution.zip</groupId>
+                                    <artifactId>elasticsearch</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                    <outputDirectory>${integ.deps}</outputDirectory>
+                                </artifactItem>
+
+                                <!-- plugins -->
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.plugin</groupId>
+                                    <artifactId>elasticsearch-analysis-kuromoji</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                </artifactItem>
+
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.plugin</groupId>
+                                    <artifactId>elasticsearch-analysis-smartcn</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                </artifactItem>
+
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.plugin</groupId>
+                                    <artifactId>elasticsearch-analysis-stempel</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                </artifactItem>
+
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.plugin</groupId>
+                                    <artifactId>elasticsearch-analysis-phonetic</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                </artifactItem>
+
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.plugin</groupId>
+                                    <artifactId>elasticsearch-analysis-icu</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                </artifactItem>
+
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.plugin</groupId>
+                                    <artifactId>elasticsearch-cloud-gce</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                </artifactItem>
+
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.plugin</groupId>
+                                    <artifactId>elasticsearch-cloud-azure</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                </artifactItem>
+
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.plugin</groupId>
+                                    <artifactId>elasticsearch-cloud-aws</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                </artifactItem>
+
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.plugin</groupId>
+                                    <artifactId>elasticsearch-site-example</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                </artifactItem>
+
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.plugin</groupId>
+                                    <artifactId>elasticsearch-lang-python</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                </artifactItem>
+
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.plugin</groupId>
+                                    <artifactId>elasticsearch-lang-javascript</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                </artifactItem>
+
+                                <artifactItem>
+                                    <groupId>org.elasticsearch.plugin</groupId>
+                                    <artifactId>elasticsearch-delete-by-query</artifactId>
+                                    <version>${elasticsearch.version}</version>
+                                    <type>zip</type>
+                                    <overWrite>true</overWrite>
+                                </artifactItem>
+                            </artifactItems>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <!-- integration tests -->
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-antrun-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>count-expected-plugins</id>
+                        <phase>validate</phase>
+                        <goals>
+                            <goal>run</goal>
+                        </goals>
+                        <configuration>
+                            <target>
+                                <property name="plugins.dir" location="${project.basedir}/../../plugins"/>
+                                <resourcecount property="expected.plugin.count">
+                                    <fileset dir="${plugins.dir}" includes="*/pom.xml"/>
+                                </resourcecount>
+                                <echo>Found ${expected.plugin.count} plugins in ${plugins.dir}</echo>
+                            </target>
+                            <exportAntProperties>true</exportAntProperties>
+                        </configuration>
+                    </execution>
+                    <!-- start up external cluster -->
+                    <execution>
+                        <id>integ-setup</id>
+                        <phase>pre-integration-test</phase>
+                        <goals>
+                            <goal>run</goal>
+                        </goals>
+                        <configuration>
+                            <target>
+                                <ant antfile="${elasticsearch.integ.antfile}" target="start-external-cluster-with-plugins">
+                                    <property name="plugins.dir" value="${plugins.dir}"/>
+                                    <property name="expected.plugin.count" value="${expected.plugin.count}"/>
+                                </ant>
+                            </target>
+                            <skip>${skip.integ.tests}</skip>
+                        </configuration>
+                    </execution>
+                    <!-- shut down external cluster -->
+                    <execution>
+                        <id>integ-teardown</id>
+                        <phase>post-integration-test</phase>
+                        <goals>
+                            <goal>run</goal>
+                        </goals>
+                        <configuration>
+                            <target>
+                                <ant antfile="${elasticsearch.integ.antfile}" target="stop-external-cluster"/>
+                            </target>
+                            <skip>${skip.integ.tests}</skip>
+                        </configuration>
+                    </execution>
+                </executions>
+                <dependencies>
+                    <dependency>
+                        <groupId>ant-contrib</groupId>
+                        <artifactId>ant-contrib</artifactId>
+                        <version>1.0b3</version>
+                        <exclusions>
+                            <exclusion>
+                                <groupId>ant</groupId>
+                                <artifactId>ant</artifactId>
+                            </exclusion>
+                        </exclusions>
+                    </dependency>
+                    <dependency>
+                        <groupId>org.apache.ant</groupId>
+                        <artifactId>ant-nodeps</artifactId>
+                        <version>1.8.1</version>
+                    </dependency>
+                </dependencies>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>
@ -0,0 +1,13 @@
+# Integration tests for smoke testing plugins
+#
+"Correct Plugin Count":
+  - do:
+      cluster.state: {}
+
+  # Get master node id
+  - set: { master_node: master }
+
+  - do:
+      nodes.info: {}
+
+  - length: { nodes.$master.plugins: ${expected.plugin.count} }
@ -0,0 +1,41 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.smoketest;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.test.rest.RestTestCandidate;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+
+import java.io.IOException;
+
+public class SmokeTestPluginsIT extends ESRestTestCase {
+
+    public SmokeTestPluginsIT(@Name("yaml") RestTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
+        return ESRestTestCase.createParameters(0, 1);
+    }
+}