Merge branch 'master' into explicit_casts

Robert Muir 2016-06-20 13:39:13 -04:00
commit fea120b073
175 changed files with 1470 additions and 1150 deletions

View File

@ -84,7 +84,7 @@ dependencies {
compile 'com.netflix.nebula:gradle-info-plugin:3.0.3'
compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r'
compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
compile 'de.thetaphi:forbiddenapis:2.1'
compile 'de.thetaphi:forbiddenapis:2.2'
compile 'com.bmuschko:gradle-nexus-plugin:2.3.1'
compile 'org.apache.rat:apache-rat:0.11'
}

View File

@ -127,7 +127,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
current.println(setup)
}
body(test)
body(test, false)
}
private void response(Snippet response) {
@ -136,7 +136,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
}
void emitDo(String method, String pathAndQuery,
String body, String catchPart) {
String body, String catchPart, boolean inSetup) {
def (String path, String query) = pathAndQuery.tokenize('?')
current.println(" - do:")
if (catchPart != null) {
@ -160,6 +160,19 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
current.println(" body: |")
body.eachLine { current.println(" $it") }
}
/* Catch any shard failures. These only cause a non-200 response if
* no shard succeeds. But we need to fail the tests on all of these
* because they mean invalid syntax or broken queries or something
* else that we don't want to teach people to do. The REST test
* framework doesn't allow us to have assertions in the setup
* section so we have to skip it there. We also have to skip _cat
* actions because they don't return json so we can't is_false
* them. That is ok because they don't have this
* partial-success-is-success thing.
*/
if (false == inSetup && false == path.startsWith('_cat')) {
current.println(" - is_false: _shards.failures")
}
}
private void setup(Snippet setup) {
@ -169,7 +182,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
setupCurrent(setup)
current.println('---')
current.println("setup:")
body(setup)
body(setup, true)
// always wait for yellow before anything is executed
current.println(
" - do:\n" +
@ -179,7 +192,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
" wait_for_status: \"yellow\"")
}
private void body(Snippet snippet) {
private void body(Snippet snippet, boolean inSetup) {
parse("$snippet", snippet.contents, SYNTAX) { matcher, last ->
if (matcher.group("comment") != null) {
// Comment
@ -193,7 +206,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
// Leading '/'s break the generated paths
pathAndQuery = pathAndQuery.substring(1)
}
emitDo(method, pathAndQuery, body, catchPart)
emitDo(method, pathAndQuery, body, catchPart, inSetup)
}
}
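
The comment above explains the reason for the generated is_false assertion: a search that fails on some shards still returns HTTP 200 as long as at least one shard succeeds, and the failures only appear under _shards.failures. A minimal Java sketch of the same check done from the client side; the helper class, method and index names are invented for illustration, and only well-known client calls (prepareSearch, getFailedShards, getShardFailures) are used:

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

public class ShardFailureCheck {

    /** Returns true only if every shard executed the search successfully. */
    static boolean succeededOnAllShards(Client client, String index) {
        SearchResponse response = client.prepareSearch(index)
                .setQuery(QueryBuilders.matchAllQuery())
                .get();
        // getShardFailures() is what the REST layer renders as _shards.failures;
        // a partially failed search can still report an overall 200 status
        return response.getFailedShards() == 0 && response.getShardFailures().length == 0;
    }
}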

View File

@ -712,7 +712,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]cluster[/\\]stats[/\\]ClusterStatsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]TransportAnalyzeActionTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]cache[/\\]clear[/\\]ClearIndicesCacheBlocksIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]create[/\\]CreateIndexIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]flush[/\\]SyncedFlushUnitTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]get[/\\]GetIndexIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]admin[/\\]indices[/\\]shards[/\\]IndicesShardStoreRequestIT.java" checks="LineLength" />
@ -859,7 +858,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]ZenUnicastDiscoveryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]NodeJoinControllerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ZenDiscoveryUnitTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ping[/\\]unicast[/\\]UnicastZenPingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]publish[/\\]PublishClusterStateActionTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]document[/\\]DocumentActionsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]env[/\\]EnvironmentTests.java" checks="LineLength" />
@ -1023,7 +1021,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]state[/\\]CloseIndexDisableCloseAllIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]state[/\\]OpenCloseIndexIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]state[/\\]RareClusterStateIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]state[/\\]SimpleIndexStateIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]stats[/\\]IndexStatsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]store[/\\]IndicesStoreIntegrationIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]indices[/\\]store[/\\]IndicesStoreTests.java" checks="LineLength" />

View File

@ -32,7 +32,7 @@ org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()
@defaultMessage Soon to be removed
org.apache.lucene.document.FieldType#numericType()
@defaultMessage Don't use MethodHandles in slow ways, dont be lenient in tests.
# unfortunately, invoke() cannot be banned, because forbidden apis does not support signature polymorphic methods
@defaultMessage Don't use MethodHandles in slow ways, don't be lenient in tests.
java.lang.invoke.MethodHandle#invoke(java.lang.Object[])
java.lang.invoke.MethodHandle#invokeWithArguments(java.lang.Object[])
java.lang.invoke.MethodHandle#invokeWithArguments(java.util.List)

View File

@ -22,7 +22,6 @@ package org.elasticsearch;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
@ -329,18 +328,4 @@ public class Version {
public boolean isRC() {
return build > 50 && build < 99;
}
public static class Module extends AbstractModule {
private final Version version;
public Module(Version version) {
this.version = version;
}
@Override
protected void configure() {
bind(Version.class).toInstance(version);
}
}
}

View File

@ -37,15 +37,13 @@ import org.elasticsearch.transport.TransportService;
public class TransportMainAction extends HandledTransportAction<MainRequest, MainResponse> {
private final ClusterService clusterService;
private final Version version;
@Inject
public TransportMainAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
ClusterService clusterService, Version version) {
ClusterService clusterService) {
super(settings, MainAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MainRequest::new);
this.clusterService = clusterService;
this.version = version;
}
@Override
@ -54,6 +52,7 @@ public class TransportMainAction extends HandledTransportAction<MainRequest, Mai
assert Node.NODE_NAME_SETTING.exists(settings);
final boolean available = clusterState.getBlocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE) == false;
listener.onResponse(
new MainResponse(Node.NODE_NAME_SETTING.get(settings), version, clusterState.getClusterName(), Build.CURRENT, available));
new MainResponse(Node.NODE_NAME_SETTING.get(settings), Version.CURRENT, clusterState.getClusterName(), Build.CURRENT,
available));
}
}

View File

@ -24,6 +24,8 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.support.AbstractClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
/**
@ -42,7 +44,15 @@ public abstract class FilterClient extends AbstractClient {
* @see #in()
*/
public FilterClient(Client in) {
super(in.settings(), in.threadPool());
this(in.settings(), in.threadPool(), in);
}
/**
* A constructor that allows passing settings and threadpool separately. This is useful if the
* client is a proxy and not yet fully constructed, i.e. both dependencies are not available yet.
*/
protected FilterClient(Settings settings, ThreadPool threadPool, Client in) {
super(settings, threadPool);
this.in = in;
}
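
A hedged sketch of how the new protected constructor could be used: a proxy-style client that is created while its delegate is still being wired up, so settings and threadpool are passed in explicitly instead of being read from the not-yet-usable delegate. The ProxyClient name is hypothetical; only the two FilterClient constructors shown above are assumed:

import org.elasticsearch.client.Client;
import org.elasticsearch.client.FilterClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;

/** Hypothetical proxy client built before its delegate is fully constructed. */
public class ProxyClient extends FilterClient {

    public ProxyClient(Settings settings, ThreadPool threadPool, Client delegate) {
        // FilterClient(Client) would call delegate.settings()/delegate.threadPool()
        // right away; the protected constructor avoids touching the delegate here.
        super(settings, threadPool, delegate);
    }
}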

View File

@ -1,34 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.node;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.inject.AbstractModule;
/**
*
*/
public class NodeClientModule extends AbstractModule {
@Override
protected void configure() {
bind(Client.class).to(NodeClient.class).asEagerSingleton();
}
}

View File

@ -19,7 +19,7 @@
package org.elasticsearch.client.transport;
import org.elasticsearch.Version;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionModule;
@ -42,11 +42,9 @@ import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.monitor.MonitorService;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsModule;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.threadpool.ExecutorBuilder;
@ -54,6 +52,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.netty.NettyTransport;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
@ -120,21 +119,17 @@ public class TransportClient extends AbstractClient {
public TransportClient build() {
final PluginsService pluginsService = newPluginService(providedSettings);
final Settings settings = pluginsService.updatedSettings();
Version version = Version.CURRENT;
final List<Closeable> resourcesToClose = new ArrayList<>();
final ThreadPool threadPool = new ThreadPool(settings);
resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
final NetworkService networkService = new NetworkService(settings);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
boolean success = false;
try {
ModulesBuilder modules = new ModulesBuilder();
modules.add(new Version.Module(version));
// plugin modules must be added here, before others or we can get crazy injection errors...
for (Module pluginModule : pluginsService.nodeModules()) {
modules.add(pluginModule);
}
modules.add(new PluginsModule(pluginsService));
modules.add(new NetworkModule(networkService, settings, true, namedWriteableRegistry));
modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
modules.add(new SearchModule(settings, namedWriteableRegistry) {
@ -156,21 +151,25 @@ public class TransportClient extends AbstractClient {
SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter);
CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
settingsModule.getClusterSettings());
resourcesToClose.add(circuitBreakerService);
BigArrays bigArrays = new BigArrays(settings, circuitBreakerService);
resourcesToClose.add(bigArrays);
modules.add(settingsModule);
modules.add((b -> b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService)));
modules.add((b -> {
b.bind(BigArrays.class).toInstance(bigArrays);
b.bind(PluginsService.class).toInstance(pluginsService);
b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService);
}));
Injector injector = modules.createInjector();
final TransportService transportService = injector.getInstance(TransportService.class);
transportService.start();
transportService.acceptIncomingRequests();
TransportClient transportClient = new TransportClient(injector);
success = true;
resourcesToClose.clear();
return transportClient;
} finally {
if (!success) {
ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
}
IOUtils.closeWhileHandlingException(resourcesToClose);
}
}
}
@ -265,24 +264,16 @@ public class TransportClient extends AbstractClient {
*/
@Override
public void close() {
injector.getInstance(TransportClientNodesService.class).close();
injector.getInstance(TransportService.class).close();
try {
injector.getInstance(MonitorService.class).close();
} catch (Exception e) {
// ignore, might not be bounded
}
List<Closeable> closeables = new ArrayList<>();
closeables.add(injector.getInstance(TransportClientNodesService.class));
closeables.add(injector.getInstance(TransportService.class));
for (Class<? extends LifecycleComponent> plugin : injector.getInstance(PluginsService.class).nodeServices()) {
injector.getInstance(plugin).close();
closeables.add(injector.getInstance(plugin));
}
try {
ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS);
} catch (Exception e) {
// ignore
}
injector.getInstance(BigArrays.class).close();
closeables.add(() -> ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS));
closeables.add(injector.getInstance(BigArrays.class));
IOUtils.closeWhileHandlingException(closeables);
}
@Override

View File

@ -48,6 +48,7 @@ import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
@ -66,7 +67,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
/**
*
*/
public class TransportClientNodesService extends AbstractComponent {
public class TransportClientNodesService extends AbstractComponent implements Closeable {
private final TimeValue nodesSamplerInterval;
@ -112,12 +113,12 @@ public class TransportClientNodesService extends AbstractComponent {
@Inject
public TransportClientNodesService(Settings settings,TransportService transportService,
ThreadPool threadPool, Version version) {
ThreadPool threadPool) {
super(settings);
this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings);
this.transportService = transportService;
this.threadPool = threadPool;
this.minCompatibilityVersion = version.minimumCompatibilityVersion();
this.minCompatibilityVersion = Version.CURRENT.minimumCompatibilityVersion();
this.nodesSamplerInterval = CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL.get(this.settings);
this.pingTimeout = CLIENT_TRANSPORT_PING_TIMEOUT.get(this.settings).millis();

View File

@ -103,7 +103,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
private final ClusterService clusterService;
private final IndicesService indicesService;
private final AllocationService allocationService;
private final Version version;
private final AliasValidator aliasValidator;
private final IndexTemplateFilter indexTemplateFilter;
private final Environment env;
@ -114,13 +113,12 @@ public class MetaDataCreateIndexService extends AbstractComponent {
@Inject
public MetaDataCreateIndexService(Settings settings, ClusterService clusterService,
IndicesService indicesService, AllocationService allocationService,
Version version, AliasValidator aliasValidator,
AliasValidator aliasValidator,
Set<IndexTemplateFilter> indexTemplateFilters, Environment env, NodeServicesProvider nodeServicesProvider, IndexScopedSettings indexScopedSettings) {
super(settings);
this.clusterService = clusterService;
this.indicesService = indicesService;
this.allocationService = allocationService;
this.version = version;
this.aliasValidator = aliasValidator;
this.env = env;
this.nodeServicesProvider = nodeServicesProvider;
@ -287,7 +285,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
DiscoveryNodes nodes = currentState.nodes();
final Version createdVersion = Version.smallest(version, nodes.getSmallestNonClientNodeVersion());
final Version createdVersion = Version.smallest(Version.CURRENT, nodes.getSmallestNonClientNodeVersion());
indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion);
}

View File

@ -46,12 +46,10 @@ public class DiscoveryNodeService extends AbstractComponent {
// don't use node.id.seed so it won't be seen as an attribute
Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, Property.NodeScope);
private final List<CustomAttributesProvider> customAttributesProviders = new CopyOnWriteArrayList<>();
private final Version version;
@Inject
public DiscoveryNodeService(Settings settings, Version version) {
public DiscoveryNodeService(Settings settings) {
super(settings);
this.version = version;
}
public static String generateNodeId(Settings settings) {
@ -93,7 +91,7 @@ public class DiscoveryNodeService extends AbstractComponent {
}
}
return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress, attributes,
roles, version);
roles, Version.CURRENT);
}
public interface CustomAttributesProvider {

View File

@ -237,7 +237,11 @@ public class ClusterService extends AbstractLifecycleComponent<ClusterService> {
* The local node.
*/
public DiscoveryNode localNode() {
return clusterState.getNodes().getLocalNode();
DiscoveryNode localNode = clusterState.getNodes().getLocalNode();
if (localNode == null) {
throw new IllegalStateException("No local node found. Is the node started?");
}
return localNode;
}
public OperationRouting operationRouting() {

View File

@ -263,6 +263,9 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
List<String> keys = scoredKeys.stream().map((a) -> a.v2()).collect(Collectors.toList());
if (keys.isEmpty() == false) {
msg += " did you mean " + (keys.size() == 1 ? "[" + keys.get(0) + "]": "any of " + keys.toString()) + "?";
} else {
msg += " please check that any required plugins are installed, or check the breaking changes documentation for removed " +
"settings";
}
throw new IllegalArgumentException(msg);
}

View File

@ -134,6 +134,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING,
BitsetFilterCache.INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING,
IndexModule.INDEX_STORE_TYPE_SETTING,
IndexModule.INDEX_STORE_PRE_LOAD_SETTING,
IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING,
IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING,
PrimaryShardAllocator.INDEX_RECOVERY_INITIAL_SHARDS_SETTING,

View File

@ -373,12 +373,11 @@ public class BigArrays implements Releasable {
final boolean checkBreaker;
private final BigArrays circuitBreakingInstance;
@Inject
public BigArrays(Settings settings, @Nullable final CircuitBreakerService breakerService) {
// Checking the breaker is disabled if not specified
this(new PageCacheRecycler(settings), breakerService, false);
}
// public for tests
public BigArrays(PageCacheRecycler recycler, @Nullable final CircuitBreakerService breakerService, boolean checkBreaker) {
this.checkBreaker = checkBreaker;
this.recycler = recycler;

View File

@ -53,9 +53,9 @@ public class ElectMasterService extends AbstractComponent {
private volatile int minimumMasterNodes;
@Inject
public ElectMasterService(Settings settings, Version version) {
public ElectMasterService(Settings settings) {
super(settings);
this.minMasterVersion = version.minimumCompatibilityVersion();
this.minMasterVersion = Version.CURRENT.minimumCompatibilityVersion();
this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings);
logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes);
}

View File

@ -132,8 +132,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
@Inject
public UnicastZenPing(Settings settings, ThreadPool threadPool, TransportService transportService,
Version version, ElectMasterService electMasterService,
@Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
ElectMasterService electMasterService, @Nullable Set<UnicastHostsProvider> unicastHostsProviders) {
super(settings);
this.threadPool = threadPool;
this.transportService = transportService;
@ -166,7 +165,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
TransportAddress[] addresses = transportService.addressesFromString(host, limitPortCounts);
for (TransportAddress address : addresses) {
configuredTargetNodes.add(new DiscoveryNode(UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#",
address, emptyMap(), emptySet(), version.minimumCompatibilityVersion()));
address, emptyMap(), emptySet(), getVersion().minimumCompatibilityVersion()));
}
} catch (Exception e) {
throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e);
@ -586,4 +585,8 @@ public class UnicastZenPing extends AbstractLifecycleComponent<ZenPing> implemen
}
}
}
protected Version getVersion() {
return Version.CURRENT; // for tests
}
}

View File

@ -1,43 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.env;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.threadpool.ThreadPool;
/**
*
*/
public class EnvironmentModule extends AbstractModule {
private final Environment environment;
private final ThreadPool threadPool;
public EnvironmentModule(Environment environment, ThreadPool threadPool) {
this.threadPool = threadPool;
this.environment = environment;
}
@Override
protected void configure() {
bind(ThreadPool.class).toInstance(threadPool);
bind(Environment.class).toInstance(environment);
}
}

View File

@ -32,12 +32,9 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
@ -66,7 +63,6 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -167,7 +163,6 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
public static final String NODE_LOCK_FILENAME = "node.lock";
public static final String UPGRADE_LOCK_FILENAME = "upgrade.lock";
@Inject
public NodeEnvironment(Settings settings, Environment environment) throws IOException {
super(settings);

View File

@ -1,48 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.env;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.AbstractModule;
/**
*
*/
public class NodeEnvironmentModule extends AbstractModule {
private final NodeEnvironment nodeEnvironment;
public NodeEnvironmentModule() {
this(null);
}
public NodeEnvironmentModule(@Nullable NodeEnvironment nodeEnvironment) {
this.nodeEnvironment = nodeEnvironment;
}
@Override
protected void configure() {
if (nodeEnvironment != null) {
bind(NodeEnvironment.class).toInstance(nodeEnvironment);
} else {
bind(NodeEnvironment.class).asEagerSingleton();
}
}
}

View File

@ -27,11 +27,6 @@ import org.elasticsearch.common.settings.Settings;
*/
public class GatewayModule extends AbstractModule {
private final Settings settings;
public GatewayModule(Settings settings) {
this.settings = settings;
}
@Override
protected void configure() {

View File

@ -45,6 +45,7 @@ import org.elasticsearch.indices.mapper.MapperRegistry;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@ -74,6 +75,14 @@ public final class IndexModule {
public static final Setting<String> INDEX_STORE_TYPE_SETTING =
new Setting<>("index.store.type", "", Function.identity(), Property.IndexScope, Property.NodeScope);
/** On which extensions to load data into the file-system cache upon opening of files.
* This only works with the mmap directory, and even in that case is still
* best-effort only. */
public static final Setting<List<String>> INDEX_STORE_PRE_LOAD_SETTING =
Setting.listSetting("index.store.preload", Collections.emptyList(), Function.identity(),
Property.IndexScope, Property.NodeScope);
public static final String SIMILARITY_SETTINGS_PREFIX = "index.similarity";
// whether to use the query cache
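
Since index.store.preload is registered with Property.IndexScope, it can be set per index at creation time. A hedged sketch of doing that through the Java admin client; the index name and the chosen Lucene file extensions are illustrative, and as the javadoc above notes the setting is best-effort and only effective with the mmap-based directories:

import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

public class PreloadExample {

    static CreateIndexResponse createPreloadedIndex(Client client) {
        Settings indexSettings = Settings.builder()
                // corresponds to IndexModule.INDEX_STORE_PRE_LOAD_SETTING;
                // "*" instead of explicit extensions would preload everything
                .putArray("index.store.preload", "nvd", "dvd", "tim")
                .build();
        return client.admin().indices().prepareCreate("preloaded-index")
                .setSettings(indexSettings)
                .get();
    }
}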

View File

@ -53,6 +53,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.Callback;
import org.elasticsearch.common.util.CancellableThreads;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.SuspendableRefContainer;
import org.elasticsearch.index.Index;
@ -131,8 +132,8 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;
public class IndexShard extends AbstractIndexShardComponent implements IndicesClusterStateService.Shard {
@ -158,6 +159,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
private final TranslogConfig translogConfig;
private final IndexEventListener indexEventListener;
private final QueryCachingPolicy cachingPolicy;
private final CancellableThreads cancellableThreads;
/**
@ -265,6 +267,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id());
refreshListeners = buildRefreshListeners();
persistMetadata(shardRouting, null);
cancellableThreads = new CancellableThreads();
}
public Store store() {
@ -843,6 +846,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
} finally { // playing safe here and close the engine even if the above succeeds - close can be called multiple times
IOUtils.close(engine);
}
cancellableThreads.cancel(reason);
}
}
}
@ -1281,7 +1285,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
private void checkIndex() throws IOException {
if (store.tryIncRef()) {
try {
doCheckIndex();
cancellableThreads.executeIO(this::doCheckIndex);
} catch (ClosedByInterruptException ex) {
assert cancellableThreads.isCancelled();
// that's fine, we might run into this when we cancel the thread, since Java NIO will close the channel on interrupt
// and on the next access we fail it.
} finally {
store.decRef();
}
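
For context, a self-contained sketch of the CancellableThreads pattern the shard now uses for checkIndex: the I/O task runs through executeIO so that a later cancel call can interrupt it, which is why ClosedByInterruptException is tolerated above. Only executeIO and cancel, which the diff itself relies on, are used; the class name, file path and task body are invented:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.elasticsearch.common.util.CancellableThreads;

/** Hypothetical component that runs an interruptible I/O check. */
public class CancellableCheck {

    private final CancellableThreads cancellableThreads = new CancellableThreads();
    private final Path file = Paths.get("/tmp/some-file"); // illustrative path

    void runCheck() throws IOException {
        // if cancel(...) is called concurrently, the running task is interrupted
        cancellableThreads.executeIO(this::readFile);
    }

    private void readFile() throws IOException {
        Files.readAllBytes(file);
    }

    void close() {
        // mirrors IndexShard close handling: cancel any in-flight check when shutting down
        cancellableThreads.cancel("closed");
    }
}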

View File

@ -105,8 +105,6 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
private RateLimiter restoreRateLimiter;
private RateLimiterListener rateLimiterListener;
private RateLimitingInputStream.Listener snapshotThrottleListener;
private RateLimitingInputStream.Listener restoreThrottleListener;
@ -163,7 +161,6 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
this.chunkSize = chunkSize;
this.snapshotRateLimiter = snapshotRateLimiter;
this.restoreRateLimiter = restoreRateLimiter;
this.rateLimiterListener = rateLimiterListener;
this.snapshotThrottleListener = nanos -> rateLimiterListener.onSnapshotPause(nanos);
this.restoreThrottleListener = nanos -> rateLimiterListener.onRestorePause(nanos);
this.compress = compress;

View File

@ -31,13 +31,11 @@ import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.store.SimpleFSLockFactory;
import org.apache.lucene.store.SleepingLockWrapper;
import org.apache.lucene.store.StoreRateLimiting;
import org.apache.lucene.util.Constants;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.ShardPath;
@ -45,7 +43,7 @@ import org.elasticsearch.index.shard.ShardPath;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
/**
@ -87,8 +85,12 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim
@Override
public Directory newDirectory() throws IOException {
final Path location = path.resolveIndex();
final LockFactory lockFactory = indexSettings.getValue(INDEX_LOCK_FACTOR_SETTING);
Files.createDirectories(location);
Directory wrapped = newFSDirectory(location, indexSettings.getValue(INDEX_LOCK_FACTOR_SETTING));
Directory wrapped = newFSDirectory(location, lockFactory);
Set<String> preLoadExtensions = new HashSet<>(
indexSettings.getValue(IndexModule.INDEX_STORE_PRE_LOAD_SETTING));
wrapped = setPreload(wrapped, location, lockFactory, preLoadExtensions);
if (IndexMetaData.isOnSharedFilesystem(indexSettings.getSettings())) {
wrapped = new SleepingLockWrapper(wrapped, 5000);
}
@ -100,25 +102,11 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim
rateLimitingTimeInNanos.inc(nanos);
}
/*
* We are mmapping norms, docvalues as well as term dictionaries, all other files are served through NIOFS
* this provides good random access performance while not creating unnecessary mmaps for files like stored
* fields etc.
*/
private static final Set<String> PRIMARY_EXTENSIONS = Collections.unmodifiableSet(Sets.newHashSet("nvd", "dvd", "tim"));
protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException {
final String storeType = indexSettings.getSettings().get(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(),
IndexModule.Type.FS.getSettingsKey());
if (IndexModule.Type.FS.match(storeType) || isDefault(storeType)) {
final FSDirectory open = FSDirectory.open(location, lockFactory); // use lucene defaults
if (open instanceof MMapDirectory
&& isDefault(storeType)
&& Constants.WINDOWS == false) {
return newDefaultDir(location, (MMapDirectory) open, lockFactory);
}
return open;
if (IndexModule.Type.FS.match(storeType) || IndexModule.Type.DEFAULT.match(storeType)) {
return FSDirectory.open(location, lockFactory); // use lucene defaults
} else if (IndexModule.Type.SIMPLEFS.match(storeType)) {
return new SimpleFSDirectory(location, lockFactory);
} else if (IndexModule.Type.NIOFS.match(storeType)) {
@ -129,17 +117,25 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim
throw new IllegalArgumentException("No directory found for type [" + storeType + "]");
}
private static boolean isDefault(String storeType) {
return IndexModule.Type.DEFAULT.match(storeType);
}
private Directory newDefaultDir(Path location, final MMapDirectory mmapDir, LockFactory lockFactory) throws IOException {
return new FileSwitchDirectory(PRIMARY_EXTENSIONS, mmapDir, new NIOFSDirectory(location, lockFactory), true) {
@Override
public String[] listAll() throws IOException {
// Avoid doing listAll twice:
return mmapDir.listAll();
private static Directory setPreload(Directory directory, Path location, LockFactory lockFactory,
Set<String> preLoadExtensions) throws IOException {
if (preLoadExtensions.isEmpty() == false
&& directory instanceof MMapDirectory
&& ((MMapDirectory) directory).getPreload() == false) {
if (preLoadExtensions.contains("*")) {
((MMapDirectory) directory).setPreload(true);
return directory;
}
};
MMapDirectory primary = new MMapDirectory(location, lockFactory);
primary.setPreload(true);
return new FileSwitchDirectory(preLoadExtensions, primary, directory, true) {
@Override
public String[] listAll() throws IOException {
// avoid listing twice
return primary.listAll();
}
};
}
return directory;
}
}

View File

@ -27,7 +27,7 @@ import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionModule;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.node.NodeClientModule;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
@ -67,9 +67,7 @@ import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.NodeEnvironmentModule;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.gateway.GatewayModule;
import org.elasticsearch.gateway.GatewayService;
@ -89,7 +87,6 @@ import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsModule;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.repositories.RepositoriesModule;
@ -104,9 +101,7 @@ import org.elasticsearch.tasks.TaskPersistenceService;
import org.elasticsearch.threadpool.ExecutorBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.tribe.TribeModule;
import org.elasticsearch.tribe.TribeService;
import org.elasticsearch.watcher.ResourceWatcherModule;
import org.elasticsearch.watcher.ResourceWatcherService;
import java.io.BufferedWriter;
@ -174,17 +169,17 @@ public class Node implements Closeable {
* @param preparedSettings Base settings to configure the node with
*/
public Node(Settings preparedSettings) {
this(InternalSettingsPreparer.prepareEnvironment(preparedSettings, null), Version.CURRENT, Collections.<Class<? extends Plugin>>emptyList());
this(InternalSettingsPreparer.prepareEnvironment(preparedSettings, null), Collections.<Class<? extends Plugin>>emptyList());
}
protected Node(Environment tmpEnv, Version version, Collection<Class<? extends Plugin>> classpathPlugins) {
protected Node(Environment tmpEnv, Collection<Class<? extends Plugin>> classpathPlugins) {
Settings tmpSettings = Settings.builder().put(tmpEnv.settings())
.put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE).build();
final List<Closeable> resourcesToClose = new ArrayList<>(); // register everything we need to release in the case of an error
tmpSettings = TribeService.processSettings(tmpSettings);
ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(tmpSettings));
final String displayVersion = version + (Build.CURRENT.isSnapshot() ? "-SNAPSHOT" : "");
final String displayVersion = Version.CURRENT + (Build.CURRENT.isSnapshot() ? "-SNAPSHOT" : "");
final JvmInfo jvmInfo = JvmInfo.jvmInfo();
logger.info(
"version[{}], pid[{}], build[{}/{}], OS[{}/{}/{}], JVM[{}/{}/{}/{}]",
@ -242,40 +237,50 @@ public class Node implements Closeable {
} catch (IOException ex) {
throw new IllegalStateException("Failed to created node environment", ex);
}
final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool);
resourcesToClose.add(resourceWatcherService);
final NetworkService networkService = new NetworkService(settings);
final ClusterService clusterService = new ClusterService(settings, settingsModule.getClusterSettings(), threadPool);
resourcesToClose.add(clusterService);
final TribeService tribeService = new TribeService(settings, clusterService);
resourcesToClose.add(tribeService);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
ModulesBuilder modules = new ModulesBuilder();
modules.add(new Version.Module(version));
// plugin modules must be added here, before others or we can get crazy injection errors...
for (Module pluginModule : pluginsService.nodeModules()) {
modules.add(pluginModule);
}
final MonitorService monitorService = new MonitorService(settings, nodeEnvironment, threadPool);
modules.add(new PluginsModule(pluginsService));
modules.add(new EnvironmentModule(environment, threadPool));
modules.add(new NodeModule(this, monitorService));
modules.add(new NetworkModule(networkService, settings, false, namedWriteableRegistry));
modules.add(scriptModule);
modules.add(new NodeEnvironmentModule(nodeEnvironment));
modules.add(new DiscoveryModule(this.settings));
modules.add(new ClusterModule(this.settings, clusterService));
modules.add(new IndicesModule(namedWriteableRegistry));
modules.add(new SearchModule(settings, namedWriteableRegistry));
modules.add(new ActionModule(DiscoveryNode.isIngestNode(settings), false));
modules.add(new GatewayModule(settings));
modules.add(new NodeClientModule());
modules.add(new ResourceWatcherModule());
modules.add(new GatewayModule());
modules.add(new RepositoriesModule());
modules.add(new TribeModule());
modules.add(new AnalysisModule(environment));
pluginsService.processModules(modules);
CircuitBreakerService circuitBreakerService = createCircuitBreakerService(settingsModule.getSettings(),
settingsModule.getClusterSettings());
resourcesToClose.add(circuitBreakerService);
BigArrays bigArrays = createBigArrays(settings, circuitBreakerService);
resourcesToClose.add(bigArrays);
modules.add(settingsModule);
modules.add(b -> b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService));
modules.add(b -> {
b.bind(PluginsService.class).toInstance(pluginsService);
b.bind(Client.class).to(NodeClient.class).asEagerSingleton();
b.bind(Environment.class).toInstance(environment);
b.bind(ThreadPool.class).toInstance(threadPool);
b.bind(NodeEnvironment.class).toInstance(nodeEnvironment);
b.bind(TribeService.class).toInstance(tribeService);
b.bind(ResourceWatcherService.class).toInstance(resourceWatcherService);
b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService);
b.bind(BigArrays.class).toInstance(bigArrays);
}
);
injector = modules.createInjector();
client = injector.getInstance(Client.class);
success = true;
@ -624,4 +629,12 @@ public class Node implements Closeable {
throw new IllegalArgumentException("Unknown circuit breaker type [" + type + "]");
}
}
/**
* Creates a new {@link BigArrays} instance used for this node.
* This method can be overridden by subclasses to change the {@link BigArrays} implementation, for instance for testing
*/
BigArrays createBigArrays(Settings settings, CircuitBreakerService circuitBreakerService) {
return new BigArrays(settings, circuitBreakerService);
}
}
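
The javadoc above says subclasses can override createBigArrays, e.g. for testing. A hedged sketch of what such a subclass might look like; MockNode is a hypothetical name, and because createBigArrays is package-private the subclass is assumed to live in org.elasticsearch.node. Only the protected Node(Environment, Collection) constructor and the createBigArrays hook shown in this diff are relied on:

package org.elasticsearch.node;

import java.util.Collection;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.env.Environment;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.plugins.Plugin;

/** Hypothetical test node that swaps in its own BigArrays instance. */
public class MockNode extends Node {

    public MockNode(Environment environment, Collection<Class<? extends Plugin>> classpathPlugins) {
        super(environment, classpathPlugins);
    }

    @Override
    BigArrays createBigArrays(Settings settings, CircuitBreakerService circuitBreakerService) {
        // a real test would return an instrumented implementation here;
        // this sketch only shows where the hook sits
        return new BigArrays(settings, circuitBreakerService);
    }
}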

View File

@ -37,9 +37,6 @@ public class NodeModule extends AbstractModule {
private final MonitorService monitorService;
private final ProcessorsRegistry.Builder processorsRegistryBuilder;
// pkg private so tests can mock
Class<? extends BigArrays> bigArraysImpl = BigArrays.class;
public NodeModule(Node node, MonitorService monitorService) {
this.node = node;
this.monitorService = monitorService;
@ -48,12 +45,6 @@ public class NodeModule extends AbstractModule {
@Override
protected void configure() {
if (bigArraysImpl == BigArrays.class) {
bind(BigArrays.class).asEagerSingleton();
} else {
bind(BigArrays.class).to(bigArraysImpl).asEagerSingleton();
}
bind(Node.class).toInstance(node);
bind(MonitorService.class).toInstance(monitorService);
bind(NodeService.class).asEagerSingleton();

View File

@ -70,14 +70,12 @@ public class NodeService extends AbstractComponent implements Closeable {
private volatile Map<String, String> serviceAttributes = emptyMap();
private final Version version;
private final Discovery discovery;
@Inject
public NodeService(Settings settings, ThreadPool threadPool, MonitorService monitorService,
Discovery discovery, TransportService transportService, IndicesService indicesService,
PluginsService pluginService, CircuitBreakerService circuitBreakerService, Version version,
PluginsService pluginService, CircuitBreakerService circuitBreakerService,
ProcessorsRegistry.Builder processorsRegistryBuilder, ClusterService clusterService, SettingsFilter settingsFilter) {
super(settings);
this.threadPool = threadPool;
@ -85,7 +83,6 @@ public class NodeService extends AbstractComponent implements Closeable {
this.transportService = transportService;
this.indicesService = indicesService;
this.discovery = discovery;
this.version = version;
this.pluginService = pluginService;
this.circuitBreakerService = circuitBreakerService;
this.clusterService = clusterService;
@ -126,7 +123,7 @@ public class NodeService extends AbstractComponent implements Closeable {
}
public NodeInfo info() {
return new NodeInfo(version, Build.CURRENT, discovery.localNode(), serviceAttributes,
return new NodeInfo(Version.CURRENT, Build.CURRENT, discovery.localNode(), serviceAttributes,
settings,
monitorService.osService().info(),
monitorService.processService().info(),
@ -141,7 +138,7 @@ public class NodeService extends AbstractComponent implements Closeable {
public NodeInfo info(boolean settings, boolean os, boolean process, boolean jvm, boolean threadPool,
boolean transport, boolean http, boolean plugin, boolean ingest) {
return new NodeInfo(version, Build.CURRENT, discovery.localNode(), serviceAttributes,
return new NodeInfo(Version.CURRENT, Build.CURRENT, discovery.localNode(), serviceAttributes,
settings ? settingsFilter.filter(this.settings) : null,
os ? monitorService.osService().info() : null,
process ? monitorService.processService().info() : null,

View File

@ -1,36 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import org.elasticsearch.common.inject.AbstractModule;
public class PluginsModule extends AbstractModule {
private final PluginsService pluginsService;
public PluginsModule(PluginsService pluginsService) {
this.pluginsService = pluginsService;
}
@Override
protected void configure() {
bind(PluginsService.class).toInstance(pluginsService);
}
}

View File

@ -39,9 +39,9 @@ public class CompiledScript {
public CompiledScript(ScriptService.ScriptType type, String name, String lang, Object compiled) {
this.type = type;
this.name = name;
this.lang = lang;
this.compiled = compiled;
}
this.lang = lang;
this.compiled = compiled;
}
/**
* Method to get the type of language.

View File

@ -90,11 +90,6 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri
public void close() {
}
@Override
public void scriptRemoved(CompiledScript script) {
// Nothing to do here
}
@Override
public boolean isInlineScriptEnabled() {
return true;

View File

@ -23,7 +23,6 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.search.lookup.SearchLookup;
import java.io.Closeable;
import java.util.List;
import java.util.Map;
/**
@ -49,13 +48,6 @@ public interface ScriptEngineService extends Closeable {
SearchScript search(CompiledScript compiledScript, SearchLookup lookup, @Nullable Map<String, Object> vars);
/**
* Handler method called when a script is removed from the Guava cache.
*
* The passed script may be null if it has already been garbage collected.
* */
void scriptRemoved(@Nullable CompiledScript script);
/**
* Returns <code>true</code> if this scripting engine can safely accept inline scripts by default. The default is <code>false</code>
*/

View File

@ -507,16 +507,10 @@ public class ScriptService extends AbstractComponent implements Closeable {
private class ScriptCacheRemovalListener implements RemovalListener<CacheKey, CompiledScript> {
@Override
public void onRemoval(RemovalNotification<CacheKey, CompiledScript> notification) {
scriptMetrics.onCacheEviction();
for (ScriptEngineService service : scriptEngines) {
try {
service.scriptRemoved(notification.getValue());
} catch (Exception e) {
logger.warn("exception calling script removal listener for script service", e);
// We don't rethrow because Guava would just catch the
// exception and log it, which we have already done
}
if (logger.isDebugEnabled()) {
logger.debug("removed {} from cache, reason: {}", notification.getValue(), notification.getRemovalReason());
}
scriptMetrics.onCacheEviction();
}
}

View File

@ -77,7 +77,6 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
public static final String LOCAL_TRANSPORT_THREAD_NAME_PREFIX = "local_transport";
final ThreadPool threadPool;
private final ThreadPoolExecutor workers;
private final Version version;
private volatile TransportServiceAdapter transportServiceAdapter;
private volatile BoundTransportAddress boundAddress;
private volatile LocalTransportAddress localAddress;
@ -92,11 +91,10 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
public static final String TRANSPORT_LOCAL_QUEUE = "transport.local.queue";
@Inject
public LocalTransport(Settings settings, ThreadPool threadPool, Version version,
public LocalTransport(Settings settings, ThreadPool threadPool,
NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) {
super(settings);
this.threadPool = threadPool;
this.version = version;
int workerCount = this.settings.getAsInt(TRANSPORT_LOCAL_WORKERS, EsExecutors.boundedNumberOfProcessors(settings));
int queueSize = this.settings.getAsInt(TRANSPORT_LOCAL_QUEUE, -1);
logger.debug("creating [{}] workers, queue_size [{}]", workerCount, queueSize);
@ -207,7 +205,7 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
@Override
public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request,
TransportRequestOptions options) throws IOException, TransportException {
final Version version = Version.smallest(node.getVersion(), this.version);
final Version version = Version.smallest(node.getVersion(), getVersion());
try (BytesStreamOutput stream = new BytesStreamOutput()) {
stream.setVersion(version);
@ -404,4 +402,8 @@ public class LocalTransport extends AbstractLifecycleComponent<Transport> implem
public List<String> getLocalAddresses() {
return Collections.singletonList("0.0.0.0");
}
protected Version getVersion() { // for tests
return Version.CURRENT;
}
}
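
With the injected Version field gone, a test that needs to simulate a different node version is expected to override the new getVersion() hook. A hedged sketch of such an override, using only the LocalTransport constructor signature visible in this diff; the class name and the pinned version are illustrative:

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.local.LocalTransport;

/** Hypothetical test transport that pretends to run a different node version. */
public class FakeVersionLocalTransport extends LocalTransport {

    private final Version fakeVersion;

    public FakeVersionLocalTransport(Settings settings, ThreadPool threadPool,
                                     NamedWriteableRegistry namedWriteableRegistry,
                                     CircuitBreakerService circuitBreakerService, Version fakeVersion) {
        super(settings, threadPool, namedWriteableRegistry, circuitBreakerService);
        this.fakeVersion = fakeVersion;
    }

    @Override
    protected Version getVersion() {
        // sendRequest() negotiates Version.smallest(node.getVersion(), getVersion()),
        // so this controls the wire version used by the test transport
        return fakeVersion;
    }
}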

View File

@ -210,7 +210,6 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
intSetting("transport.netty.boss_count", 1, 1, Property.NodeScope);
protected final NetworkService networkService;
protected final Version version;
protected final boolean blockingClient;
protected final TimeValue connectTimeout;
@ -254,13 +253,12 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
final ScheduledPing scheduledPing;
@Inject
public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, Version version,
public NettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) {
super(settings);
this.threadPool = threadPool;
this.networkService = networkService;
this.bigArrays = bigArrays;
this.version = version;
this.workerCount = WORKER_COUNT.get(settings);
this.blockingClient = TCP_BLOCKING_CLIENT.get(settings);
@ -894,7 +892,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
// we pick the smallest of the 2, to support both backward and forward compatibility
// note, this is the only place we need to do this, since from here on, we use the serialized version
// as the version to use also when the node receiving this request will send the response with
Version version = Version.smallest(this.version, node.getVersion());
Version version = Version.smallest(getCurrentVersion(), node.getVersion());
stream.setVersion(version);
threadPool.getThreadContext().writeTo(stream);
@ -1401,4 +1399,9 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem
}
}
}
protected Version getCurrentVersion() {
// this is just for tests to mock stuff like the node's version - tests can override this internally
return Version.CURRENT;
}
}

View File

@ -19,7 +19,6 @@
package org.elasticsearch.tribe;
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.Node;
@ -32,6 +31,6 @@ import java.util.Collections;
*/
class TribeClientNode extends Node {
TribeClientNode(Settings settings) {
super(new Environment(settings), Version.CURRENT, Collections.<Class<? extends Plugin>>emptyList());
super(new Environment(settings), Collections.<Class<? extends Plugin>>emptyList());
}
}

View File

@ -1,32 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.tribe;
import org.elasticsearch.common.inject.AbstractModule;
/**
*/
public class TribeModule extends AbstractModule {
@Override
protected void configure() {
bind(TribeService.class).asEagerSingleton();
}
}

View File

@ -40,7 +40,6 @@ import org.elasticsearch.common.Priority;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.regex.Regex;
@ -175,7 +174,6 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
private final List<Node> nodes = new CopyOnWriteArrayList<>();
@Inject
public TribeService(Settings settings, ClusterService clusterService) {
super(settings);
this.clusterService = clusterService;

View File

@ -1,31 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.watcher;
import org.elasticsearch.common.inject.AbstractModule;
/**
*
*/
public class ResourceWatcherModule extends AbstractModule {
@Override
protected void configure() {
bind(ResourceWatcherService.class).asEagerSingleton();
}
}

View File

@ -167,7 +167,7 @@ public abstract class TaskManagerTestCase extends ESTestCase {
public TestNode(String name, ThreadPool threadPool, Settings settings) {
clusterService = createClusterService(threadPool);
transportService = new TransportService(settings,
new LocalTransport(settings, threadPool, Version.CURRENT, new NamedWriteableRegistry(),
new LocalTransport(settings, threadPool, new NamedWriteableRegistry(),
new NoneCircuitBreakerService()), threadPool) {
@Override
protected TaskManager createTaskManager() {

View File

@ -70,7 +70,8 @@ public class CreateIndexIT extends ESIntegTestCase {
prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 4L)).get();
fail();
} catch (IllegalArgumentException ex) {
assertEquals("unknown setting [index.creation_date]", ex.getMessage());
assertEquals("unknown setting [index.creation_date] please check that any required plugins are installed, or check the " +
"breaking changes documentation for removed settings", ex.getMessage());
}
}
@ -165,7 +166,8 @@ public class CreateIndexIT extends ESIntegTestCase {
.get();
fail("should have thrown an exception about the shard count");
} catch (IllegalArgumentException e) {
assertEquals("unknown setting [index.unknown.value]", e.getMessage());
assertEquals("unknown setting [index.unknown.value] please check that any required plugins are installed, or check the" +
" breaking changes documentation for removed settings", e.getMessage());
}
}
@ -211,13 +213,16 @@ public class CreateIndexIT extends ESIntegTestCase {
@Override
public void run() {
try {
client().prepareIndex("test", "test").setSource("index_version", indexVersion.get()).get(); // recreate that index
// recreate that index
client().prepareIndex("test", "test").setSource("index_version", indexVersion.get()).get();
synchronized (indexVersionLock) {
// we sync here since we have to ensure that all indexing operations below for a given ID are done before we increment the
// index version otherwise a doc that is in-flight could make it into an index that it was supposed to be deleted for and our assertion fail...
// we sync here since we have to ensure that all indexing operations below for a given ID are done before
// we increment the index version; otherwise a doc that is in-flight could make it into an index that it
// was supposed to be deleted from, and our assertion would fail...
indexVersion.incrementAndGet();
}
assertAcked(client().admin().indices().prepareDelete("test").get()); // from here on all docs with index_version == 0|1 must be gone!!!! only 2 are ok;
// from here on all docs with index_version == 0|1 must be gone!!!! only 2 are ok;
assertAcked(client().admin().indices().prepareDelete("test").get());
} finally {
latch.countDown();
}
@ -249,8 +254,10 @@ public class CreateIndexIT extends ESIntegTestCase {
latch.await();
refresh();
// we only really assert that we never reuse segments of old indices or anything like this here and that nothing fails with crazy exceptions
SearchResponse expected = client().prepareSearch("test").setIndicesOptions(IndicesOptions.lenientExpandOpen()).setQuery(new RangeQueryBuilder("index_version").from(indexVersion.get(), true)).get();
// we only really assert that we never reuse segments of old indices or anything like this here and that nothing fails with
// crazy exceptions
SearchResponse expected = client().prepareSearch("test").setIndicesOptions(IndicesOptions.lenientExpandOpen())
.setQuery(new RangeQueryBuilder("index_version").from(indexVersion.get(), true)).get();
SearchResponse all = client().prepareSearch("test").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get();
assertEquals(expected + " vs. " + all, expected.getHits().getTotalHits(), all.getHits().getTotalHits());
logger.info("total: {}", expected.getHits().getTotalHits());
@ -283,7 +290,8 @@ public class CreateIndexIT extends ESIntegTestCase {
}
public void testRestartIndexCreationAfterFullClusterRestart() throws Exception {
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none")).get();
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable",
"none")).get();
client().admin().indices().prepareCreate("test").setSettings(indexSettings()).get();
internalCluster().fullRestart();
ensureGreen("test");

View File

@ -158,7 +158,6 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
null,
null,
null,
Version.CURRENT,
null,
new HashSet<>(),
null,
@ -189,7 +188,6 @@ public class MetaDataIndexTemplateServiceTests extends ESSingleNodeTestCase {
clusterService,
indicesService,
null,
Version.CURRENT,
null,
new HashSet<>(),
null,

View File

@ -112,7 +112,7 @@ public class MainActionTests extends ESTestCase {
when(clusterService.state()).thenReturn(state);
TransportMainAction action = new TransportMainAction(settings, mock(ThreadPool.class), mock(TransportService.class),
mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), clusterService, Version.CURRENT);
mock(ActionFilters.class), mock(IndexNameExpressionResolver.class), clusterService);
AtomicReference<MainResponse> responseRef = new AtomicReference<>();
action.doExecute(new MainRequest(), new ActionListener<MainResponse>() {
@Override

View File

@ -89,7 +89,7 @@ public class BroadcastReplicationTests extends ESTestCase {
@Before
public void setUp() throws Exception {
super.setUp();
LocalTransport transport = new LocalTransport(Settings.EMPTY, threadPool, Version.CURRENT, new NamedWriteableRegistry(), circuitBreakerService);
LocalTransport transport = new LocalTransport(Settings.EMPTY, threadPool, new NamedWriteableRegistry(), circuitBreakerService);
clusterService = createClusterService(threadPool);
transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
transportService.start();

View File

@ -102,7 +102,7 @@ public class TransportClientNodesServiceTests extends ESTestCase {
transportService.start();
transportService.acceptIncomingRequests();
transportClientNodesService =
new TransportClientNodesService(settings, transportService, threadPool, Version.CURRENT);
new TransportClientNodesService(settings, transportService, threadPool);
this.nodesCount = randomIntBetween(1, 10);
for (int i = 0; i < nodesCount; i++) {
transportClientNodesService.addTransportAddresses(new LocalTransportAddress("node" + i));

View File

@ -212,7 +212,6 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
null,
null,
null,
Version.CURRENT,
null,
new HashSet<>(),
null,

View File

@ -54,7 +54,7 @@ public class DiscoveryNodeServiceTests extends ESTestCase {
}
}
}
DiscoveryNodeService discoveryNodeService = new DiscoveryNodeService(builder.build(), Version.CURRENT);
DiscoveryNodeService discoveryNodeService = new DiscoveryNodeService(builder.build());
DiscoveryNode discoveryNode = discoveryNodeService.buildLocalNode(DummyTransportAddress.INSTANCE);
assertThat(discoveryNode.getRoles(), equalTo(selectedRoles));
assertThat(discoveryNode.getAttributes(), equalTo(expectedAttributes));
@ -68,7 +68,7 @@ public class DiscoveryNodeServiceTests extends ESTestCase {
builder.put("node.attr.attr" + i, "value" + i);
expectedAttributes.put("attr" + i, "value" + i);
}
DiscoveryNodeService discoveryNodeService = new DiscoveryNodeService(builder.build(), Version.CURRENT);
DiscoveryNodeService discoveryNodeService = new DiscoveryNodeService(builder.build());
int numCustomAttributes = randomIntBetween(0, 5);
Map<String, String> customAttributes = new HashMap<>();
for (int i = 0; i < numCustomAttributes; i++) {

View File

@ -58,7 +58,7 @@ public class NetworkModuleTests extends ModuleTestCase {
static class FakeTransport extends AssertingLocalTransport {
public FakeTransport() {
super(null, null, null, null, null);
super(null, null, null, null);
}
}

View File

@ -202,20 +202,22 @@ public class ScopedSettingsTests extends ESTestCase {
IndexScopedSettings settings = new IndexScopedSettings(
Settings.EMPTY,
IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
String unknownMsgSuffix = " please check that any required plugins are installed, or check the breaking changes documentation for" +
" removed settings";
settings.validate(Settings.builder().put("index.store.type", "boom"));
settings.validate(Settings.builder().put("index.store.type", "boom").build());
try {
settings.validate(Settings.builder().put("index.store.type", "boom", "i.am.not.a.setting", true));
fail();
} catch (IllegalArgumentException e) {
assertEquals("unknown setting [i.am.not.a.setting]", e.getMessage());
assertEquals("unknown setting [i.am.not.a.setting]" + unknownMsgSuffix, e.getMessage());
}
try {
settings.validate(Settings.builder().put("index.store.type", "boom", "i.am.not.a.setting", true).build());
fail();
} catch (IllegalArgumentException e) {
assertEquals("unknown setting [i.am.not.a.setting]", e.getMessage());
assertEquals("unknown setting [i.am.not.a.setting]" + unknownMsgSuffix, e.getMessage());
}
try {

View File

@ -51,7 +51,8 @@ public class SettingsModuleTests extends ModuleTestCase {
() -> new SettingsModule(settings));
assertEquals("Failed to parse value [[2.0]] for setting [cluster.routing.allocation.balance.shard]", ex.getMessage());
assertEquals(1, ex.getSuppressed().length);
assertEquals("unknown setting [some.foo.bar]", ex.getSuppressed()[0].getMessage());
assertEquals("unknown setting [some.foo.bar] please check that any required plugins are installed, or check the breaking " +
"changes documentation for removed settings", ex.getSuppressed()[0].getMessage());
}
{
@ -127,7 +128,8 @@ public class SettingsModuleTests extends ModuleTestCase {
new SettingsModule(settings);
fail();
} catch (IllegalArgumentException ex) {
assertEquals("tribe.blocks validation failed: unknown setting [wtf]", ex.getMessage());
assertEquals("tribe.blocks validation failed: unknown setting [wtf] please check that any required plugins are" +
" installed, or check the breaking changes documentation for removed settings", ex.getMessage());
}
}
}

View File

@ -33,8 +33,8 @@ public class DiscoveryModuleTests extends ModuleTestCase {
public static class DummyMasterElectionService extends ElectMasterService {
public DummyMasterElectionService(Settings settings, Version version) {
super(settings, version);
public DummyMasterElectionService(Settings settings) {
super(settings);
}
}

View File

@ -141,7 +141,12 @@ public class ZenFaultDetectionTests extends ESTestCase {
// trace zenfd actions but keep the default otherwise
.put(TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), singleton(TransportLivenessAction.NAME))
.build(),
new LocalTransport(settings, threadPool, version, namedWriteableRegistry, circuitBreakerService),
new LocalTransport(settings, threadPool, namedWriteableRegistry, circuitBreakerService) {
@Override
protected Version getVersion() {
return version;
}
},
threadPool);
transportService.start();
transportService.acceptIncomingRequests();

View File

@ -35,15 +35,12 @@ import java.util.Set;
public class ElectMasterServiceTests extends ESTestCase {
ElectMasterService electMasterService() {
return new ElectMasterService(Settings.EMPTY, Version.CURRENT);
return new ElectMasterService(Settings.EMPTY);
}
List<DiscoveryNode> generateRandomNodes() {
int count = scaledRandomIntBetween(1, 100);
ArrayList<DiscoveryNode> nodes = new ArrayList<>(count);
for (int i = 0; i < count; i++) {
Set<DiscoveryNode.Role> roles = new HashSet<>();
if (randomBoolean()) {

View File

@ -99,7 +99,7 @@ public class NodeJoinControllerTests extends ESTestCase {
setState(clusterService, ClusterState.builder(clusterService.state()).nodes(
DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.getId())));
nodeJoinController = new NodeJoinController(clusterService, new NoopAllocationService(Settings.EMPTY),
new ElectMasterService(Settings.EMPTY, Version.CURRENT),
new ElectMasterService(Settings.EMPTY),
new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
Settings.EMPTY);
}

View File

@ -320,7 +320,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase {
}
public void testJoinElectedMaster_incompatibleMinVersion() {
ElectMasterService electMasterService = new ElectMasterService(Settings.EMPTY, Version.CURRENT);
ElectMasterService electMasterService = new ElectMasterService(Settings.EMPTY);
DiscoveryNode node = new DiscoveryNode("_node_id", new LocalTransportAddress("_id"), emptyMap(),
Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT);

View File

@ -68,7 +68,7 @@ public class UnicastZenPingIT extends ESTestCase {
ThreadPool threadPool = new TestThreadPool(getClass().getName());
NetworkService networkService = new NetworkService(settings);
ElectMasterService electMasterService = new ElectMasterService(settings, Version.CURRENT);
ElectMasterService electMasterService = new ElectMasterService(settings);
NetworkHandle handleA = startServices(settings, threadPool, networkService, "UZP_A", Version.CURRENT);
NetworkHandle handleB = startServices(settings, threadPool, networkService, "UZP_B", Version.CURRENT);
@ -88,7 +88,7 @@ public class UnicastZenPingIT extends ESTestCase {
.build();
Settings hostsSettingsMismatch = Settings.builder().put(hostsSettings).put(settingsMismatch).build();
UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, handleA.transportService, Version.CURRENT, electMasterService, null);
UnicastZenPing zenPingA = new UnicastZenPing(hostsSettings, threadPool, handleA.transportService, electMasterService, null);
zenPingA.setPingContextProvider(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
@ -102,7 +102,7 @@ public class UnicastZenPingIT extends ESTestCase {
});
zenPingA.start();
UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, handleB.transportService, Version.CURRENT, electMasterService, null);
UnicastZenPing zenPingB = new UnicastZenPing(hostsSettings, threadPool, handleB.transportService, electMasterService, null);
zenPingB.setPingContextProvider(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
@ -116,7 +116,13 @@ public class UnicastZenPingIT extends ESTestCase {
});
zenPingB.start();
UnicastZenPing zenPingC = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleC.transportService, versionD, electMasterService, null);
UnicastZenPing zenPingC = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleC.transportService, electMasterService,
null) {
@Override
protected Version getVersion() {
return versionD;
}
};
zenPingC.setPingContextProvider(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
@ -130,7 +136,7 @@ public class UnicastZenPingIT extends ESTestCase {
});
zenPingC.start();
UnicastZenPing zenPingD = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleD.transportService, Version.CURRENT, electMasterService, null);
UnicastZenPing zenPingD = new UnicastZenPing(hostsSettingsMismatch, threadPool, handleD.transportService, electMasterService, null);
zenPingD.setPingContextProvider(new PingContextProvider() {
@Override
public DiscoveryNodes nodes() {
@ -191,8 +197,15 @@ public class UnicastZenPingIT extends ESTestCase {
}
}
private NetworkHandle startServices(Settings settings, ThreadPool threadPool, NetworkService networkService, String nodeId, Version version) {
NettyTransport transport = new NettyTransport(settings, threadPool, networkService, BigArrays.NON_RECYCLING_INSTANCE, version, new NamedWriteableRegistry(), new NoneCircuitBreakerService());
private NetworkHandle startServices(Settings settings, ThreadPool threadPool, NetworkService networkService, String nodeId,
Version version) {
NettyTransport transport = new NettyTransport(settings, threadPool, networkService, BigArrays.NON_RECYCLING_INSTANCE,
new NamedWriteableRegistry(), new NoneCircuitBreakerService()) {
@Override
protected Version getCurrentVersion() {
return version;
}
};
final TransportService transportService = new TransportService(settings, transport, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
@ -208,7 +221,8 @@ public class UnicastZenPingIT extends ESTestCase {
public void onNodeDisconnected(DiscoveryNode node) {
}
});
final DiscoveryNode node = new DiscoveryNode(nodeId, transportService.boundAddress().publishAddress(), emptyMap(), emptySet(), version);
final DiscoveryNode node = new DiscoveryNode(nodeId, transportService.boundAddress().publishAddress(), emptyMap(), emptySet(),
version);
transportService.setLocalNode(node);
return new NetworkHandle((InetSocketTransportAddress)transport.boundAddress().publishAddress(), transportService, node, counters);
}
@ -219,7 +233,8 @@ public class UnicastZenPingIT extends ESTestCase {
public final DiscoveryNode node;
public final ConcurrentMap<TransportAddress, AtomicInteger> counters;
public NetworkHandle(InetSocketTransportAddress address, TransportService transportService, DiscoveryNode discoveryNode, ConcurrentMap<TransportAddress, AtomicInteger> counters) {
public NetworkHandle(InetSocketTransportAddress address, TransportService transportService, DiscoveryNode discoveryNode,
ConcurrentMap<TransportAddress, AtomicInteger> counters) {
this.address = address;
this.transportService = transportService;
this.node = discoveryNode;

View File

@ -145,26 +145,22 @@ public class PublishClusterStateActionTests extends ESTestCase {
}
public MockNode createMockNode(final String name) throws Exception {
return createMockNode(name, Settings.EMPTY, Version.CURRENT);
return createMockNode(name, Settings.EMPTY);
}
public MockNode createMockNode(String name, Settings settings) throws Exception {
return createMockNode(name, settings, Version.CURRENT);
return createMockNode(name, settings, null);
}
public MockNode createMockNode(final String name, Settings settings, Version version) throws Exception {
return createMockNode(name, settings, version, null);
}
public MockNode createMockNode(String name, Settings settings, Version version, @Nullable ClusterStateListener listener) throws Exception {
public MockNode createMockNode(String name, Settings settings, @Nullable ClusterStateListener listener) throws Exception {
settings = Settings.builder()
.put("name", name)
.put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
.put(settings)
.build();
MockTransportService service = buildTransportService(settings, version);
DiscoveryNodeService discoveryNodeService = new DiscoveryNodeService(settings, version);
MockTransportService service = buildTransportService(settings);
DiscoveryNodeService discoveryNodeService = new DiscoveryNodeService(settings);
DiscoveryNode discoveryNode = discoveryNodeService.buildLocalNode(service.boundAddress().publishAddress());
MockNode node = new MockNode(discoveryNode, service, listener, logger);
node.action = buildPublishClusterStateAction(settings, service, () -> node.clusterState, node);
@ -232,8 +228,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
terminate(threadPool);
}
protected MockTransportService buildTransportService(Settings settings, Version version) {
MockTransportService transportService = MockTransportService.local(Settings.EMPTY, version, threadPool);
protected MockTransportService buildTransportService(Settings settings) {
MockTransportService transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
return transportService;
@ -257,8 +253,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
}
public void testSimpleClusterStatePublishing() throws Exception {
MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, Version.CURRENT).setAsMaster();
MockNode nodeB = createMockNode("nodeB", Settings.EMPTY, Version.CURRENT);
MockNode nodeA = createMockNode("nodeA", Settings.EMPTY).setAsMaster();
MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
// Initial cluster state
ClusterState clusterState = nodeA.clusterState;
@ -286,7 +282,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
// Adding new node - this node should get full cluster state while nodeB should still be getting diffs
MockNode nodeC = createMockNode("nodeC", Settings.EMPTY, Version.CURRENT);
MockNode nodeC = createMockNode("nodeC", Settings.EMPTY);
// cluster state update 3 - register node C
previousClusterState = clusterState;
@ -336,14 +332,11 @@ public class PublishClusterStateActionTests extends ESTestCase {
}
public void testUnexpectedDiffPublishing() throws Exception {
MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, Version.CURRENT, new ClusterStateListener() {
@Override
public void clusterChanged(ClusterChangedEvent event) {
fail("Shouldn't send cluster state to myself");
}
MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, event -> {
fail("Shouldn't send cluster state to myself");
}).setAsMaster();
MockNode nodeB = createMockNode("nodeB", Settings.EMPTY, Version.CURRENT);
MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
// Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).put(nodeB.discoveryNode).build();
@ -362,14 +355,14 @@ public class PublishClusterStateActionTests extends ESTestCase {
public void testDisablingDiffPublishing() throws Exception {
Settings noDiffPublishingSettings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false).build();
MockNode nodeA = createMockNode("nodeA", noDiffPublishingSettings, Version.CURRENT, new ClusterStateListener() {
MockNode nodeA = createMockNode("nodeA", noDiffPublishingSettings, new ClusterStateListener() {
@Override
public void clusterChanged(ClusterChangedEvent event) {
fail("Shouldn't send cluster state to myself");
}
});
MockNode nodeB = createMockNode("nodeB", noDiffPublishingSettings, Version.CURRENT, new ClusterStateListener() {
MockNode nodeB = createMockNode("nodeB", noDiffPublishingSettings, new ClusterStateListener() {
@Override
public void clusterChanged(ClusterChangedEvent event) {
assertFalse(event.state().wasReadFromDiff());
@ -400,7 +393,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
int numberOfNodes = randomIntBetween(2, 10);
int numberOfIterations = scaledRandomIntBetween(5, 50);
Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), randomBoolean()).build();
MockNode master = createMockNode("node0", settings, Version.CURRENT, new ClusterStateListener() {
MockNode master = createMockNode("node0", settings, new ClusterStateListener() {
@Override
public void clusterChanged(ClusterChangedEvent event) {
assertProperMetaDataForVersion(event.state().metaData(), event.state().version());
@ -409,7 +402,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(master.nodes());
for (int i = 1; i < numberOfNodes; i++) {
final String name = "node" + i;
final MockNode node = createMockNode(name, settings, Version.CURRENT, new ClusterStateListener() {
final MockNode node = createMockNode(name, settings, new ClusterStateListener() {
@Override
public void clusterChanged(ClusterChangedEvent event) {
assertProperMetaDataForVersion(event.state().metaData(), event.state().version());
@ -444,14 +437,14 @@ public class PublishClusterStateActionTests extends ESTestCase {
}
public void testSerializationFailureDuringDiffPublishing() throws Exception {
MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, Version.CURRENT, new ClusterStateListener() {
MockNode nodeA = createMockNode("nodeA", Settings.EMPTY, new ClusterStateListener() {
@Override
public void clusterChanged(ClusterChangedEvent event) {
fail("Shouldn't send cluster state to myself");
}
}).setAsMaster();
MockNode nodeB = createMockNode("nodeB", Settings.EMPTY, Version.CURRENT);
MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
// Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).put(nodeB.discoveryNode).build();

View File

@ -74,7 +74,7 @@ public class HttpServerTests extends ESTestCase {
ClusterService clusterService = new ClusterService(Settings.EMPTY,
new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), null);
NodeService nodeService = new NodeService(Settings.EMPTY, null, null, null, null, null, null, null, null, null,
NodeService nodeService = new NodeService(Settings.EMPTY, null, null, null, null, null, null, null, null,
clusterService, null);
httpServer = new HttpServer(settings, httpServerTransport, restController, nodeService, circuitBreakerService);
httpServer.start();

View File

@ -76,7 +76,7 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase {
.build();
clusterService = createClusterService(THREAD_POOL);
transport =
new LocalTransport(settings, THREAD_POOL, Version.CURRENT, new NamedWriteableRegistry(), new NoneCircuitBreakerService());
new LocalTransport(settings, THREAD_POOL, new NamedWriteableRegistry(), new NoneCircuitBreakerService());
transportService = new TransportService(clusterService.getSettings(), transport, THREAD_POOL);
indicesService = getInstanceFromNode(IndicesService.class);
shardStateAction = new ShardStateAction(settings, clusterService, transportService, null, null, THREAD_POOL);

View File

@ -19,13 +19,13 @@
package org.elasticsearch.index.store;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.FileSwitchDirectory;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.store.RateLimitedFSDirectory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.store.SleepingLockWrapper;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
@ -36,6 +36,7 @@ import org.elasticsearch.test.IndexSettingsModule;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
public class FsDirectoryServiceTests extends ESTestCase {
@ -73,4 +74,43 @@ public class FsDirectoryServiceTests extends ESTestCase {
assertFalse(delegate instanceof SleepingLockWrapper);
assertTrue(delegate instanceof SimpleFSDirectory);
}
public void testPreload() throws IOException {
doTestPreload();
doTestPreload("nvd", "dvd", "tim");
doTestPreload("*");
}
private void doTestPreload(String...preload) throws IOException {
Settings build = Settings.builder()
.put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), "mmapfs")
.putArray(IndexModule.INDEX_STORE_PRE_LOAD_SETTING.getKey(), preload)
.build();
IndexSettings settings = IndexSettingsModule.newIndexSettings("foo", build);
IndexStoreConfig config = new IndexStoreConfig(settings.getSettings());
IndexStore store = new IndexStore(settings, config);
Path tempDir = createTempDir().resolve(settings.getUUID()).resolve("0");
Files.createDirectories(tempDir);
ShardPath path = new ShardPath(false, tempDir, tempDir, new ShardId(settings.getIndex(), 0));
FsDirectoryService fsDirectoryService = new FsDirectoryService(settings, store, path);
Directory directory = fsDirectoryService.newDirectory();
assertTrue(directory instanceof RateLimitedFSDirectory);
RateLimitedFSDirectory rateLimitingDirectory = (RateLimitedFSDirectory) directory;
Directory delegate = rateLimitingDirectory.getDelegate();
assertFalse(delegate instanceof SleepingLockWrapper);
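// Expected wiring for each case: no preload extensions yields a plain MMapDirectory with preload disabled;
// a "*" wildcard yields an MMapDirectory with preload enabled; specific extensions yield a FileSwitchDirectory
// whose primary (preloading) MMapDirectory handles the listed extensions while the secondary does not preload.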
if (preload.length == 0) {
assertTrue(delegate.toString(), delegate instanceof MMapDirectory);
assertFalse(((MMapDirectory) delegate).getPreload());
} else if (Arrays.asList(preload).contains("*")) {
assertTrue(delegate.toString(), delegate instanceof MMapDirectory);
assertTrue(((MMapDirectory) delegate).getPreload());
} else {
assertTrue(delegate.toString(), delegate instanceof FileSwitchDirectory);
FileSwitchDirectory fsd = (FileSwitchDirectory) delegate;
assertTrue(fsd.getPrimaryDir() instanceof MMapDirectory);
assertTrue(((MMapDirectory) fsd.getPrimaryDir()).getPreload());
assertTrue(fsd.getSecondaryDir() instanceof MMapDirectory);
assertFalse(((MMapDirectory) fsd.getSecondaryDir()).getPreload());
}
}
}

View File

@ -77,6 +77,7 @@ public class IndexStoreTests extends ESTestCase {
assertTrue(type + " " + directory.toString(), directory instanceof SimpleFSDirectory);
break;
case FS:
case DEFAULT:
if (Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
assertTrue(directory.toString(), directory instanceof MMapDirectory);
} else if (Constants.WINDOWS) {
@ -85,19 +86,6 @@ public class IndexStoreTests extends ESTestCase {
assertTrue(directory.toString(), directory instanceof NIOFSDirectory);
}
break;
case DEFAULT:
if (Constants.WINDOWS) {
if (Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
assertTrue(type + " " + directory.toString(), directory instanceof MMapDirectory);
} else {
assertTrue(type + " " + directory.toString(), directory instanceof SimpleFSDirectory);
}
} else if (Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
assertTrue(type + " " + directory.toString(), directory instanceof FileSwitchDirectory);
} else {
assertTrue(type + " " + directory.toString(), directory instanceof NIOFSDirectory);
}
break;
default:
fail();
}

View File

@ -158,7 +158,7 @@ public class ClusterStateChanges {
MetaDataUpdateSettingsService metaDataUpdateSettingsService = new MetaDataUpdateSettingsService(settings, clusterService,
allocationService, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, new IndexNameExpressionResolver(settings));
MetaDataCreateIndexService createIndexService = new MetaDataCreateIndexService(settings, clusterService, indicesService,
allocationService, Version.CURRENT, new AliasValidator(settings), Collections.emptySet(), environment,
allocationService, new AliasValidator(settings), Collections.emptySet(), environment,
nodeServicesProvider, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS);
transportCloseIndexAction = new TransportCloseIndexAction(settings, transportService, clusterService, threadPool,

View File

@ -56,7 +56,8 @@ public class SimpleIndexStateIT extends ESIntegTestCase {
ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.OPEN));
assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(numShards.numPrimaries));
assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(numShards.totalNumShards));
assertEquals(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(),
numShards.totalNumShards);
logger.info("--> indexing a simple document");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
@ -88,7 +89,8 @@ public class SimpleIndexStateIT extends ESIntegTestCase {
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.OPEN));
assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(numShards.numPrimaries));
assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(numShards.totalNumShards));
assertEquals(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(),
numShards.totalNumShards);
logger.info("--> indexing a simple document");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
@ -119,7 +121,8 @@ public class SimpleIndexStateIT extends ESIntegTestCase {
ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.OPEN));
assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(numShards.numPrimaries));
assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(numShards.totalNumShards));
assertEquals(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(),
numShards.totalNumShards);
logger.info("--> indexing a simple document");
client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
@ -143,7 +146,8 @@ public class SimpleIndexStateIT extends ESIntegTestCase {
}
logger.info("--> creating test index with valid settings ");
CreateIndexResponse response = client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("number_of_shards", 1)).get();
CreateIndexResponse response = client().admin().indices().prepareCreate("test")
.setSettings(Settings.builder().put("number_of_shards", 1)).get();
assertThat(response.isAcknowledged(), equalTo(true));
}
}

View File

@ -322,7 +322,8 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
.setTemplate("te*")
.setSettings(Settings.builder().put("does_not_exist", "test"))
.get());
assertEquals("unknown setting [index.does_not_exist]", e.getMessage());
assertEquals("unknown setting [index.does_not_exist] please check that any required plugins are" +
" installed, or check the breaking changes documentation for removed settings", e.getMessage());
response = client().admin().indices().prepareGetTemplates().get();
assertEquals(0, response.getIndexTemplates().size());

View File

@ -28,7 +28,6 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
@ -41,9 +40,7 @@ import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static java.util.Collections.singleton;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
@ -60,7 +57,10 @@ public class NativeScriptTests extends ESTestCase {
SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, Collections.emptyList());
final ThreadPool threadPool = new ThreadPool(settings);
Injector injector = new ModulesBuilder().add(
new EnvironmentModule(new Environment(settings), threadPool),
(b) -> {
b.bind(Environment.class).toInstance(new Environment(settings));
b.bind(ThreadPool.class).toInstance(threadPool);
},
new SettingsModule(settings),
scriptModule).createInjector();

View File

@ -257,12 +257,6 @@ public class ScriptModesTests extends ESTestCase {
@Override
public void close() {
}
@Override
public void scriptRemoved(@Nullable CompiledScript script) {
}
}
}

View File

@ -518,11 +518,6 @@ public class ScriptServiceTests extends ESTestCase {
}
@Override
public void scriptRemoved(CompiledScript script) {
// Nothing to do here
}
@Override
public boolean isInlineScriptEnabled() {
return true;
@ -562,12 +557,6 @@ public class ScriptServiceTests extends ESTestCase {
@Override
public void close() {
}
@Override
public void scriptRemoved(CompiledScript script) {
// Nothing to do here
}
}

View File

@ -95,12 +95,6 @@ public class ScriptSettingsTests extends ESTestCase {
@Override
public void close() {
}
@Override
public void scriptRemoved(@Nullable CompiledScript script) {
}
}

View File

@ -38,7 +38,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.index.query.QueryParseContext;
@ -109,7 +108,11 @@ public class AggregatorParsingTests extends ESTestCase {
List<Setting<?>> scriptSettings = scriptModule.getSettings();
scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, Collections.emptyList());
injector = new ModulesBuilder().add(new EnvironmentModule(new Environment(settings), threadPool), settingsModule
injector = new ModulesBuilder().add(
(b) -> {
b.bind(Environment.class).toInstance(new Environment(settings));
b.bind(ThreadPool.class).toInstance(threadPool);
}, settingsModule
, scriptModule, new IndicesModule(namedWriteableRegistry) {
@Override

View File

@ -42,7 +42,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.index.query.QueryParseContext;
@ -103,53 +102,9 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
*/
@BeforeClass
public static void init() throws IOException {
// we have to prefer CURRENT since with the range of versions we support it's rather unlikely to get the current actually.
Version version = randomBoolean() ? Version.CURRENT
: VersionUtils.randomVersionBetween(random(), Version.V_2_0_0_beta1, Version.CURRENT);
Settings settings = Settings.builder()
.put("node.name", AbstractQueryTestCase.class.toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false)
.build();
namedWriteableRegistry = new NamedWriteableRegistry();
index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_");
Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
final ThreadPool threadPool = new ThreadPool(settings);
final ClusterService clusterService = createClusterService(threadPool);
setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder()
.put(new IndexMetaData.Builder(index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0))));
ScriptModule scriptModule = newTestScriptModule();
List<Setting<?>> scriptSettings = scriptModule.getSettings();
scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, Collections.emptyList());
injector = new ModulesBuilder().add(
new EnvironmentModule(new Environment(settings), threadPool),
settingsModule,
scriptModule,
new IndicesModule(namedWriteableRegistry) {
@Override
protected void configure() {
bindMapperExtension();
}
}, new SearchModule(settings, namedWriteableRegistry) {
@Override
protected void configureSearch() {
// Skip me
}
},
new IndexSettingsModule(index, settings),
new AbstractModule() {
@Override
protected void configure() {
bind(ClusterService.class).toProvider(Providers.of(clusterService));
bind(CircuitBreakerService.class).to(NoneCircuitBreakerService.class);
bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
}
}
).createInjector();
injector = buildInjector(index);
namedWriteableRegistry = injector.getInstance(NamedWriteableRegistry.class);
aggParsers = injector.getInstance(AggregatorParsers.class);
//create some random type with some default field, those types will stick around for all of the subclasses
currentTypes = new String[randomIntBetween(0, 5)];
@ -161,6 +116,59 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
parseFieldMatcher = ParseFieldMatcher.STRICT;
}
public static final Injector buildInjector(Index index) {
// we have to prefer CURRENT since, with the range of versions we support, it's rather unlikely to randomly pick the current one.
Version version = randomBoolean() ? Version.CURRENT
: VersionUtils.randomVersionBetween(random(), Version.V_2_0_0_beta1, Version.CURRENT);
Settings settings = Settings.builder()
.put("node.name", AbstractQueryTestCase.class.toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false)
.build();
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
final ThreadPool threadPool = new ThreadPool(settings);
final ClusterService clusterService = createClusterService(threadPool);
setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder()
.put(new IndexMetaData.Builder(index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0))));
ScriptModule scriptModule = newTestScriptModule();
List<Setting<?>> scriptSettings = scriptModule.getSettings();
scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, Collections.emptyList());
return new ModulesBuilder().add(
(b) -> {
b.bind(Environment.class).toInstance(new Environment(settings));
b.bind(ThreadPool.class).toInstance(threadPool);
},
settingsModule,
scriptModule,
new IndicesModule(namedWriteableRegistry) {
@Override
protected void configure() {
bindMapperExtension();
}
}, new SearchModule(settings, namedWriteableRegistry) {
@Override
protected void configureSearch() {
// Skip me
}
},
new IndexSettingsModule(index, settings),
new AbstractModule() {
@Override
protected void configure() {
bind(ClusterService.class).toProvider(Providers.of(clusterService));
bind(CircuitBreakerService.class).to(NoneCircuitBreakerService.class);
bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
}
}
).createInjector();
}
@AfterClass
public static void afterClass() throws Exception {
injector.getInstance(ClusterService.class).close();

View File

@ -19,55 +19,29 @@
package org.elasticsearch.search.aggregations;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.inject.util.Providers;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder;
import org.elasticsearch.test.AbstractQueryTestCase;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import org.elasticsearch.test.InternalSettingsPlugin;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
import static org.hamcrest.Matchers.equalTo;
public abstract class BasePipelineAggregationTestCase<AF extends AbstractPipelineAggregationBuilder<AF>> extends ESTestCase {
@ -77,9 +51,6 @@ public abstract class BasePipelineAggregationTestCase<AF extends AbstractPipelin
protected static final String DOUBLE_FIELD_NAME = "mapped_double";
protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean";
protected static final String DATE_FIELD_NAME = "mapped_date";
protected static final String OBJECT_FIELD_NAME = "mapped_object";
protected static final String[] mappedFieldNames = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME,
DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, OBJECT_FIELD_NAME};
private static Injector injector;
private static Index index;
@ -103,52 +74,9 @@ public abstract class BasePipelineAggregationTestCase<AF extends AbstractPipelin
*/
@BeforeClass
public static void init() throws IOException {
// we have to prefer CURRENT since with the range of versions we support it's rather unlikely to get the current actually.
Version version = randomBoolean() ? Version.CURRENT
: VersionUtils.randomVersionBetween(random(), Version.V_2_0_0_beta1, Version.CURRENT);
Settings settings = Settings.builder()
.put("node.name", AbstractQueryTestCase.class.toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false)
.build();
namedWriteableRegistry = new NamedWriteableRegistry();
index = new Index(randomAsciiOfLengthBetween(1, 10), "_na_");
Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, version).build();
final ThreadPool threadPool = new ThreadPool(settings);
final ClusterService clusterService = createClusterService(threadPool);
setState(clusterService, new ClusterState.Builder(clusterService.state()).metaData(new MetaData.Builder()
.put(new IndexMetaData.Builder(index.getName()).settings(indexSettings).numberOfShards(1).numberOfReplicas(0))));
ScriptModule scriptModule = newTestScriptModule();
List<Setting<?>> scriptSettings = scriptModule.getSettings();
scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, Collections.emptyList());
injector = new ModulesBuilder().add(
new EnvironmentModule(new Environment(settings),threadPool),
settingsModule,
scriptModule,
new IndicesModule(namedWriteableRegistry) {
@Override
protected void configure() {
bindMapperExtension();
}
}, new SearchModule(settings, namedWriteableRegistry) {
@Override
protected void configureSearch() {
// Skip me
}
},
new IndexSettingsModule(index, settings),
new AbstractModule() {
@Override
protected void configure() {
bind(ClusterService.class).toProvider(Providers.of(clusterService));
bind(CircuitBreakerService.class).to(NoneCircuitBreakerService.class);
bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
}
}
).createInjector();
injector = BaseAggregationTestCase.buildInjector(index);
namedWriteableRegistry = injector.getInstance(NamedWriteableRegistry.class);
aggParsers = injector.getInstance(AggregatorParsers.class);
//create some random type with some default field, those types will stick around for all of the subclasses
currentTypes = new String[randomIntBetween(0, 5)];

View File

@ -456,10 +456,6 @@ public class AvgIT extends AbstractNumericTestCase {
}
};
}
@Override
public void scriptRemoved(CompiledScript script) {
}
}
/**
@ -564,10 +560,6 @@ public class AvgIT extends AbstractNumericTestCase {
};
}
@Override
public void scriptRemoved(CompiledScript script) {
}
@Override
public boolean isInlineScriptEnabled() {
return true;

View File

@ -453,10 +453,6 @@ public class SumIT extends AbstractNumericTestCase {
};
}
@Override
public void scriptRemoved(CompiledScript script) {
}
@Override
public boolean isInlineScriptEnabled() {
return true;
@ -573,10 +569,6 @@ public class SumIT extends AbstractNumericTestCase {
};
}
@Override
public void scriptRemoved(CompiledScript script) {
}
@Override
public boolean isInlineScriptEnabled() {
return true;

View File

@ -318,10 +318,6 @@ public class ValueCountIT extends ESIntegTestCase {
};
}
@Override
public void scriptRemoved(CompiledScript script) {
}
@Override
public boolean isInlineScriptEnabled() {
return true;

View File

@ -46,7 +46,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryParseContext;
@ -132,7 +131,10 @@ public class SearchSourceBuilderTests extends ESTestCase {
scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, Collections.emptyList());
injector = new ModulesBuilder().add(
new EnvironmentModule(new Environment(settings), threadPool), settingsModule,
(b) -> {
b.bind(Environment.class).toInstance(new Environment(settings));
b.bind(ThreadPool.class).toInstance(threadPool);
}, settingsModule,
scriptModule, new IndicesModule(namedWriteableRegistry) {
@Override
protected void configure() {

View File

@ -66,7 +66,7 @@ public class NettySizeHeaderFrameDecoderTests extends ESTestCase {
threadPool = new ThreadPool(settings);
NetworkService networkService = new NetworkService(settings);
BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
nettyTransport = new NettyTransport(settings, threadPool, networkService, bigArrays, Version.CURRENT, new NamedWriteableRegistry(),
nettyTransport = new NettyTransport(settings, threadPool, networkService, bigArrays, new NamedWriteableRegistry(),
new NoneCircuitBreakerService());
nettyTransport.start();
TransportService transportService = new TransportService(settings, nettyTransport, threadPool);

View File

@ -64,7 +64,6 @@ public class NettyTransportServiceHandshakeTests extends ESTestCase {
threadPool,
new NetworkService(settings),
BigArrays.NON_RECYCLING_INSTANCE,
Version.CURRENT,
new NamedWriteableRegistry(),
new NoneCircuitBreakerService());
TransportService transportService = new MockTransportService(settings, transport, threadPool);

View File

@ -36,9 +36,9 @@ public class TransportModuleTests extends ModuleTestCase {
static class FakeTransport extends AssertingLocalTransport {
@Inject
public FakeTransport(Settings settings, CircuitBreakerService circuitBreakerService, ThreadPool threadPool, Version version,
public FakeTransport(Settings settings, CircuitBreakerService circuitBreakerService, ThreadPool threadPool,
NamedWriteableRegistry namedWriteableRegistry) {
super(settings, circuitBreakerService, threadPool, version, namedWriteableRegistry);
super(settings, circuitBreakerService, threadPool, namedWriteableRegistry);
}
}

View File

@ -65,14 +65,14 @@ public class NettyScheduledPingTests extends ESTestCase {
NamedWriteableRegistry registryA = new NamedWriteableRegistry();
final NettyTransport nettyA = new NettyTransport(settings, threadPool, new NetworkService(settings),
BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT, registryA, circuitBreakerService);
BigArrays.NON_RECYCLING_INSTANCE, registryA, circuitBreakerService);
MockTransportService serviceA = new MockTransportService(settings, nettyA, threadPool);
serviceA.start();
serviceA.acceptIncomingRequests();
NamedWriteableRegistry registryB = new NamedWriteableRegistry();
final NettyTransport nettyB = new NettyTransport(settings, threadPool, new NetworkService(settings),
BigArrays.NON_RECYCLING_INSTANCE, Version.CURRENT, registryB, circuitBreakerService);
BigArrays.NON_RECYCLING_INSTANCE, registryB, circuitBreakerService);
MockTransportService serviceB = new MockTransportService(settings, nettyB, threadPool);
serviceB.start();

View File

@ -93,9 +93,9 @@ public class NettyTransportIT extends ESIntegTestCase {
@Inject
public ExceptionThrowingNettyTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
Version version, NamedWriteableRegistry namedWriteableRegistry,
NamedWriteableRegistry namedWriteableRegistry,
CircuitBreakerService circuitBreakerService) {
super(settings, threadPool, networkService, bigArrays, version, namedWriteableRegistry, circuitBreakerService);
super(settings, threadPool, networkService, bigArrays, namedWriteableRegistry, circuitBreakerService);
}
@Override

View File

@ -136,7 +136,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
private NettyTransport startNettyTransport(Settings settings, ThreadPool threadPool) {
BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
NettyTransport nettyTransport = new NettyTransport(settings, threadPool, new NetworkService(settings), bigArrays, Version.CURRENT,
NettyTransport nettyTransport = new NettyTransport(settings, threadPool, new NetworkService(settings), bigArrays,
new NamedWriteableRegistry(), new NoneCircuitBreakerService());
nettyTransport.start();

View File

@ -42,7 +42,7 @@ public class SimpleNettyTransportTests extends AbstractSimpleTransportTestCase {
@Override
protected MockTransportService build(Settings settings, Version version) {
settings = Settings.builder().put(settings).put(TransportSettings.PORT.getKey(), "0").build();
MockTransportService transportService = MockTransportService.nettyFromThreadPool(settings, version, threadPool);
MockTransportService transportService = MockTransportService.nettyFromThreadPool(settings, threadPool, version);
transportService.start();
return transportService;
}

View File

@ -144,10 +144,6 @@ public class UpdateIT extends ESIntegTestCase {
throw new UnsupportedOperationException();
}
@Override
public void scriptRemoved(CompiledScript script) {
}
@Override
public boolean isInlineScriptEnabled() {
return true;
@ -214,15 +210,10 @@ public class UpdateIT extends ESIntegTestCase {
throw new UnsupportedOperationException();
}
@Override
public void scriptRemoved(CompiledScript script) {
}
@Override
public boolean isInlineScriptEnabled() {
return true;
}
}
public static class ScriptedUpsertScriptPlugin extends Plugin implements ScriptPlugin {
@ -285,10 +276,6 @@ public class UpdateIT extends ESIntegTestCase {
throw new UnsupportedOperationException();
}
@Override
public void scriptRemoved(CompiledScript script) {
}
@Override
public boolean isInlineScriptEnabled() {
return true;
@ -357,10 +344,6 @@ public class UpdateIT extends ESIntegTestCase {
throw new UnsupportedOperationException();
}
@Override
public void scriptRemoved(CompiledScript script) {
}
@Override
public boolean isInlineScriptEnabled() {
return true;

View File

@ -7,9 +7,9 @@ The store module allows you to control how index data is stored and accessed on
[[file-system]]
=== File system storage types
There are different file system implementations or _storage types_. The best
one for the operating environment will be automatically chosen: `simplefs` on
Windows 32bit, `niofs` on other 32bit systems and `mmapfs` on 64bit systems.
There are different file system implementations or _storage types_. By default,
elasticsearch will pick the best implementation based on the operating
environment.
This can be overridden for all indices by adding this to the
`config/elasticsearch.yml` file:
@ -36,6 +36,12 @@ experimental[This is an expert-only setting and may be removed in the future]
The following section lists all the different storage types supported.
`fs`::
Default file system implementation. This will pick the best implementation
depending on the operating environment: `simplefs` on Windows 32bit, `niofs`
on other 32bit systems and `mmapfs` on 64bit systems.
[[simplefs]]`simplefs`::
The Simple FS type is a straightforward implementation of file system
@ -60,13 +66,64 @@ process equal to the size of the file being mapped. Before using this
class, be sure you have allowed plenty of
<<vm-max-map-count,virtual address space>>.
[[default_fs]]`default_fs` deprecated[5.0.0, The `default_fs` store type is deprecated - use `mmapfs` instead]::
[[default_fs]]`default_fs` deprecated[5.0.0, The `default_fs` store type is deprecated - use `fs` instead]::
The `default` type is a hybrid of NIO FS and MMapFS, which chooses the best
file system for each type of file. Currently only the Lucene term dictionary,
doc values and points files are memory mapped to reduce the impact on the
operating system. All other files are opened using Lucene `NIOFSDirectory`.
Address space settings (<<vm-max-map-count>>) might also apply if your term
dictionaries are large, if you index many fields that use points (numerics, dates
and ip addresses) or if you have many fields with doc values.
The `default` type is deprecated and is aliased to `fs` for backward
compatibility.
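As a minimal sketch (the index name `my_index` is only a placeholder), the store
type can also be set per index at creation time:

[source,js]
---------------------------------
PUT /my_index
{
  "settings": {
    "index.store.type": "mmapfs"
  }
}
---------------------------------

As noted above, this is an expert-only setting; the default `fs` implementation
is usually the right choice.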
=== Pre-loading data into the file system cache
experimental[This is an expert-only setting and may be removed in the future]
By default, elasticsearch completely relies on the operating system file system
cache for caching I/O operations. It is possible to set `index.store.preload`
in order to tell the operating system to load the content of hot index
files into memory upon opening. This setting accepts a comma-separated list of
file extensions: all files whose extension is in the list will be pre-loaded
upon opening. This can be useful to improve search performance of an index,
especially when the host operating system is restarted, since this causes the
file system cache to be trashed. Note, however, that this may slow down the
opening of indices, as they will only become available after the data has been
loaded into physical memory.
This setting is best-effort only and may not work at all depending on the store
type and host operating system.
`index.store.pre_load` is a static setting that can either be set in the
`config/elasticsearch.yml` file:
[source,yaml]
---------------------------------
index.store.pre_load: ["nvd", "dvd"]
---------------------------------
or in the index settings at index creation time:
[source,js]
---------------------------------
PUT /my_index
{
"settings": {
"index.store.pre_load": ["nvd", "dvd"]
}
}
---------------------------------
The default value is the empty array, which means that nothing will be loaded
into the file-system cache eagerly. For indices that are actively searched,
you might want to set it to `["nvd", "dvd"]`, which will cause norms and doc
values to be loaded eagerly into physical memory. These are the first two
extensions to look at, since elasticsearch performs random access on them.
A wildcard can be used in order to indicate that all files should be preloaded:
`index.store.pre_load: ["*"]`. Note, however, that it is generally not useful to
load all files into memory, in particular those for stored fields and term
vectors, so a better option might be to set it to
`["nvd", "dvd", "tim", "doc", "dim"]`, which will preload norms, doc values,
terms dictionaries, postings lists and points, which are the most important
parts of the index for search and aggregations.
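For illustration, a sketch of applying that broader list at index creation time
(again using the hypothetical `my_index`):

[source,js]
---------------------------------
PUT /my_index
{
  "settings": {
    "index.store.pre_load": ["nvd", "dvd", "tim", "doc", "dim"]
  }
}
---------------------------------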
Note that this setting can be dangerous on indices that are larger than the size
of the main memory of the host, as it would cause the filesystem cache to be
trashed upon reopens after large merges, which would make indexing and searching
_slower_.

View File

@ -133,7 +133,7 @@ GET hockey/_search
"order": "asc",
"script": {
"lang": "painless",
"inline": "doc['first'].value + ' ' + doc['last'].value"
"inline": "doc['first.keyword'].value + ' ' + doc['last.keyword'].value"
}
}
}

View File

@ -8,7 +8,7 @@ backends are available via officially supported repository plugins.
[float]
=== Repositories
Before any snapshot or restore operation can be performed a snapshot repository should be registered in
Before any snapshot or restore operation can be performed, a snapshot repository should be registered in
Elasticsearch. The repository settings are repository-type specific. See below for details.
[source,js]

View File

@ -266,11 +266,6 @@ public class ExpressionScriptEngineService extends AbstractComponent implements
@Override
public void close() {}
@Override
public void scriptRemoved(CompiledScript script) {
// Nothing to do
}
@Override
public boolean isInlineScriptEnabled() {
return true;

View File

@ -33,9 +33,11 @@ import org.codehaus.groovy.classgen.GeneratorContext;
import org.codehaus.groovy.control.CompilationFailedException;
import org.codehaus.groovy.control.CompilePhase;
import org.codehaus.groovy.control.CompilerConfiguration;
import org.codehaus.groovy.control.MultipleCompilationErrorsException;
import org.codehaus.groovy.control.SourceUnit;
import org.codehaus.groovy.control.customizers.CompilationCustomizer;
import org.codehaus.groovy.control.customizers.ImportCustomizer;
import org.codehaus.groovy.control.messages.Message;
import org.elasticsearch.SpecialPermission;
import org.elasticsearch.bootstrap.BootstrapInfo;
import org.elasticsearch.common.Nullable;
@ -49,20 +51,26 @@ import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.LeafSearchScript;
import org.elasticsearch.script.ScoreAccessor;
import org.elasticsearch.script.ScriptEngineService;
import org.elasticsearch.script.GeneralScriptException;
import org.elasticsearch.script.ScriptException;
import org.elasticsearch.script.SearchScript;
import org.elasticsearch.search.lookup.LeafSearchLookup;
import org.elasticsearch.search.lookup.SearchLookup;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.math.BigDecimal;
import java.nio.charset.StandardCharsets;
import java.security.AccessControlContext;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static java.util.Collections.emptyList;
/**
* Provides the infrastructure for Groovy as a scripting language for Elasticsearch
*/
@ -78,81 +86,40 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
*/
public static final String GROOVY_INDY_SETTING_NAME = "indy";
private final GroovyClassLoader loader;
/**
* Classloader used as a parent classloader for all Groovy scripts
*/
private final ClassLoader loader;
public GroovyScriptEngineService(Settings settings) {
super(settings);
ImportCustomizer imports = new ImportCustomizer();
imports.addStarImports("org.joda.time");
imports.addStaticStars("java.lang.Math");
CompilerConfiguration config = new CompilerConfiguration();
config.addCompilationCustomizers(imports);
// Add BigDecimal -> Double transformer
config.addCompilationCustomizers(new GroovyBigDecimalTransformer(CompilePhase.CONVERSION));
// always enable invokeDynamic, not the crazy softreference-based stuff
config.getOptimizationOptions().put(GROOVY_INDY_SETTING_NAME, true);
// Groovy class loader to isolate Groovy-land code
// classloader created here
// Creates the classloader here in order to isolate Groovy-land code
final SecurityManager sm = System.getSecurityManager();
if (sm != null) {
sm.checkPermission(new SpecialPermission());
}
this.loader = AccessController.doPrivileged(new PrivilegedAction<GroovyClassLoader>() {
@Override
public GroovyClassLoader run() {
// snapshot our context (which has permissions for classes), since the script has none
final AccessControlContext engineContext = AccessController.getContext();
return new GroovyClassLoader(new ClassLoader(getClass().getClassLoader()) {
@Override
protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
if (sm != null) {
try {
engineContext.checkPermission(new ClassPermission(name));
} catch (SecurityException e) {
throw new ClassNotFoundException(name, e);
}
this.loader = AccessController.doPrivileged((PrivilegedAction<ClassLoader>) () -> {
// snapshot our context (which has permissions for classes), since the script has none
AccessControlContext context = AccessController.getContext();
return new ClassLoader(getClass().getClassLoader()) {
@Override
protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
if (sm != null) {
try {
context.checkPermission(new ClassPermission(name));
} catch (SecurityException e) {
throw new ClassNotFoundException(name, e);
}
return super.loadClass(name, resolve);
}
}, config);
}
});
}
@Override
public void close() {
loader.clearCache();
// close classloader here (why do we do this?)
SecurityManager sm = System.getSecurityManager();
if (sm != null) {
sm.checkPermission(new SpecialPermission());
}
AccessController.doPrivileged(new PrivilegedAction<Void>() {
@Override
public Void run() {
try {
loader.close();
} catch (IOException e) {
logger.warn("Unable to close Groovy loader", e);
return super.loadClass(name, resolve);
}
return null;
}
};
});
}
@Override
public void scriptRemoved(@Nullable CompiledScript script) {
// script could be null, meaning the script has already been garbage collected
if (script == null || NAME.equals(script.lang())) {
// Clear the cache, this removes old script versions from the
// cache to prevent running out of PermGen space
loader.clearCache();
}
public void close() throws IOException {
// Nothing to do here
}
@Override
@ -167,30 +134,34 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
@Override
public Object compile(String scriptName, String scriptSource, Map<String, String> params) {
try {
// we reuse classloader, so do a security check just in case.
SecurityManager sm = System.getSecurityManager();
if (sm != null) {
sm.checkPermission(new SpecialPermission());
}
String fake = MessageDigests.toHexString(MessageDigests.sha1().digest(scriptSource.getBytes(StandardCharsets.UTF_8)));
// same logic as GroovyClassLoader.parseClass() but with a different codesource string:
return AccessController.doPrivileged(new PrivilegedAction<Object>() {
@Override
public Class<?> run() {
GroovyCodeSource gcs = new GroovyCodeSource(scriptSource, fake, BootstrapInfo.UNTRUSTED_CODEBASE);
gcs.setCachable(false);
// TODO: we could be more complicated and paranoid, and move this to separate block, to
// sandbox the compilation process itself better.
return loader.parseClass(gcs);
}
});
} catch (Throwable e) {
if (logger.isTraceEnabled()) {
logger.trace("exception compiling Groovy script:", e);
}
throw new GeneralScriptException("failed to compile groovy script", e);
// Create the script class name
String className = MessageDigests.toHexString(MessageDigests.sha1().digest(scriptSource.getBytes(StandardCharsets.UTF_8)));
final SecurityManager sm = System.getSecurityManager();
if (sm != null) {
sm.checkPermission(new SpecialPermission());
}
return AccessController.doPrivileged((PrivilegedAction<Object>) () -> {
try {
GroovyCodeSource codeSource = new GroovyCodeSource(scriptSource, className, BootstrapInfo.UNTRUSTED_CODEBASE);
codeSource.setCachable(false);
CompilerConfiguration configuration = new CompilerConfiguration()
.addCompilationCustomizers(new ImportCustomizer().addStarImports("org.joda.time").addStaticStars("java.lang.Math"))
.addCompilationCustomizers(new GroovyBigDecimalTransformer(CompilePhase.CONVERSION));
// always enable invokeDynamic, not the crazy softreference-based stuff
configuration.getOptimizationOptions().put(GROOVY_INDY_SETTING_NAME, true);
GroovyClassLoader groovyClassLoader = new GroovyClassLoader(loader, configuration);
return groovyClassLoader.parseClass(codeSource);
} catch (Throwable e) {
if (logger.isTraceEnabled()) {
logger.trace("Exception compiling Groovy script:", e);
}
throw convertToScriptException("Error compiling script " + className, scriptSource, e);
}
});
}
/**
@ -215,7 +186,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
}
return new GroovyScript(compiledScript, createScript(compiledScript.compiled(), allVars), this.logger);
} catch (ReflectiveOperationException e) {
throw new GeneralScriptException("failed to build executable " + compiledScript, e);
throw convertToScriptException("Failed to build executable script", compiledScript.name(), e);
}
}
@ -235,7 +206,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
try {
scriptObject = createScript(compiledScript.compiled(), allVars);
} catch (ReflectiveOperationException e) {
throw new GeneralScriptException("failed to build search " + compiledScript, e);
throw convertToScriptException("Failed to build search script", compiledScript.name(), e);
}
return new GroovyScript(compiledScript, scriptObject, leafLookup, logger);
}
@ -248,6 +219,29 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
};
}
/**
* Converts a {@link Throwable} to a {@link ScriptException}
*/
private ScriptException convertToScriptException(String message, String source, Throwable cause) {
List<String> stack = new ArrayList<>();
if (cause instanceof MultipleCompilationErrorsException) {
@SuppressWarnings({"unchecked"})
List<Message> errors = (List<Message>) ((MultipleCompilationErrorsException) cause).getErrorCollector().getErrors();
for (Message error : errors) {
try (StringWriter writer = new StringWriter()) {
error.write(new PrintWriter(writer));
stack.add(writer.toString());
} catch (IOException e1) {
logger.error("failed to write compilation error message to the stack", e1);
}
}
} else if (cause instanceof CompilationFailedException) {
CompilationFailedException error = (CompilationFailedException) cause;
stack.add(error.getMessage());
}
throw new ScriptException(message, cause, stack, source, NAME);
}
public static final class GroovyScript implements ExecutableScript, LeafSearchScript {
private final CompiledScript compiledScript;
@ -298,17 +292,12 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
try {
// NOTE: we truncate the stack because IndyInterface has security issue (needs getClassLoader)
// we don't do a security check just as a tradeoff, it cannot really escalate to anything.
return AccessController.doPrivileged(new PrivilegedAction<Object>() {
@Override
public Object run() {
return script.run();
}
});
return AccessController.doPrivileged((PrivilegedAction<Object>) script::run);
} catch (Throwable e) {
if (logger.isTraceEnabled()) {
logger.trace("failed to run {}", e, compiledScript);
}
throw new GeneralScriptException("failed to run " + compiledScript, e);
throw new ScriptException("Error evaluating " + compiledScript.name(), e, emptyList(), "", compiledScript.lang());
}
}

View File

@ -26,8 +26,6 @@ grant {
permission java.lang.RuntimePermission "accessDeclaredMembers";
permission java.lang.RuntimePermission "accessClassInPackage.sun.reflect";
permission java.lang.RuntimePermission "accessClassInPackage.jdk.internal.reflect";
// needed by GroovyScriptEngineService to close its classloader (why?)
permission java.lang.RuntimePermission "closeClassLoader";
// Allow executing groovy scripts with codesource of /untrusted
permission groovy.security.GroovyCodeSourcePermission "/untrusted";

View File

@ -24,7 +24,7 @@ import org.apache.lucene.util.Constants;
import org.codehaus.groovy.control.MultipleCompilationErrorsException;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.GeneralScriptException;
import org.elasticsearch.script.ScriptException;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.test.ESTestCase;
@ -151,7 +151,7 @@ public class GroovySecurityTests extends ESTestCase {
try {
doTest(script);
fail("did not get expected exception");
} catch (GeneralScriptException expected) {
} catch (ScriptException expected) {
Throwable cause = expected.getCause();
assertNotNull(cause);
if (exceptionClass.isAssignableFrom(cause.getClass()) == false) {

View File

@ -51,7 +51,7 @@
- do:
catch: /Unable.to.parse.*/
catch: /script_exception,.+Error.compiling.script.*/
put_script:
id: "1"
lang: "groovy"

View File

@ -133,11 +133,6 @@ public final class MustacheScriptEngineService extends AbstractComponent impleme
// Nothing to do here
}
@Override
public void scriptRemoved(CompiledScript script) {
// Nothing to do here
}
// permission checked before doing crazy reflection
static final SpecialPermission SPECIAL_PERMISSION = new SpecialPermission();

View File

@ -38,7 +38,6 @@ import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.EnvironmentModule;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
@ -59,7 +58,6 @@ import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.mapper.MapperRegistry;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.script.ScriptEngineRegistry;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.mustache.MustacheScriptEngineService;
@ -110,7 +108,10 @@ public class TemplateQueryParserTests extends ESTestCase {
SettingsModule settingsModule = new SettingsModule(settings, scriptSettings, Collections.emptyList());
final ThreadPool threadPool = new ThreadPool(settings);
injector = new ModulesBuilder().add(
new EnvironmentModule(new Environment(settings), threadPool),
(b) -> {
b.bind(Environment.class).toInstance(new Environment(settings));
b.bind(ThreadPool.class).toInstance(threadPool);
},
settingsModule,
new SearchModule(settings, new NamedWriteableRegistry()) {
@Override

View File

@ -28,6 +28,7 @@ import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodHandles.Lookup;
import java.lang.invoke.MethodType;
import java.util.BitSet;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
@ -222,19 +223,25 @@ public final class Def {
*/
static MethodHandle lookupMethod(Lookup lookup, MethodType callSiteType,
Class<?> receiverClass, String name, Object args[]) throws Throwable {
long recipe = (Long) args[0];
String recipeString = (String) args[0];
int numArguments = callSiteType.parameterCount();
// simple case: no lambdas
if (recipe == 0) {
if (recipeString.isEmpty()) {
return lookupMethodInternal(receiverClass, name, numArguments - 1).handle;
}
// convert recipe string to a bitset for convenience (the code below should be refactored...)
BitSet lambdaArgs = new BitSet();
for (int i = 0; i < recipeString.length(); i++) {
lambdaArgs.set(recipeString.charAt(i));
}
// otherwise: first we have to compute the "real" arity. This is because we have extra arguments:
// e.g. f(a, g(x), b, h(y), i()) looks like f(a, g, x, b, h, y, i).
int arity = callSiteType.parameterCount() - 1;
int upTo = 1;
for (int i = 0; i < numArguments; i++) {
if ((recipe & (1L << (i - 1))) != 0) {
for (int i = 1; i < numArguments; i++) {
if (lambdaArgs.get(i - 1)) {
String signature = (String) args[upTo++];
int numCaptures = Integer.parseInt(signature.substring(signature.indexOf(',')+1));
arity -= numCaptures;
@ -250,7 +257,7 @@ public final class Def {
upTo = 1;
for (int i = 1; i < numArguments; i++) {
// its a functional reference, replace the argument with an impl
if ((recipe & (1L << (i - 1))) != 0) {
if (lambdaArgs.get(i - 1)) {
// decode signature of form 'type.call,2'
String signature = (String) args[upTo++];
int separator = signature.indexOf('.');
@ -335,6 +342,12 @@ public final class Def {
MethodHandle.class);
handle = (MethodHandle) accessor.invokeExact();
} catch (NoSuchFieldException | IllegalAccessException e) {
// is it a synthetic method? If we generated the method ourselves, be more helpful. It can only fail
// because the arity does not match the expected interface type.
if (call.contains("$")) {
throw new IllegalArgumentException("Incorrect number of parameters for [" + interfaceMethod.name +
"] in [" + clazz.clazz + "]");
}
throw new IllegalArgumentException("Unknown call [" + call + "] with [" + arity + "] arguments.");
}
ref = new FunctionRef(clazz, interfaceMethod, handle, captures);

View File

@ -431,11 +431,11 @@ public final class DefBootstrap {
if (args.length == 0) {
throw new BootstrapMethodError("Invalid number of parameters for method call");
}
if (args[0] instanceof Long == false) {
if (args[0] instanceof String == false) {
throw new BootstrapMethodError("Illegal parameter for method call: " + args[0]);
}
long recipe = (Long) args[0];
int numLambdas = Long.bitCount(recipe);
String recipe = (String) args[0];
int numLambdas = recipe.length();
if (numLambdas > type.parameterCount()) {
throw new BootstrapMethodError("Illegal recipe for method call: too many bits");
}

View File

@ -176,7 +176,12 @@ public class FunctionRef {
* If the interface expects a primitive type to be returned, we can't return Object,
* But we can set SAM to the wrapper version, and a cast will take place
*/
private static MethodType adapt(MethodType expected, MethodType actual) {
private MethodType adapt(MethodType expected, MethodType actual) {
// add some checks, now that we've set everything up, to deliver exceptions as early as possible.
if (expected.parameterCount() != actual.parameterCount()) {
throw new IllegalArgumentException("Incorrect number of parameters for [" + invokedName +
"] in [" + invokedType.returnType() + "]");
}
if (expected.returnType().isPrimitive() && actual.returnType() == Object.class) {
actual = actual.changeReturnType(MethodType.methodType(expected.returnType()).wrap().returnType());
}

View File

@ -1,62 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.painless;
import org.elasticsearch.painless.Definition.Type;
import java.util.List;
import java.util.Objects;
/** Extension of locals for lambdas */
// Note: this isn't functional yet, it throws UOE
// TODO: implement slot renumbering for captures.
class LambdaLocals extends Locals {
private List<Variable> captures;
LambdaLocals(Locals parent, List<Parameter> parameters, List<Variable> captures) {
super(parent);
for (Parameter parameter : parameters) {
defineVariable(parameter.location, parameter.type, parameter.name, false);
}
this.captures = Objects.requireNonNull(captures);
}
@Override
public Variable getVariable(Location location, String name) {
Variable variable = lookupVariable(location, name);
if (variable != null) {
return variable;
}
if (getParent() != null) {
variable = getParent().getVariable(location, name);
if (variable != null) {
assert captures != null; // unused right now
// make it read-only, and record that it was used.
throw new UnsupportedOperationException("lambda capture is not supported");
}
}
throw location.createError(new IllegalArgumentException("Variable [" + name + "] is not defined."));
}
@Override
public Type getReturnType() {
return Definition.DEF_TYPE;
}
}

View File

@ -35,7 +35,7 @@ import java.util.Set;
/**
* Tracks user defined methods and variables across compilation phases.
*/
public class Locals {
public final class Locals {
/** Reserved word: params map parameter */
public static final String PARAMS = "params";
@ -64,16 +64,30 @@ public class Locals {
return new Locals(currentScope);
}
/** Creates a new lambda scope inside the current scope */
public static Locals newLambdaScope(Locals currentScope, List<Parameter> parameters, List<Variable> captures) {
return new LambdaLocals(currentScope, parameters, captures);
/**
* Creates a new lambda scope inside the current scope
* <p>
* This is just like {@link #newFunctionScope}, except the captured parameters are made read-only.
*/
public static Locals newLambdaScope(Locals programScope, List<Parameter> parameters, int captureCount, int maxLoopCounter) {
Locals locals = new Locals(programScope, Definition.DEF_TYPE);
for (int i = 0; i < parameters.size(); i++) {
Parameter parameter = parameters.get(i);
boolean isCapture = i < captureCount;
locals.addVariable(parameter.location, parameter.type, parameter.name, isCapture);
}
// Loop counter to catch infinite loops. Internal use only.
if (maxLoopCounter > 0) {
locals.defineVariable(null, Definition.INT_TYPE, LOOP, true);
}
return locals;
}
/** Creates a new function scope inside the current scope */
public static Locals newFunctionScope(Locals programScope, Type returnType, List<Parameter> parameters, int maxLoopCounter) {
Locals locals = new Locals(programScope, returnType);
for (Parameter parameter : parameters) {
locals.defineVariable(parameter.location, parameter.type, parameter.name, false);
locals.addVariable(parameter.location, parameter.type, parameter.name, false);
}
// Loop counter to catch infinite loops. Internal use only.
if (maxLoopCounter > 0) {
@ -129,7 +143,7 @@ public class Locals {
}
/** Checks if a variable exists or not, in this scope or any parents. */
public final boolean hasVariable(String name) {
public boolean hasVariable(String name) {
Variable variable = lookupVariable(null, name);
if (variable != null) {
return true;
@ -153,7 +167,7 @@ public class Locals {
}
/** Looks up a method. Returns null if the method does not exist. */
public final Method getMethod(MethodKey key) {
public Method getMethod(MethodKey key) {
Method method = lookupMethod(key);
if (method != null) {
return method;
@ -165,7 +179,7 @@ public class Locals {
}
/** Creates a new variable. Throws IAE if the variable has already been defined (even in a parent) or reserved. */
public final Variable addVariable(Location location, Type type, String name, boolean readonly) {
public Variable addVariable(Location location, Type type, String name, boolean readonly) {
if (hasVariable(name)) {
throw location.createError(new IllegalArgumentException("Variable [" + name + "] is already defined."));
}
@ -196,23 +210,23 @@ public class Locals {
// return type of this scope
private final Type returnType;
// next slot number to assign
int nextSlotNumber;
private int nextSlotNumber;
// variable name -> variable
Map<String,Variable> variables;
private Map<String,Variable> variables;
// method name+arity -> methods
Map<MethodKey,Method> methods;
private Map<MethodKey,Method> methods;
/**
* Create a new Locals
*/
Locals(Locals parent) {
private Locals(Locals parent) {
this(parent, parent.getReturnType());
}
/**
* Create a new Locals with specified return type
*/
Locals(Locals parent, Type returnType) {
private Locals(Locals parent, Type returnType) {
this.parent = parent;
this.returnType = returnType;
if (parent == null) {
@ -223,12 +237,12 @@ public class Locals {
}
/** Returns the parent scope */
Locals getParent() {
private Locals getParent() {
return parent;
}
/** Looks up a variable at this scope only. Returns null if the variable does not exist. */
Variable lookupVariable(Location location, String name) {
private Variable lookupVariable(Location location, String name) {
if (variables == null) {
return null;
}
@ -236,7 +250,7 @@ public class Locals {
}
/** Looks up a method at this scope only. Returns null if the method does not exist. */
Method lookupMethod(MethodKey key) {
private Method lookupMethod(MethodKey key) {
if (methods == null) {
return null;
}
@ -245,19 +259,17 @@ public class Locals {
/** Defines a variable at this scope internally. */
Variable defineVariable(Location location, Type type, String name, boolean readonly) {
private Variable defineVariable(Location location, Type type, String name, boolean readonly) {
if (variables == null) {
variables = new HashMap<>();
}
Variable variable = new Variable(location, name, type, readonly);
variable.slot = getNextSlot();
Variable variable = new Variable(location, name, type, getNextSlot(), readonly);
variables.put(name, variable); // TODO: check result
nextSlotNumber += type.type.getSize();
return variable;
}
// TODO: make private, thats bogus
public void addMethod(Method method) {
private void addMethod(Method method) {
if (methods == null) {
methods = new HashMap<>();
}
@ -266,7 +278,7 @@ public class Locals {
}
int getNextSlot() {
private int getNextSlot() {
return nextSlotNumber;
}
@ -274,13 +286,14 @@ public class Locals {
public final Location location;
public final String name;
public final Type type;
int slot = -1;
public final boolean readonly;
private final int slot;
public Variable(Location location, String name, Type type, boolean readonly) {
public Variable(Location location, String name, Type type, int slot, boolean readonly) {
this.location = location;
this.name = name;
this.type = type;
this.slot = slot;
this.readonly = readonly;
}
@ -289,7 +302,7 @@ public class Locals {
}
}
public static class Parameter {
public static final class Parameter {
public final Location location;
public final String name;
public final Type type;

View File

@ -208,15 +208,6 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme
};
}
/**
* Action taken when a script is removed from the cache.
* @param script The removed script.
*/
@Override
public void scriptRemoved(final CompiledScript script) {
// Nothing to do.
}
/**
* Action taken when the engine is closed.
*/

Some files were not shown because too many files have changed in this diff