Merge branch 'master' into feature/query-refactoring

Christoph Büscher 2015-07-23 12:32:26 +02:00
commit 2b675ccf03
87 changed files with 884 additions and 1341 deletions

View File

@@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.plugins.PluginInfo;
import java.io.IOException;
import java.util.ArrayList;
@@ -75,7 +76,7 @@ public class PluginsInfo implements Streamable, ToXContent {
public void readFrom(StreamInput in) throws IOException {
int plugins_size = in.readInt();
for (int i = 0; i < plugins_size; i++) {
infos.add(PluginInfo.readPluginInfo(in));
infos.add(PluginInfo.readFromStream(in));
}
}

View File

@@ -23,7 +23,7 @@ import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectIntCursor;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
import org.elasticsearch.plugins.PluginInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -38,7 +38,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.os.OsInfo;
import java.io.IOException;
import java.net.InetAddress;
@@ -143,7 +142,7 @@ public class ClusterStatsNodes implements ToXContent, Streamable {
size = in.readVInt();
plugins = new HashSet<>(size);
for (; size > 0; size--) {
plugins.add(PluginInfo.readPluginInfo(in));
plugins.add(PluginInfo.readFromStream(in));
}
}

View File

@@ -19,9 +19,6 @@
package org.elasticsearch.bootstrap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import org.apache.lucene.util.StringHelper;
import org.apache.lucene.util.Constants;
import org.elasticsearch.ExceptionsHelper;
@@ -32,7 +29,6 @@ import org.elasticsearch.common.cli.Terminal;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.inject.spi.Message;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
@@ -46,20 +42,10 @@ import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import java.io.IOException;
import java.lang.reflect.Method;
import java.net.URL;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory;
import static com.google.common.collect.Sets.newHashSet;
import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
@@ -172,9 +158,6 @@ public class Bootstrap {
});
}
// install any plugins into classpath
setupPlugins(environment);
// look for jar hell
JarHell.checkJarHell();
@@ -365,75 +348,5 @@ public class Bootstrap {
return errorMessage.toString();
}
static final String PLUGIN_LIB_PATTERN = "glob:**.{jar,zip}";
private static void setupPlugins(Environment environment) throws IOException {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
Path pluginsDirectory = environment.pluginsFile();
if (!isAccessibleDirectory(pluginsDirectory, logger)) {
return;
}
// note: there's only one classloader here, but Uwe gets upset otherwise.
ClassLoader classLoader = Bootstrap.class.getClassLoader();
Class<?> classLoaderClass = classLoader.getClass();
Method addURL = null;
while (!classLoaderClass.equals(Object.class)) {
try {
addURL = classLoaderClass.getDeclaredMethod("addURL", URL.class);
addURL.setAccessible(true);
break;
} catch (NoSuchMethodException e) {
// no method, try the parent
classLoaderClass = classLoaderClass.getSuperclass();
}
}
if (addURL == null) {
logger.debug("failed to find addURL method on classLoader [" + classLoader + "] to add methods");
return;
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsDirectory)) {
for (Path plugin : stream) {
// We check that subdirs are directories and readable
if (!isAccessibleDirectory(plugin, logger)) {
continue;
}
logger.trace("--- adding plugin [{}]", plugin.toAbsolutePath());
try {
// add the root
addURL.invoke(classLoader, plugin.toUri().toURL());
// gather files to add
List<Path> libFiles = Lists.newArrayList();
libFiles.addAll(Arrays.asList(files(plugin)));
Path libLocation = plugin.resolve("lib");
if (Files.isDirectory(libLocation)) {
libFiles.addAll(Arrays.asList(files(libLocation)));
}
PathMatcher matcher = PathUtils.getDefaultFileSystem().getPathMatcher(PLUGIN_LIB_PATTERN);
// if there are jars in it, add it as well
for (Path libFile : libFiles) {
if (!matcher.matches(libFile)) {
continue;
}
addURL.invoke(classLoader, libFile.toUri().toURL());
}
} catch (Throwable e) {
logger.warn("failed to add plugin [" + plugin + "]", e);
}
}
}
}
private static Path[] files(Path from) throws IOException {
try (DirectoryStream<Path> stream = Files.newDirectoryStream(from)) {
return Iterators.toArray(stream.iterator(), Path.class);
}
}
}

View File

@@ -80,7 +80,6 @@ final class Security {
m.put(Pattern.compile(".*lucene-core-.*\\.jar$"), "es.security.jar.lucene.core");
m.put(Pattern.compile(".*jsr166e-.*\\.jar$"), "es.security.jar.twitter.jsr166e");
m.put(Pattern.compile(".*securemock-.*\\.jar$"), "es.security.jar.elasticsearch.securemock");
m.put(Pattern.compile(".*bcprov-.*\\.jar$"), "es.security.jar.bouncycastle.bcprov");
SPECIAL_JARS = Collections.unmodifiableMap(m);
}

View File

@@ -161,9 +161,6 @@ public class FieldValueFactorFunction extends ScoreFunction {
@Override
public String toString() {
if (this == NONE) {
return "";
}
return super.toString().toLowerCase(Locale.ROOT);
}
}

View File

@@ -19,7 +19,10 @@
package org.elasticsearch.gateway;
import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
@@ -29,6 +32,7 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
@@ -50,8 +54,6 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
public boolean allocateUnassigned(RoutingAllocation allocation) {
boolean changed = false;
final RoutingNodes routingNodes = allocation.routingNodes();
final MetaData metaData = routingNodes.metaData();
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
while (unassignedIterator.hasNext()) {
ShardRouting shard = unassignedIterator.next();
@@ -60,22 +62,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
}
// pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing
boolean canBeAllocatedToAtLeastOneNode = false;
for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().dataNodes().values()) {
RoutingNode node = routingNodes.node(cursor.value.id());
if (node == null) {
continue;
}
// if we can't allocate it on a node, ignore it, for example, this handles
// cases for only allocating a replica after a primary
Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
if (decision.type() == Decision.Type.YES) {
canBeAllocatedToAtLeastOneNode = true;
break;
}
}
if (!canBeAllocatedToAtLeastOneNode) {
if (canBeAllocatedToAtLeastOneNode(shard, allocation) == false) {
logger.trace("{}: ignoring allocation, can't be allocated on any node", shard);
unassignedIterator.removeAndIgnore();
continue;
@@ -88,106 +75,41 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
continue; // still fetching
}
long lastSizeMatched = 0;
DiscoveryNode lastDiscoNodeMatched = null;
RoutingNode lastNodeMatched = null;
boolean hasReplicaData = false;
IndexMetaData indexMetaData = metaData.index(shard.getIndex());
for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> nodeStoreEntry : shardStores.getData().entrySet()) {
DiscoveryNode discoNode = nodeStoreEntry.getKey();
TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData();
logger.trace("{}: checking node [{}]", shard, discoNode);
if (storeFilesMetaData == null) {
// already allocated on that node...
continue;
}
RoutingNode node = routingNodes.node(discoNode.id());
if (node == null) {
continue;
}
// check if we can allocate on that node...
// we only check for NO, since if this node is THROTTLING and it has enough "same data"
// then we will try and assign it next time
Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
if (decision.type() == Decision.Type.NO) {
continue;
}
// if it is already allocated, we can't assign to it...
if (storeFilesMetaData.allocated()) {
continue;
}
if (!shard.primary()) {
hasReplicaData |= storeFilesMetaData.iterator().hasNext();
ShardRouting primaryShard = routingNodes.activePrimary(shard);
if (primaryShard != null) {
assert primaryShard.active();
DiscoveryNode primaryNode = allocation.nodes().get(primaryShard.currentNodeId());
if (primaryNode != null) {
TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData primaryNodeFilesStore = shardStores.getData().get(primaryNode);
if (primaryNodeFilesStore != null) {
TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryNodeStore = primaryNodeFilesStore.storeFilesMetaData();
if (primaryNodeStore != null && primaryNodeStore.allocated()) {
long sizeMatched = 0;
String primarySyncId = primaryNodeStore.syncId();
String replicaSyncId = storeFilesMetaData.syncId();
// see if we have a sync id we can make use of
if (replicaSyncId != null && replicaSyncId.equals(primarySyncId)) {
logger.trace("{}: node [{}] has same sync id {} as primary", shard, discoNode.name(), replicaSyncId);
lastNodeMatched = node;
lastSizeMatched = Long.MAX_VALUE;
lastDiscoNodeMatched = discoNode;
} else {
for (StoreFileMetaData storeFileMetaData : storeFilesMetaData) {
String metaDataFileName = storeFileMetaData.name();
if (primaryNodeStore.fileExists(metaDataFileName) && primaryNodeStore.file(metaDataFileName).isSame(storeFileMetaData)) {
sizeMatched += storeFileMetaData.length();
}
}
logger.trace("{}: node [{}] has [{}/{}] bytes of re-usable data",
shard, discoNode.name(), new ByteSizeValue(sizeMatched), sizeMatched);
if (sizeMatched > lastSizeMatched) {
lastSizeMatched = sizeMatched;
lastDiscoNodeMatched = discoNode;
lastNodeMatched = node;
}
}
}
}
}
}
}
ShardRouting primaryShard = routingNodes.activePrimary(shard);
assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary";
TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore = findStore(primaryShard, allocation, shardStores);
if (primaryStore == null || primaryStore.allocated() == false) {
// if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed)
// we want to let the replica be allocated in order to expose the actual problem with the primary that the replica
// will try and recover from
// Note, this is the existing behavior, as exposed in running CorruptFileTest#testNoPrimaryData
logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", shard);
continue;
}
if (lastNodeMatched != null) {
MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores);
if (matchingNodes.getNodeWithHighestMatch() != null) {
RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().id());
// we only check on THROTTLE since we checked before on NO
Decision decision = allocation.deciders().canAllocate(shard, lastNodeMatched, allocation);
Decision decision = allocation.deciders().canAllocate(shard, nodeWithHighestMatch, allocation);
if (decision.type() == Decision.Type.THROTTLE) {
if (logger.isDebugEnabled()) {
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store with total_size [{}]", shard.index(), shard.id(), shard, lastDiscoNodeMatched, new ByteSizeValue(lastSizeMatched));
}
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
// we are throttling this, but we have enough to allocate to this node, ignore it for now
unassignedIterator.removeAndIgnore();
} else {
if (logger.isDebugEnabled()) {
logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store with total_size [{}]", shard.index(), shard.id(), shard, lastDiscoNodeMatched, new ByteSizeValue(lastSizeMatched));
}
logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
// we found a match
changed = true;
unassignedIterator.initialize(lastNodeMatched.nodeId());
unassignedIterator.initialize(nodeWithHighestMatch.nodeId());
}
} else if (hasReplicaData == false) {
} else if (matchingNodes.hasAnyData() == false) {
// if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation
// of the replica shard needs to be delayed, and if so, add it to the ignore unassigned list
// note: we only care about replica in delayed allocation, since if we have an unassigned primary it
// will anyhow wait to find an existing copy of the shard to be allocated
// note: the other side of the equation is scheduling a reroute in a timely manner, which happens in the RoutingService
IndexMetaData indexMetaData = allocation.metaData().index(shard.getIndex());
long delay = shard.unassignedInfo().getDelayAllocationExpirationIn(settings, indexMetaData.getSettings());
if (delay > 0) {
logger.debug("[{}][{}]: delaying allocation of [{}] for [{}]", shard.index(), shard.id(), shard, TimeValue.timeValueMillis(delay));
@@ -203,5 +125,134 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
return changed;
}
/**
* Can the shard be allocated on at least one node based on the allocation deciders.
*/
private boolean canBeAllocatedToAtLeastOneNode(ShardRouting shard, RoutingAllocation allocation) {
for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().dataNodes().values()) {
RoutingNode node = allocation.routingNodes().node(cursor.value.id());
if (node == null) {
continue;
}
// if we can't allocate it on a node, ignore it, for example, this handles
// cases for only allocating a replica after a primary
Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
if (decision.type() == Decision.Type.YES) {
return true;
}
}
return false;
}
/**
* Finds the store for the assigned shard in the fetched data, returns null if none is found.
*/
private TransportNodesListShardStoreMetaData.StoreFilesMetaData findStore(ShardRouting shard, RoutingAllocation allocation, AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> data) {
assert shard.currentNodeId() != null;
DiscoveryNode primaryNode = allocation.nodes().get(shard.currentNodeId());
if (primaryNode == null) {
return null;
}
TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData primaryNodeFilesStore = data.getData().get(primaryNode);
if (primaryNodeFilesStore == null) {
return null;
}
return primaryNodeFilesStore.storeFilesMetaData();
}
private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation,
TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore,
AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> data) {
ObjectLongMap<DiscoveryNode> nodesToSize = new ObjectLongHashMap<>();
for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> nodeStoreEntry : data.getData().entrySet()) {
DiscoveryNode discoNode = nodeStoreEntry.getKey();
TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData();
if (storeFilesMetaData == null) {
// already allocated on that node...
continue;
}
RoutingNode node = allocation.routingNodes().node(discoNode.id());
if (node == null) {
continue;
}
// check if we can allocate on that node...
// we only check for NO, since if this node is THROTTLING and it has enough "same data"
// then we will try and assign it next time
Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
if (decision.type() == Decision.Type.NO) {
continue;
}
// if it is already allocated, we can't assign to it... (and it might be primary as well)
if (storeFilesMetaData.allocated()) {
continue;
}
// we don't have any files at all, it is an empty index
if (storeFilesMetaData.iterator().hasNext() == false) {
continue;
}
String primarySyncId = primaryStore.syncId();
String replicaSyncId = storeFilesMetaData.syncId();
// see if we have a sync id we can make use of
if (replicaSyncId != null && replicaSyncId.equals(primarySyncId)) {
logger.trace("{}: node [{}] has same sync id {} as primary", shard, discoNode.name(), replicaSyncId);
nodesToSize.put(discoNode, Long.MAX_VALUE);
} else {
long sizeMatched = 0;
for (StoreFileMetaData storeFileMetaData : storeFilesMetaData) {
String metaDataFileName = storeFileMetaData.name();
if (primaryStore.fileExists(metaDataFileName) && primaryStore.file(metaDataFileName).isSame(storeFileMetaData)) {
sizeMatched += storeFileMetaData.length();
}
}
logger.trace("{}: node [{}] has [{}/{}] bytes of re-usable data",
shard, discoNode.name(), new ByteSizeValue(sizeMatched), sizeMatched);
nodesToSize.put(discoNode, sizeMatched);
}
}
return new MatchingNodes(nodesToSize);
}
protected abstract AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetchData(ShardRouting shard, RoutingAllocation allocation);
static class MatchingNodes {
private final ObjectLongMap<DiscoveryNode> nodesToSize;
private final DiscoveryNode nodeWithHighestMatch;
public MatchingNodes(ObjectLongMap<DiscoveryNode> nodesToSize) {
this.nodesToSize = nodesToSize;
long highestMatchSize = 0;
DiscoveryNode highestMatchNode = null;
for (ObjectLongCursor<DiscoveryNode> cursor : nodesToSize) {
if (cursor.value > highestMatchSize) {
highestMatchSize = cursor.value;
highestMatchNode = cursor.key;
}
}
nodeWithHighestMatch = highestMatchNode;
}
/**
* Returns the node with the highest "non zero byte" match compared to
* the primary.
*/
@Nullable
public DiscoveryNode getNodeWithHighestMatch() {
return this.nodeWithHighestMatch;
}
/**
* Did we manage to find any data, regardless of how well it matched.
*/
public boolean hasAnyData() {
return nodesToSize.isEmpty() == false;
}
}
}
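For illustration, a minimal sketch of how the new MatchingNodes helper ranks candidate nodes; the two DiscoveryNode instances (nodeA, nodeB) and the byte counts are hypothetical, and the map types are the hppc classes already imported above:

ObjectLongMap<DiscoveryNode> nodesToSize = new ObjectLongHashMap<>();
nodesToSize.put(nodeA, 1024L);           // nodeA can re-use 1kb of the primary's files
nodesToSize.put(nodeB, Long.MAX_VALUE);  // nodeB reported a matching sync id, treated as a full match
MatchingNodes matchingNodes = new MatchingNodes(nodesToSize);
assert matchingNodes.getNodeWithHighestMatch() == nodeB;
assert matchingNodes.hasAnyData();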

View File

@@ -26,7 +26,6 @@ import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.KeyedLock;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
@@ -44,7 +43,6 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentMap;
import static org.elasticsearch.index.mapper.MappedFieldType.Names;
@@ -138,7 +136,6 @@ public class IndexFieldDataService extends AbstractIndexComponent {
}
private final IndicesFieldDataCache indicesFieldDataCache;
private final ConcurrentMap<String, IndexFieldData<?>> loadedFieldData = ConcurrentCollections.newConcurrentMap();
private final KeyedLock.GlobalLockable<String> fieldLoadingLock = new KeyedLock.GlobalLockable<>();
private final Map<String, IndexFieldDataCache> fieldDataCaches = Maps.newHashMap(); // no need for concurrency support, always used under lock
@@ -161,15 +158,6 @@ public class IndexFieldDataService extends AbstractIndexComponent {
fieldLoadingLock.globalLock().lock();
try {
List<Throwable> exceptions = new ArrayList<>(0);
final Collection<IndexFieldData<?>> fieldDataValues = loadedFieldData.values();
for (IndexFieldData<?> fieldData : fieldDataValues) {
try {
fieldData.clear();
} catch (Throwable t) {
exceptions.add(t);
}
}
fieldDataValues.clear();
final Collection<IndexFieldDataCache> fieldDataCacheValues = fieldDataCaches.values();
for (IndexFieldDataCache cache : fieldDataCacheValues) {
try {
@@ -189,14 +177,6 @@ public class IndexFieldDataService extends AbstractIndexComponent {
fieldLoadingLock.acquire(fieldName);
try {
List<Throwable> exceptions = new ArrayList<>(0);
final IndexFieldData<?> fieldData = loadedFieldData.remove(fieldName);
if (fieldData != null) {
try {
fieldData.clear();
} catch (Throwable t) {
exceptions.add(t);
}
}
final IndexFieldDataCache cache = fieldDataCaches.remove(fieldName);
if (cache != null) {
try {
@@ -211,17 +191,6 @@ public class IndexFieldDataService extends AbstractIndexComponent {
}
}
public void onMappingUpdate() {
// synchronize to make sure to not miss field data instances that are being loaded
fieldLoadingLock.globalLock().lock();
try {
// important: do not clear fieldDataCaches: the cache may be reused
loadedFieldData.clear();
} finally {
fieldLoadingLock.globalLock().unlock();
}
}
@SuppressWarnings("unchecked")
public <IFD extends IndexFieldData<?>> IFD getForField(MappedFieldType fieldType) {
final Names fieldNames = fieldType.names();
@@ -231,57 +200,49 @@ public class IndexFieldDataService extends AbstractIndexComponent {
}
final boolean docValues = fieldType.hasDocValues();
final String key = fieldNames.indexName();
IndexFieldData<?> fieldData = loadedFieldData.get(key);
if (fieldData == null) {
fieldLoadingLock.acquire(key);
try {
fieldData = loadedFieldData.get(key);
if (fieldData == null) {
IndexFieldData.Builder builder = null;
String format = type.getFormat(indexSettings);
if (format != null && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format) && !docValues) {
logger.warn("field [" + fieldNames.fullName() + "] has no doc values, will use default field data format");
format = null;
}
if (format != null) {
builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format));
if (builder == null) {
logger.warn("failed to find format [" + format + "] for field [" + fieldNames.fullName() + "], will use default");
}
}
if (builder == null && docValues) {
builder = docValuesBuildersByType.get(type.getType());
}
if (builder == null) {
builder = buildersByType.get(type.getType());
}
if (builder == null) {
throw new IllegalArgumentException("failed to find field data builder for field " + fieldNames.fullName() + ", and type " + type.getType());
}
IndexFieldDataCache cache = fieldDataCaches.get(fieldNames.indexName());
if (cache == null) {
// we default to node level cache, which in turn defaults to be unbounded
// this means changing the node level settings is simple, just set the bounds there
String cacheType = type.getSettings().get("cache", indexSettings.get(FIELDDATA_CACHE_KEY, FIELDDATA_CACHE_VALUE_NODE));
if (FIELDDATA_CACHE_VALUE_NODE.equals(cacheType)) {
cache = indicesFieldDataCache.buildIndexFieldDataCache(indexService, index, fieldNames, type);
} else if ("none".equals(cacheType)){
cache = new IndexFieldDataCache.None();
} else {
throw new IllegalArgumentException("cache type not supported [" + cacheType + "] for field [" + fieldNames.fullName() + "]");
}
fieldDataCaches.put(fieldNames.indexName(), cache);
}
fieldData = builder.build(index, indexSettings, fieldType, cache, circuitBreakerService, indexService.mapperService());
loadedFieldData.put(fieldNames.indexName(), fieldData);
}
} finally {
fieldLoadingLock.release(key);
fieldLoadingLock.acquire(key);
try {
IndexFieldData.Builder builder = null;
String format = type.getFormat(indexSettings);
if (format != null && FieldDataType.DOC_VALUES_FORMAT_VALUE.equals(format) && !docValues) {
logger.warn("field [" + fieldNames.fullName() + "] has no doc values, will use default field data format");
format = null;
}
if (format != null) {
builder = buildersByTypeAndFormat.get(Tuple.tuple(type.getType(), format));
if (builder == null) {
logger.warn("failed to find format [" + format + "] for field [" + fieldNames.fullName() + "], will use default");
}
}
if (builder == null && docValues) {
builder = docValuesBuildersByType.get(type.getType());
}
if (builder == null) {
builder = buildersByType.get(type.getType());
}
if (builder == null) {
throw new IllegalArgumentException("failed to find field data builder for field " + fieldNames.fullName() + ", and type " + type.getType());
}
IndexFieldDataCache cache = fieldDataCaches.get(fieldNames.indexName());
if (cache == null) {
// we default to node level cache, which in turn defaults to be unbounded
// this means changing the node level settings is simple, just set the bounds there
String cacheType = type.getSettings().get("cache", indexSettings.get(FIELDDATA_CACHE_KEY, FIELDDATA_CACHE_VALUE_NODE));
if (FIELDDATA_CACHE_VALUE_NODE.equals(cacheType)) {
cache = indicesFieldDataCache.buildIndexFieldDataCache(indexService, index, fieldNames, type);
} else if ("none".equals(cacheType)){
cache = new IndexFieldDataCache.None();
} else {
throw new IllegalArgumentException("cache type not supported [" + cacheType + "] for field [" + fieldNames.fullName() + "]");
}
fieldDataCaches.put(fieldNames.indexName(), cache);
}
return (IFD) builder.build(index, indexSettings, fieldType, cache, circuitBreakerService, indexService.mapperService());
} finally {
fieldLoadingLock.release(key);
}
return (IFD) fieldData;
}
}

View File

@@ -53,7 +53,6 @@ import org.elasticsearch.common.util.concurrent.ReleasableLock;
import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.Mapper.BuilderContext;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.mapper.object.ObjectMapper;
@@ -104,7 +103,6 @@ public class MapperService extends AbstractIndexComponent {
};
private final AnalysisService analysisService;
private final IndexFieldDataService fieldDataService;
/**
* Will create types automatically if they do not exists in the mapping definition yet
@@ -139,12 +137,11 @@ public class MapperService extends AbstractIndexComponent {
private volatile ImmutableSet<String> parentTypes = ImmutableSet.of();
@Inject
public MapperService(Index index, @IndexSettings Settings indexSettings, AnalysisService analysisService, IndexFieldDataService fieldDataService,
public MapperService(Index index, @IndexSettings Settings indexSettings, AnalysisService analysisService,
SimilarityLookupService similarityLookupService,
ScriptService scriptService) {
super(index, indexSettings);
this.analysisService = analysisService;
this.fieldDataService = fieldDataService;
this.fieldTypes = new FieldTypeLookup();
this.documentParser = new DocumentMapperParser(indexSettings, this, analysisService, similarityLookupService, scriptService);
this.indexAnalyzer = new MapperAnalyzerWrapper(analysisService.defaultIndexAnalyzer(), INDEX_ANALYZER_EXTRACTOR);
@@ -291,7 +288,6 @@ public class MapperService extends AbstractIndexComponent {
logger.debug("merging mapping for type [{}] resulted in conflicts: [{}]", mapper.type(), Arrays.toString(result.buildConflicts()));
}
}
fieldDataService.onMappingUpdate();
return oldMapper;
} else {
List<ObjectMapper> newObjectMappers = new ArrayList<>();

View File

@@ -16,9 +16,9 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.node.info;
package org.elasticsearch.plugins;
import org.elasticsearch.common.Strings;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
@@ -27,26 +27,35 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Properties;
public class PluginInfo implements Streamable, ToXContent {
public static final String DESCRIPTION_NOT_AVAILABLE = "No description found.";
public static final String VERSION_NOT_AVAILABLE = "NA";
public static final String ES_PLUGIN_PROPERTIES = "plugin-descriptor.properties";
static final class Fields {
static final XContentBuilderString NAME = new XContentBuilderString("name");
static final XContentBuilderString DESCRIPTION = new XContentBuilderString("description");
static final XContentBuilderString URL = new XContentBuilderString("url");
static final XContentBuilderString JVM = new XContentBuilderString("jvm");
static final XContentBuilderString SITE = new XContentBuilderString("site");
static final XContentBuilderString VERSION = new XContentBuilderString("version");
static final XContentBuilderString JVM = new XContentBuilderString("jvm");
static final XContentBuilderString CLASSNAME = new XContentBuilderString("classname");
static final XContentBuilderString ISOLATED = new XContentBuilderString("isolated");
}
private String name;
private String description;
private boolean site;
private boolean jvm;
private String version;
private boolean jvm;
private String classname;
private boolean isolated;
public PluginInfo() {
}
@@ -57,18 +66,58 @@ public class PluginInfo implements Streamable, ToXContent {
* @param description Its description
* @param site true if it's a site plugin
* @param jvm true if it's a jvm plugin
* @param version Version number is applicable (NA otherwise)
* @param version Version number
*/
public PluginInfo(String name, String description, boolean site, boolean jvm, String version) {
PluginInfo(String name, String description, boolean site, String version, boolean jvm, String classname, boolean isolated) {
this.name = name;
this.description = description;
this.site = site;
this.jvm = jvm;
if (Strings.hasText(version)) {
this.version = version;
} else {
this.version = VERSION_NOT_AVAILABLE;
this.version = version;
this.classname = classname;
this.isolated = isolated;
}
/** reads (and validates) plugin metadata descriptor file */
public static PluginInfo readFromProperties(Path dir) throws IOException {
Path descriptor = dir.resolve(ES_PLUGIN_PROPERTIES);
Properties props = new Properties();
try (InputStream stream = Files.newInputStream(descriptor)) {
props.load(stream);
}
String name = dir.getFileName().toString();
String description = props.getProperty("description");
if (description == null) {
throw new IllegalArgumentException("Property [description] is missing for plugin [" + name + "]");
}
String version = props.getProperty("version");
if (version == null) {
throw new IllegalArgumentException("Property [version] is missing for plugin [" + name + "]");
}
boolean jvm = Boolean.parseBoolean(props.getProperty("jvm"));
boolean site = Boolean.parseBoolean(props.getProperty("site"));
if (jvm == false && site == false) {
throw new IllegalArgumentException("Plugin [" + name + "] must be at least a jvm or site plugin");
}
boolean isolated = true;
String classname = "NA";
if (jvm) {
String esVersionString = props.getProperty("elasticsearch.version");
if (esVersionString == null) {
throw new IllegalArgumentException("Property [elasticsearch.version] is missing for jvm plugin [" + name + "]");
}
Version esVersion = Version.fromString(esVersionString);
if (esVersion.equals(Version.CURRENT) == false) {
throw new IllegalArgumentException("Elasticsearch version [" + esVersionString + "] is too old for plugin [" + name + "]");
}
isolated = Boolean.parseBoolean(props.getProperty("isolated", "true"));
classname = props.getProperty("classname");
if (classname == null) {
throw new IllegalArgumentException("Property [classname] is missing for jvm plugin [" + name + "]");
}
}
return new PluginInfo(name, description, site, version, jvm, classname, isolated);
}
/**
@@ -99,6 +148,20 @@ public class PluginInfo implements Streamable, ToXContent {
return jvm;
}
/**
* @return true if jvm plugin has isolated classloader
*/
public boolean isIsolated() {
return isolated;
}
/**
* @return jvm plugin's classname
*/
public String getClassname() {
return classname;
}
/**
* We compute the URL for sites: "/_plugin/" + name + "/"
*
@@ -119,7 +182,7 @@ public class PluginInfo implements Streamable, ToXContent {
return version;
}
public static PluginInfo readPluginInfo(StreamInput in) throws IOException {
public static PluginInfo readFromStream(StreamInput in) throws IOException {
PluginInfo info = new PluginInfo();
info.readFrom(in);
return info;
@@ -132,6 +195,8 @@ public class PluginInfo implements Streamable, ToXContent {
this.site = in.readBoolean();
this.jvm = in.readBoolean();
this.version = in.readString();
this.classname = in.readString();
this.isolated = in.readBoolean();
}
@Override
@@ -141,6 +206,8 @@ public class PluginInfo implements Streamable, ToXContent {
out.writeBoolean(site);
out.writeBoolean(jvm);
out.writeString(version);
out.writeString(classname);
out.writeBoolean(isolated);
}
@Override
@@ -153,6 +220,10 @@ public class PluginInfo implements Streamable, ToXContent {
builder.field(Fields.URL, getUrl());
}
builder.field(Fields.JVM, jvm);
if (jvm) {
builder.field(Fields.CLASSNAME, classname);
builder.field(Fields.ISOLATED, isolated);
}
builder.field(Fields.SITE, site);
builder.endObject();
@@ -184,6 +255,10 @@ public class PluginInfo implements Streamable, ToXContent {
sb.append(", description='").append(description).append('\'');
sb.append(", site=").append(site);
sb.append(", jvm=").append(jvm);
if (jvm) {
sb.append(", classname=").append(classname);
sb.append(", isolated=").append(isolated);
}
sb.append(", version='").append(version).append('\'');
sb.append('}');
return sb.toString();
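For reference, an illustrative plugin-descriptor.properties that would pass the validation in readFromProperties above. All values here are hypothetical, and elasticsearch.version must equal the Version.CURRENT of the node loading the plugin:

description=A hypothetical example plugin
version=1.0.0
site=false
jvm=true
classname=org.example.ExamplePlugin
isolated=true
# must exactly match the running node's version
elasticsearch.version=2.0.0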

View File

@@ -19,37 +19,43 @@
package org.elasticsearch.plugins;
import com.google.common.base.Charsets;
import com.google.common.collect.*;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.apache.lucene.util.Constants;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo;
import org.elasticsearch.bootstrap.Bootstrap;
import org.elasticsearch.bootstrap.JarHell;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.env.Environment;
import java.io.BufferedReader;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.lang.reflect.Method;
import java.net.URL;
import java.nio.file.*;
import java.util.*;
import java.net.URLClassLoader;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory;
@@ -57,29 +63,14 @@ import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory;
*
*/
public class PluginsService extends AbstractComponent {
public static final String ES_PLUGIN_PROPERTIES_FILE_KEY = "plugins.properties_file";
public static final String ES_PLUGIN_PROPERTIES = "es-plugin.properties";
public static final String LOAD_PLUGIN_FROM_CLASSPATH = "plugins.load_classpath_plugins";
public static final String PLUGINS_CHECK_LUCENE_KEY = "plugins.check_lucene";
public static final String PLUGINS_INFO_REFRESH_INTERVAL_KEY = "plugins.info_refresh_interval";
private final Environment environment;
/**
* We keep around a list of jvm plugins
* We keep around a list of plugins
*/
private final ImmutableList<Tuple<PluginInfo, Plugin>> plugins;
private final PluginsInfo info;
private final ImmutableMap<Plugin, List<OnModuleReference>> onModuleReferences;
private final String esPluginPropertiesFile;
private final boolean loadClasspathPlugins;
private PluginsInfo cachedPluginsInfo;
private final TimeValue refreshInterval;
private final boolean checkLucene;
private long lastRefreshNS;
static class OnModuleReference {
public final Class<? extends Module> moduleClass;
@@ -98,54 +89,53 @@ public class PluginsService extends AbstractComponent {
*/
public PluginsService(Settings settings, Environment environment) {
super(settings);
this.environment = environment;
this.checkLucene = settings.getAsBoolean(PLUGINS_CHECK_LUCENE_KEY, true);
this.esPluginPropertiesFile = settings.get(ES_PLUGIN_PROPERTIES_FILE_KEY, ES_PLUGIN_PROPERTIES);
this.loadClasspathPlugins = settings.getAsBoolean(LOAD_PLUGIN_FROM_CLASSPATH, true);
ImmutableList.Builder<Tuple<PluginInfo, Plugin>> tupleBuilder = ImmutableList.builder();
// first we load all the default plugins from the settings
// first we load specified plugins via 'plugin.types' settings parameter.
// this is a hack for what is between unit and integration tests...
String[] defaultPluginsClasses = settings.getAsArray("plugin.types");
for (String pluginClass : defaultPluginsClasses) {
Plugin plugin = loadPlugin(pluginClass, settings);
PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), hasSite(plugin.name()), true, PluginInfo.VERSION_NOT_AVAILABLE);
PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), false, "NA", true, pluginClass, false);
if (logger.isTraceEnabled()) {
logger.trace("plugin loaded from settings [{}]", pluginInfo);
}
tupleBuilder.add(new Tuple<>(pluginInfo, plugin));
}
// now, find all the ones that are in the classpath
if (loadClasspathPlugins) {
tupleBuilder.addAll(loadPluginsFromClasspath(settings));
// now, find all the ones that are in plugins/
try {
List<Bundle> bundles = getPluginBundles(environment);
tupleBuilder.addAll(loadBundles(bundles));
} catch (IOException ex) {
throw new IllegalStateException("Can't load plugins into classloader", ex);
}
plugins = tupleBuilder.build();
info = new PluginsInfo();
for (Tuple<PluginInfo, Plugin> tuple : plugins) {
info.add(tuple.v1());
}
this.plugins = tupleBuilder.build();
// We need to build a List of jvm and site plugins for checking mandatory plugins
Map<String, Plugin> jvmPlugins = Maps.newHashMap();
List<String> sitePlugins = Lists.newArrayList();
Map<String, Plugin> jvmPlugins = new HashMap<>();
List<String> sitePlugins = new ArrayList<>();
for (Tuple<PluginInfo, Plugin> tuple : this.plugins) {
jvmPlugins.put(tuple.v2().name(), tuple.v2());
if (tuple.v1().isSite()) {
sitePlugins.add(tuple.v1().getName());
for (Tuple<PluginInfo, Plugin> tuple : plugins) {
PluginInfo info = tuple.v1();
if (info.isJvm()) {
jvmPlugins.put(tuple.v2().name(), tuple.v2());
}
}
try {
// we load site plugins
ImmutableList<Tuple<PluginInfo, Plugin>> tuples = loadSitePlugins();
for (Tuple<PluginInfo, Plugin> tuple : tuples) {
sitePlugins.add(tuple.v1().getName());
if (info.isSite()) {
sitePlugins.add(info.getName());
}
} catch (IOException ex) {
throw new IllegalStateException("Can't load site plugins", ex);
}
// Checking expected plugins
String[] mandatoryPlugins = settings.getAsArray("plugin.mandatory", null);
if (mandatoryPlugins != null) {
Set<String> missingPlugins = Sets.newHashSet();
Set<String> missingPlugins = new HashSet<>();
for (String mandatoryPlugin : mandatoryPlugins) {
if (!jvmPlugins.containsKey(mandatoryPlugin) && !sitePlugins.contains(mandatoryPlugin) && !missingPlugins.contains(mandatoryPlugin)) {
missingPlugins.add(mandatoryPlugin);
@@ -160,8 +150,8 @@ public class PluginsService extends AbstractComponent {
MapBuilder<Plugin, List<OnModuleReference>> onModuleReferences = MapBuilder.newMapBuilder();
for (Plugin plugin : jvmPlugins.values()) {
List<OnModuleReference> list = Lists.newArrayList();
for (Method method : plugin.getClass().getDeclaredMethods()) {
List<OnModuleReference> list = new ArrayList<>();
for (Method method : plugin.getClass().getMethods()) {
if (!method.getName().equals("onModule")) {
continue;
}
@@ -174,7 +164,6 @@ public class PluginsService extends AbstractComponent {
logger.warn("Plugin: {} implementing onModule by the type is not of Module type {}", plugin.name(), moduleClass);
continue;
}
method.setAccessible(true);
list.add(new OnModuleReference(moduleClass, method));
}
if (!list.isEmpty()) {
@@ -182,8 +171,6 @@ public class PluginsService extends AbstractComponent {
}
}
this.onModuleReferences = onModuleReferences.immutableMap();
this.refreshInterval = settings.getAsTime(PLUGINS_INFO_REFRESH_INTERVAL_KEY, TimeValue.timeValueSeconds(10));
}
public ImmutableList<Tuple<PluginInfo, Plugin>> plugins() {
@@ -225,7 +212,7 @@
}
public Collection<Class<? extends Module>> modules() {
List<Class<? extends Module>> modules = Lists.newArrayList();
List<Class<? extends Module>> modules = new ArrayList<>();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().modules());
}
@@ -233,7 +220,7 @@
}
public Collection<Module> modules(Settings settings) {
List<Module> modules = Lists.newArrayList();
List<Module> modules = new ArrayList<>();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().modules(settings));
}
@@ -241,7 +228,7 @@
}
public Collection<Class<? extends LifecycleComponent>> services() {
List<Class<? extends LifecycleComponent>> services = Lists.newArrayList();
List<Class<? extends LifecycleComponent>> services = new ArrayList<>();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
services.addAll(plugin.v2().services());
}
@@ -249,7 +236,7 @@
}
public Collection<Class<? extends Module>> indexModules() {
List<Class<? extends Module>> modules = Lists.newArrayList();
List<Class<? extends Module>> modules = new ArrayList<>();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().indexModules());
}
@@ -257,7 +244,7 @@
}
public Collection<Module> indexModules(Settings settings) {
List<Module> modules = Lists.newArrayList();
List<Module> modules = new ArrayList<>();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().indexModules(settings));
}
@@ -265,7 +252,7 @@
}
public Collection<Class<? extends Closeable>> indexServices() {
List<Class<? extends Closeable>> services = Lists.newArrayList();
List<Class<? extends Closeable>> services = new ArrayList<>();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
services.addAll(plugin.v2().indexServices());
}
@@ -273,7 +260,7 @@
}
public Collection<Class<? extends Module>> shardModules() {
List<Class<? extends Module>> modules = Lists.newArrayList();
List<Class<? extends Module>> modules = new ArrayList<>();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().shardModules());
}
@@ -281,7 +268,7 @@
}
public Collection<Module> shardModules(Settings settings) {
List<Module> modules = Lists.newArrayList();
List<Module> modules = new ArrayList<>();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().shardModules(settings));
}
@@ -289,7 +276,7 @@
}
public Collection<Class<? extends Closeable>> shardServices() {
List<Class<? extends Closeable>> services = Lists.newArrayList();
List<Class<? extends Closeable>> services = new ArrayList<>();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
services.addAll(plugin.v2().shardServices());
}
@@ -298,249 +285,125 @@
/**
* Get information about plugins (jvm and site plugins).
* Information is cached for 10 seconds by default. Modify the `plugins.info_refresh_interval` property if needed.
* Setting `plugins.info_refresh_interval` to `-1` will cause infinite caching.
* Setting `plugins.info_refresh_interval` to `0` will disable caching.
* @return List of plugins information
*/
synchronized public PluginsInfo info() {
if (refreshInterval.millis() != 0) {
if (cachedPluginsInfo != null &&
(refreshInterval.millis() < 0 || (System.nanoTime() - lastRefreshNS) < refreshInterval.nanos())) {
if (logger.isTraceEnabled()) {
logger.trace("using cache to retrieve plugins info");
}
return cachedPluginsInfo;
}
lastRefreshNS = System.nanoTime();
}
if (logger.isTraceEnabled()) {
logger.trace("starting to fetch info on plugins");
}
cachedPluginsInfo = new PluginsInfo();
// We first add all JvmPlugins
for (Tuple<PluginInfo, Plugin> plugin : this.plugins) {
if (logger.isTraceEnabled()) {
logger.trace("adding jvm plugin [{}]", plugin.v1());
}
cachedPluginsInfo.add(plugin.v1());
}
try {
// We reload site plugins (in case of some changes)
for (Tuple<PluginInfo, Plugin> plugin : loadSitePlugins()) {
if (logger.isTraceEnabled()) {
logger.trace("adding site plugin [{}]", plugin.v1());
}
cachedPluginsInfo.add(plugin.v1());
}
} catch (IOException ex) {
logger.warn("can load site plugins info", ex);
}
return cachedPluginsInfo;
public PluginsInfo info() {
return info;
}
// a "bundle" is a group of plugins in a single classloader
// really should be 1-1, but we are not so fortunate
static class Bundle {
List<PluginInfo> plugins = new ArrayList<>();
List<URL> urls = new ArrayList<>();
}
static List<Bundle> getPluginBundles(Environment environment) throws IOException {
ESLogger logger = Loggers.getLogger(Bootstrap.class);
private List<Tuple<PluginInfo,Plugin>> loadPluginsFromClasspath(Settings settings) {
ImmutableList.Builder<Tuple<PluginInfo, Plugin>> plugins = ImmutableList.builder();
Path pluginsDirectory = environment.pluginsFile();
if (!isAccessibleDirectory(pluginsDirectory, logger)) {
return Collections.emptyList();
}
// Trying JVM plugins: looking for es-plugin.properties files
try {
Enumeration<URL> pluginUrls = settings.getClassLoader().getResources(esPluginPropertiesFile);
List<Bundle> bundles = new ArrayList<>();
// a special purgatory for plugins that directly depend on each other
bundles.add(new Bundle());
// use a set for uniqueness as some classloaders such as groovy's can return the same URL multiple times and
// these plugins should only be loaded once
HashSet<URL> uniqueUrls = new HashSet<>(Collections.list(pluginUrls));
for (URL pluginUrl : uniqueUrls) {
Properties pluginProps = new Properties();
InputStream is = null;
try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsDirectory)) {
for (Path plugin : stream) {
try {
is = pluginUrl.openStream();
pluginProps.load(is);
String pluginClassName = pluginProps.getProperty("plugin");
String pluginVersion = pluginProps.getProperty("version", PluginInfo.VERSION_NOT_AVAILABLE);
Plugin plugin = loadPlugin(pluginClassName, settings);
// Is it a site plugin as well? Does it have also an embedded _site structure
Path siteFile = environment.pluginsFile().resolve(plugin.name()).resolve("_site");
boolean isSite = isAccessibleDirectory(siteFile, logger);
if (logger.isTraceEnabled()) {
logger.trace("found a jvm plugin [{}], [{}]{}",
plugin.name(), plugin.description(), isSite ? ": with _site structure" : "");
logger.trace("--- adding plugin [{}]", plugin.toAbsolutePath());
PluginInfo info = PluginInfo.readFromProperties(plugin);
List<URL> urls = new ArrayList<>();
if (info.isJvm()) {
// a jvm plugin: gather urls for jar files
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
for (Path jar : jarStream) {
urls.add(jar.toUri().toURL());
}
}
}
PluginInfo pluginInfo = new PluginInfo(plugin.name(), plugin.description(), isSite, true, pluginVersion);
plugins.add(new Tuple<>(pluginInfo, plugin));
final Bundle bundle;
if (info.isJvm() && info.isIsolated() == false) {
bundle = bundles.get(0); // purgatory
} else {
bundle = new Bundle();
bundles.add(bundle);
}
bundle.plugins.add(info);
bundle.urls.addAll(urls);
} catch (Throwable e) {
logger.warn("failed to load plugin from [" + pluginUrl + "]", e);
} finally {
IOUtils.closeWhileHandlingException(is);
logger.warn("failed to add plugin [" + plugin + "]", e);
}
}
}
return bundles;
}
private List<Tuple<PluginInfo,Plugin>> loadBundles(List<Bundle> bundles) {
ImmutableList.Builder<Tuple<PluginInfo, Plugin>> plugins = ImmutableList.builder();
for (Bundle bundle : bundles) {
// jar-hell check the bundle against the parent classloader
// pluginmanager does it, but we do it again, in case lusers mess with jar files manually
try {
final List<URL> jars = new ArrayList<>();
ClassLoader parentLoader = settings.getClassLoader();
if (parentLoader instanceof URLClassLoader) {
for (URL url : ((URLClassLoader) parentLoader).getURLs()) {
jars.add(url);
}
}
jars.addAll(bundle.urls);
JarHell.checkJarHell(jars.toArray(new URL[0]));
} catch (Exception e) {
logger.warn("failed to load bundle {} due to jar hell", bundle.urls);
}
// create a child to load the plugins in this bundle
ClassLoader loader = URLClassLoader.newInstance(bundle.urls.toArray(new URL[0]), settings.getClassLoader());
Settings settings = Settings.builder()
.put(this.settings)
.classLoader(loader)
.build();
for (PluginInfo pluginInfo : bundle.plugins) {
try {
final Plugin plugin;
if (pluginInfo.isJvm()) {
plugin = loadPlugin(pluginInfo.getClassname(), settings);
} else {
plugin = null;
}
plugins.add(new Tuple<>(pluginInfo, plugin));
} catch (Throwable e) {
logger.warn("failed to load plugin from [" + bundle.urls + "]", e);
}
}
} catch (IOException e) {
logger.warn("failed to find jvm plugins from classpath", e);
}
return plugins.build();
}
private ImmutableList<Tuple<PluginInfo,Plugin>> loadSitePlugins() throws IOException {
ImmutableList.Builder<Tuple<PluginInfo, Plugin>> sitePlugins = ImmutableList.builder();
List<String> loadedJvmPlugins = new ArrayList<>();
// Already known jvm plugins are ignored
for(Tuple<PluginInfo, Plugin> tuple : plugins) {
if (tuple.v1().isSite()) {
loadedJvmPlugins.add(tuple.v1().getName());
}
}
// Let's try to find all _site plugins we have not already found
Path pluginsFile = environment.pluginsFile();
if (FileSystemUtils.isAccessibleDirectory(pluginsFile, logger) == false) {
return sitePlugins.build();
}
try (DirectoryStream<Path> stream = Files.newDirectoryStream(pluginsFile)) {
for (Path pluginFile : stream) {
if (!loadedJvmPlugins.contains(pluginFile.getFileName().toString())) {
Path sitePluginDir = pluginFile.resolve("_site");
if (isAccessibleDirectory(sitePluginDir, logger)) {
// We have a _site plugin. Let's try to get more information on it
String name = pluginFile.getFileName().toString();
String version = PluginInfo.VERSION_NOT_AVAILABLE;
String description = PluginInfo.DESCRIPTION_NOT_AVAILABLE;
// We check if es-plugin.properties exists in plugin/_site dir
final Path pluginPropFile = sitePluginDir.resolve(esPluginPropertiesFile);
if (Files.exists(pluginPropFile)) {
final Properties pluginProps = new Properties();
try (final BufferedReader reader = Files.newBufferedReader(pluginPropFile, Charsets.UTF_8)) {
pluginProps.load(reader);
description = pluginProps.getProperty("description", PluginInfo.DESCRIPTION_NOT_AVAILABLE);
version = pluginProps.getProperty("version", PluginInfo.VERSION_NOT_AVAILABLE);
} catch (Exception e) {
// Can not load properties for this site plugin. Ignoring.
logger.debug("can not load {} file.", e, esPluginPropertiesFile);
}
}
if (logger.isTraceEnabled()) {
logger.trace("found a site plugin name [{}], version [{}], description [{}]",
name, version, description);
}
sitePlugins.add(new Tuple<PluginInfo, Plugin>(new PluginInfo(name, description, true, false, version), null));
}
}
}
}
return sitePlugins.build();
}
/**
* @param name plugin name
* @return true if this jvm plugin also has a _site structure
*/
private boolean hasSite(String name) {
// Let's try to find all _site plugins we have not already found
Path pluginsFile = environment.pluginsFile();
if (!Files.isDirectory(pluginsFile)) {
return false;
}
Path sitePluginDir = pluginsFile.resolve(name).resolve("_site");
return isAccessibleDirectory(sitePluginDir, logger);
}
private Plugin loadPlugin(String className, Settings settings) {
try {
Class<? extends Plugin> pluginClass = (Class<? extends Plugin>) settings.getClassLoader().loadClass(className);
Plugin plugin;
Class<? extends Plugin> pluginClass = settings.getClassLoader().loadClass(className).asSubclass(Plugin.class);
if (!checkLucene || checkLuceneCompatibility(pluginClass, settings, logger, esPluginPropertiesFile)) {
try {
return pluginClass.getConstructor(Settings.class).newInstance(settings);
} catch (NoSuchMethodException e) {
try {
plugin = pluginClass.getConstructor(Settings.class).newInstance(settings);
} catch (NoSuchMethodException e) {
try {
plugin = pluginClass.getConstructor().newInstance();
} catch (NoSuchMethodException e1) {
throw new ElasticsearchException("No constructor for [" + pluginClass + "]. A plugin class must " +
"have either an empty default constructor or a single argument constructor accepting a " +
"Settings instance");
}
return pluginClass.getConstructor().newInstance();
} catch (NoSuchMethodException e1) {
throw new ElasticsearchException("No constructor for [" + pluginClass + "]. A plugin class must " +
"have either an empty default constructor or a single argument constructor accepting a " +
"Settings instance");
}
} else {
throw new ElasticsearchException("Plugin is incompatible with the current node");
}
return plugin;
} catch (Throwable e) {
throw new ElasticsearchException("Failed to load plugin class [" + className + "]", e);
}
}
/**
* <p>Check that a plugin is Lucene compatible with the current running node using `lucene` property
* in `es-plugin.properties` file.</p>
* <p>If plugin does not provide `lucene` property, we consider that the plugin is compatible.</p>
* <p>If plugin provides `lucene` property, we try to load related Enum org.apache.lucene.util.Version. If this
* fails, it means that the node is too "old" comparing to the Lucene version the plugin was built for.</p>
* <p>We compare then two first digits of current node lucene version against two first digits of plugin Lucene
* version. If not equal, it means that the plugin is too "old" for the current node.</p>
*
* @param pluginClass Plugin class we are checking
* @return true if the plugin is Lucene compatible
*/
public static boolean checkLuceneCompatibility(Class<? extends Plugin> pluginClass, Settings settings, ESLogger logger, String propertiesFile) {
String luceneVersion = null;
try {
// We try to read the es-plugin.properties file
// But as we can have several plugins in the same classloader,
// we have to find the right es-plugin.properties file
Enumeration<URL> pluginUrls = settings.getClassLoader().getResources(propertiesFile);
while (pluginUrls.hasMoreElements()) {
URL pluginUrl = pluginUrls.nextElement();
try (InputStream is = pluginUrl.openStream()) {
Properties pluginProps = new Properties();
pluginProps.load(is);
String plugin = pluginProps.getProperty("plugin");
// If we don't have the expected plugin, let's continue to the next one
if (pluginClass.getName().equals(plugin)) {
luceneVersion = pluginProps.getProperty("lucene");
break;
}
logger.debug("skipping [{}]", pluginUrl);
}
}
if (luceneVersion != null) {
// Should fail if the running node is too old!
org.apache.lucene.util.Version luceneExpectedVersion = Lucene.parseVersionLenient(luceneVersion, null);
if (Version.CURRENT.luceneVersion.equals(luceneExpectedVersion)) {
logger.debug("starting analysis plugin for Lucene [{}].", luceneExpectedVersion);
return true;
}
} else {
logger.debug("lucene property is not set in plugin {} file. Skipping test.", propertiesFile);
return true;
}
} catch (Throwable t) {
// We could not read or parse the expected version... fall through and fail below.
logger.debug("exception raised while checking plugin Lucene version.", t);
}
logger.error("cannot start plugin due to incorrect Lucene version: plugin [{}], node [{}].",
luceneVersion, Constants.LUCENE_MAIN_VERSION);
return false;
}
}
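To make the lenient comparison concrete, here is a minimal sketch using Lucene's own org.apache.lucene.util.Version.parseLeniently (the version strings are made up for illustration; the ES helper Lucene.parseVersionLenient used above wraps the same idea, returning a default value instead of throwing):

import org.apache.lucene.util.Version;

public class LuceneCompatExample {
    public static void main(String[] args) throws Exception {
        // A plugin declaring lucene=5.2 parses leniently to 5.2.0, so it
        // matches a node running Lucene 5.2.0 but not one on 4.10.4.
        Version expected = Version.parseLeniently("5.2");
        System.out.println(expected.equals(Version.parseLeniently("5.2.0")));  // true
        System.out.println(expected.equals(Version.parseLeniently("4.10.4"))); // false
    }
}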

View File

@ -22,7 +22,7 @@ package org.elasticsearch.rest.action.cat;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
import org.elasticsearch.plugins.PluginInfo;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Client;

View File

@ -19,8 +19,6 @@
package org.elasticsearch.script.groovy;
import com.google.common.base.Charsets;
import com.google.common.hash.Hashing;
import groovy.lang.Binding;
import groovy.lang.GroovyClassLoader;
import groovy.lang.Script;
@ -51,6 +49,7 @@ import java.io.IOException;
import java.math.BigDecimal;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
/**
* Provides the infrastructure for Groovy as a scripting language for Elasticsearch
@ -58,6 +57,7 @@ import java.util.Map;
public class GroovyScriptEngineService extends AbstractComponent implements ScriptEngineService {
public static final String NAME = "groovy";
private final AtomicLong counter = new AtomicLong();
private final GroovyClassLoader loader;
@Inject
@ -111,7 +111,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
@Override
public Object compile(String script) {
try {
return loader.parseClass(script, Hashing.sha1().hashString(script, Charsets.UTF_8).toString());
return loader.parseClass(script, generateScriptName());
} catch (Throwable e) {
if (logger.isTraceEnabled()) {
logger.trace("exception compiling Groovy script:", e);
@ -190,6 +190,10 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri
return value;
}
private String generateScriptName() {
return "Script" + counter.incrementAndGet() + ".groovy";
}
public static final class GroovyScript implements ExecutableScript, LeafSearchScript {
private final CompiledScript compiledScript;

View File

@ -47,11 +47,6 @@ grant codeBase "${es.security.jar.elasticsearch.securemock}" {
permission java.lang.RuntimePermission "reflectionFactoryAccess";
};
grant codeBase "${es.security.jar.bouncycastle.bcprov}" {
// needed to allow installation of bouncycastle crypto provider
permission java.security.SecurityPermission "putProviderProperty.BC";
};
//// Everything else:
grant {
@ -123,4 +118,7 @@ grant {
// needed to install SSLFactories, advanced SSL configuration, etc.
permission java.lang.RuntimePermission "setFactory";
// needed to allow installation of bouncycastle crypto provider
permission java.security.SecurityPermission "putProviderProperty.BC";
};

View File

@ -1,51 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.node.info;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.junit.Test;
import java.util.List;
import static org.hamcrest.Matchers.contains;
public class PluginsInfoTest extends ElasticsearchTestCase {
@Test
public void testPluginListSorted() {
PluginsInfo pluginsInfo = new PluginsInfo(5);
pluginsInfo.add(new PluginInfo("c", "foo", true, true, "dummy"));
pluginsInfo.add(new PluginInfo("b", "foo", true, true, "dummy"));
pluginsInfo.add(new PluginInfo("e", "foo", true, true, "dummy"));
pluginsInfo.add(new PluginInfo("a", "foo", true, true, "dummy"));
pluginsInfo.add(new PluginInfo("d", "foo", true, true, "dummy"));
final List<PluginInfo> infos = pluginsInfo.getInfos();
List<String> names = Lists.transform(infos, new Function<PluginInfo, String>() {
@Override
public String apply(PluginInfo input) {
return input.getName();
}
});
assertThat(names, contains("a", "b", "c", "d", "e"));
}
}

View File

@ -139,6 +139,7 @@ public class IndicesShardStoreRequestTests extends ElasticsearchIntegrationTest
}
@Test
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/12416")
public void testCorruptedShards() throws Exception {
String index = "test";
internalCluster().ensureAtLeastNumDataNodes(2);

View File

@ -1,47 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.bootstrap;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.junit.Test;
import java.nio.file.Path;
import java.nio.file.PathMatcher;
public class BootstrapTests extends ElasticsearchTestCase {
@Test
public void testHasLibExtension() {
PathMatcher matcher = PathUtils.getDefaultFileSystem().getPathMatcher(Bootstrap.PLUGIN_LIB_PATTERN);
Path p = PathUtils.get("path", "to", "plugin.jar");
assertTrue(matcher.matches(p));
p = PathUtils.get("path", "to", "plugin.zip");
assertTrue(matcher.matches(p));
p = PathUtils.get("path", "to", "plugin.tar.gz");
assertFalse(matcher.matches(p));
p = PathUtils.get("path", "to", "plugin");
assertFalse(matcher.matches(p));
}
}

View File

@ -62,7 +62,6 @@ public class TransportClientRetryTests extends ElasticsearchIntegrationTest {
Settings.Builder builder = settingsBuilder().put("client.transport.nodes_sampler_interval", "1s")
.put("name", "transport_client_retry_test")
.put("node.mode", InternalTestCluster.nodeMode())
.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, false)
.put(ClusterName.SETTING, internalCluster().getClusterName())
.put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true)
.put("path.home", createTempDir());

View File

@ -31,6 +31,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.store.Store;
@ -118,8 +119,9 @@ public class ReplicaShardAllocatorTests extends ElasticsearchAllocationTestCase
/**
* When we can't find primary data, but still find replica data, we go ahead and keep it unassigned
* to be allocated. This is today's behavior, which relies on a primary corruption being identified by
* adding a replica and having that replica actually recover, causing the corruption to be identified.
* See CorruptFileTest#
*/
@Test
public void testNoPrimaryData() {
@ -194,15 +196,42 @@ public class ReplicaShardAllocatorTests extends ElasticsearchAllocationTestCase
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
}
@Test
public void testDelayedAllocation() {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(),
Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT);
testAllocator.addData(node1, true, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
if (randomBoolean()) {
// we sometimes return an empty list of files, make sure we test this as well
testAllocator.addData(node2, false, null);
}
boolean changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(),
Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT);
testAllocator.addData(node2, false, "MATCH", new StoreFileMetaData("file1", 10, "MATCH_CHECKSUM"));
changed = testAllocator.allocateUnassigned(allocation);
assertThat(changed, equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id()));
}
private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders) {
return onePrimaryOnNode1And1Replica(deciders, Settings.EMPTY, UnassignedInfo.Reason.INDEX_CREATED);
}
private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders, Settings settings, UnassignedInfo.Reason reason) {
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0))
.put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT).put(settings)).numberOfShards(1).numberOfReplicas(0))
.build();
RoutingTable routingTable = RoutingTable.builder()
.add(IndexRoutingTable.builder(shardId.getIndex())
.addIndexShard(new IndexShardRoutingTable.Builder(shardId)
.addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10))
.addShard(ShardRouting.newUnassigned(shardId.getIndex(), shardId.getId(), null, false, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)))
.addShard(ShardRouting.newUnassigned(shardId.getIndex(), shardId.getId(), null, false, new UnassignedInfo(reason, null)))
.build())
)
.build();

View File

@ -101,6 +101,7 @@ import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS;
import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY;
import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA;
import static org.hamcrest.Matchers.*;
public class InternalEngineTests extends ElasticsearchTestCase {
private static final Pattern PARSE_LEGACY_ID_PATTERN = Pattern.compile("^" + Translog.TRANSLOG_FILE_PREFIX + "(\\d+)((\\.recovering))?$");
@ -1923,7 +1924,7 @@ public class InternalEngineTests extends ElasticsearchTestCase {
Index index = new Index(indexName);
AnalysisService analysisService = new AnalysisService(index, settings);
SimilarityLookupService similarityLookupService = new SimilarityLookupService(index, settings);
MapperService mapperService = new MapperService(index, settings, analysisService, null, similarityLookupService, null);
MapperService mapperService = new MapperService(index, settings, analysisService, similarityLookupService, null);
DocumentMapper.Builder b = new DocumentMapper.Builder(settings, rootBuilder, mapperService);
DocumentMapperParser parser = new DocumentMapperParser(settings, mapperService, analysisService, similarityLookupService, null);
this.docMapper = b.build(mapperService, parser);

View File

@ -150,7 +150,6 @@ public class IndexFieldDataServiceTests extends ElasticsearchSingleNodeTest {
writer.addDocument(doc);
final IndexReader reader2 = DirectoryReader.open(writer, true);
final MappedFieldType mapper2 = MapperBuilders.stringField("s").tokenized(false).docValues(true).fieldDataSettings(Settings.builder().put(FieldDataType.FORMAT_KEY, "doc_values").build()).build(ctx).fieldType();
ifdService.onMappingUpdate();
ifd = ifdService.getForField(mapper2);
assertThat(ifd, instanceOf(SortedSetDVOrdinalsIndexFieldData.class));
reader1.close();

View File

@ -20,27 +20,33 @@
package org.elasticsearch.indices.store;
import com.google.common.base.Predicate;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.recovery.RecoverySource;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.disruption.SlowClusterStateProcessing;
import org.elasticsearch.test.disruption.BlockClusterStateProcessing;
import org.elasticsearch.test.disruption.SingleNodeDisruption;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.transport.TransportModule;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;
import org.junit.Test;
import java.io.IOException;
@ -48,9 +54,10 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Future;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static java.lang.Thread.sleep;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@ -69,6 +76,7 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest {
// which is between 1 and 2 sec can cause each of the shard deletion requests to timeout.
// to prevent this we are setting the timeout here to something highish ie. the default in practice
.put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, new TimeValue(30, TimeUnit.SECONDS))
.put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName())
.build();
}
@ -79,7 +87,7 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest {
}
@Test
@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/11989")
@Slow
public void indexCleanup() throws Exception {
final String masterNode = internalCluster().startNode(Settings.builder().put("node.data", false));
final String node_1 = internalCluster().startNode(Settings.builder().put("node.master", false));
@ -115,24 +123,30 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest {
assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(false));
logger.info("--> move shard from node_1 to node_3, and wait for relocation to finish");
SingleNodeDisruption disruption = new BlockClusterStateProcessing(node_3, getRandom());
internalCluster().setDisruptionScheme(disruption);
MockTransportService transportServiceNode3 = (MockTransportService) internalCluster().getInstance(TransportService.class, node_3);
CountDownLatch beginRelocationLatch = new CountDownLatch(1);
CountDownLatch endRelocationLatch = new CountDownLatch(1);
transportServiceNode3.addTracer(new ReclocationStartEndTracer(logger, beginRelocationLatch, endRelocationLatch));
internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_3)).get();
// wait for relocation to start
beginRelocationLatch.await();
disruption.startDisrupting();
// wait for relocation to finish
endRelocationLatch.await();
// wait a little so that the cluster state observer is registered
sleep(50);
// we must stop the disruption here, else the delayed cluster state processing on the disrupted node
// can potentially delay registering the observer in IndicesStore.ShardActiveRequestHandler.messageReceived()
// and therefore sending the response for the shard active request for more than 10s
disruption.stopDisrupting();
clusterHealth = client().admin().cluster().prepareHealth()
.setWaitForNodes("4")
.setWaitForRelocatingShards(0)
.get();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(waitForShardDeletion(node_1, "test", 0), equalTo(false));
assertThat(waitForIndexDeletion(node_1, "test"), equalTo(false));
@ -203,7 +217,8 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest {
assertThat(waitForShardDeletion(node_4, "test", 0), equalTo(false));
}
@Test @Slow
@Test
@Slow
public void testShardActiveElseWhere() throws Exception {
List<String> nodes = internalCluster().startNodesAsync(2).get();
@ -258,6 +273,7 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest {
.build();
}
@Override
public boolean runOnlyOnMaster() {
return false;
}
@ -306,4 +322,45 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest {
});
return Files.exists(indexDirectory(server, index));
}
/**
* This Tracer can be used to signal start and end of a recovery.
* This is used to test the following:
* Whenever a node deletes a shard because it was relocated somewhere else, it first
* checks if enough other copies are started somewhere else. The node sends a ShardActiveRequest
* to the other nodes that should have a copy according to cluster state.
* The nodes that receive this request check if the shard is in state STARTED in which case they
* respond with "true". If they have the shard in POST_RECOVERY they register a cluster state
* observer that checks at each update if the shard has moved to STARTED.
* To test that this mechanism actually works, this can be triggered by blocking the cluster
* state processing when a recovery starts and only unblocking it shortly after the node receives
* the ShardActiveRequest.
*/
static class ReclocationStartEndTracer extends MockTransportService.Tracer {
private final ESLogger logger;
private final CountDownLatch beginRelocationLatch;
private final CountDownLatch receivedShardExistsRequestLatch;
ReclocationStartEndTracer(ESLogger logger, CountDownLatch beginRelocationLatch, CountDownLatch receivedShardExistsRequestLatch) {
this.logger = logger;
this.beginRelocationLatch = beginRelocationLatch;
this.receivedShardExistsRequestLatch = receivedShardExistsRequestLatch;
}
@Override
public void receivedRequest(long requestId, String action) {
if (action.equals(IndicesStore.ACTION_SHARD_EXISTS)) {
receivedShardExistsRequestLatch.countDown();
logger.info("received: {}, relocation done", action);
}
}
@Override
public void requestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) {
if (action.equals(RecoverySource.Actions.START_RECOVERY)) {
logger.info("sent: {}, relocation starts", action);
beginRelocationLatch.countDown();
}
}
}
}

View File

@ -23,7 +23,6 @@ import com.google.common.collect.Lists;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.nodesinfo.plugin.dummy1.TestPlugin;
@ -94,66 +93,4 @@ public class SimpleNodesInfoTests extends PluginTestCase {
assertThat(response.getNodes().length, is(1));
assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
}
/**
* Use case is to start 4 nodes:
* <ul>
* <li>1 : no plugin</li>
* <li>2 : one site plugin (with a es-plugin.properties file)</li>
* <li>3 : one java plugin</li>
* <li>4 : one site plugin and 2 java plugins (included the previous one)</li>
* </ul>
* We test here that the NodeInfo API with the plugin option gives us the right results.
* @throws URISyntaxException
*/
@Test
public void testNodeInfoPlugin() throws URISyntaxException {
// We start four nodes
// The first has no plugin
String server1NodeId = startNodeWithPlugins(1);
// The second has one site plugin with a es-plugin.properties file (description and version)
String server2NodeId = startNodeWithPlugins(2);
// The third has one java plugin
String server3NodeId = startNodeWithPlugins(3,TestPlugin.class.getName());
// The fourth has one java plugin and one site plugin
String server4NodeId = startNodeWithPlugins(4,TestNoVersionPlugin.class.getName());
ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForNodes("4")).actionGet();
logger.info("--> done cluster_health, status " + clusterHealth.getStatus());
NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().clear().setPlugins(true).execute().actionGet();
logger.info("--> full json answer, status " + response.toString());
ElasticsearchAssertions.assertNodeContainsPlugins(response, server1NodeId,
Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST, // No JVM Plugin
Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST);// No Site Plugin
ElasticsearchAssertions.assertNodeContainsPlugins(response, server2NodeId,
Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST, // No JVM Plugin
Lists.newArrayList(Fields.SITE_PLUGIN), // Site Plugin
Lists.newArrayList(Fields.SITE_PLUGIN_DESCRIPTION),
Lists.newArrayList(Fields.SITE_PLUGIN_VERSION));
ElasticsearchAssertions.assertNodeContainsPlugins(response, server3NodeId,
Lists.newArrayList(TestPlugin.Fields.NAME), // JVM Plugin
Lists.newArrayList(TestPlugin.Fields.DESCRIPTION),
Lists.newArrayList(PluginInfo.VERSION_NOT_AVAILABLE),
Collections.EMPTY_LIST, Collections.EMPTY_LIST, Collections.EMPTY_LIST);// No site Plugin
ElasticsearchAssertions.assertNodeContainsPlugins(response, server4NodeId,
Lists.newArrayList(TestNoVersionPlugin.Fields.NAME), // JVM Plugin
Lists.newArrayList(TestNoVersionPlugin.Fields.DESCRIPTION),
Lists.newArrayList(PluginInfo.VERSION_NOT_AVAILABLE),
Lists.newArrayList(Fields.SITE_PLUGIN, TestNoVersionPlugin.Fields.NAME),// Site Plugin
Lists.newArrayList(PluginInfo.DESCRIPTION_NOT_AVAILABLE),
Lists.newArrayList(PluginInfo.VERSION_NOT_AVAILABLE));
}
public String startNodeWithPlugins(int nodeId, String ... pluginClassNames) throws URISyntaxException {
return startNodeWithPlugins(Settings.EMPTY, "/org/elasticsearch/nodesinfo/node" + Integer.toString(nodeId) + "/", pluginClassNames);
}
}

View File

@ -0,0 +1,197 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo;
import org.elasticsearch.plugins.PluginInfo;
import org.elasticsearch.test.ElasticsearchTestCase;
import org.junit.Test;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.Properties;
import static org.hamcrest.Matchers.contains;
public class PluginInfoTests extends ElasticsearchTestCase {
void writeProperties(Path pluginDir, String... stringProps) throws IOException {
assert stringProps.length % 2 == 0;
Files.createDirectories(pluginDir);
Path propertiesFile = pluginDir.resolve(PluginInfo.ES_PLUGIN_PROPERTIES);
Properties properties = new Properties();
for (int i = 0; i < stringProps.length; i += 2) {
properties.put(stringProps[i], stringProps[i + 1]);
}
try (OutputStream out = Files.newOutputStream(propertiesFile)) {
properties.store(out, "");
}
}
public void testReadFromProperties() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
writeProperties(pluginDir,
"description", "fake desc",
"version", "1.0",
"elasticsearch.version", Version.CURRENT.toString(),
"jvm", "true",
"classname", "FakePlugin");
PluginInfo info = PluginInfo.readFromProperties(pluginDir);
assertEquals("fake-plugin", info.getName());
assertEquals("fake desc", info.getDescription());
assertEquals("1.0", info.getVersion());
assertEquals("FakePlugin", info.getClassname());
assertTrue(info.isJvm());
assertTrue(info.isIsolated());
assertFalse(info.isSite());
assertNull(info.getUrl());
}
public void testReadFromPropertiesDescriptionMissing() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
writeProperties(pluginDir);
try {
PluginInfo.readFromProperties(pluginDir);
fail("expected missing description exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("[description] is missing"));
}
}
public void testReadFromPropertiesVersionMissing() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
writeProperties(pluginDir, "description", "fake desc");
try {
PluginInfo.readFromProperties(pluginDir);
fail("expected missing version exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("[version] is missing"));
}
}
public void testReadFromPropertiesJvmAndSiteMissing() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
writeProperties(pluginDir,
"description", "fake desc",
"version", "1.0");
try {
PluginInfo.readFromProperties(pluginDir);
fail("expected jvm or site exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("must be at least a jvm or site plugin"));
}
}
public void testReadFromPropertiesElasticsearchVersionMissing() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
writeProperties(pluginDir,
"description", "fake desc",
"version", "1.0",
"jvm", "true");
try {
PluginInfo.readFromProperties(pluginDir);
fail("expected missing elasticsearch version exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("[elasticsearch.version] is missing"));
}
}
public void testReadFromPropertiesBogusElasticsearchVersion() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
writeProperties(pluginDir,
"description", "fake desc",
"version", "1.0",
"jvm", "true",
"elasticsearch.version", "bogus");
try {
PluginInfo.readFromProperties(pluginDir);
fail("expected bogus elasticsearch version exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("version needs to contain major, minor and revision"));
}
}
public void testReadFromPropertiesOldElasticsearchVersion() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
writeProperties(pluginDir,
"description", "fake desc",
"version", "1.0",
"jvm", "true",
"elasticsearch.version", Version.V_1_7_0.toString());
try {
PluginInfo.readFromProperties(pluginDir);
fail("expected old elasticsearch version exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("Elasticsearch version [1.7.0] is too old"));
}
}
public void testReadFromPropertiesJvmMissingClassname() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
writeProperties(pluginDir,
"description", "fake desc",
"version", "1.0",
"elasticsearch.version", Version.CURRENT.toString(),
"jvm", "true");
try {
PluginInfo.readFromProperties(pluginDir);
fail("expected old elasticsearch version exception");
} catch (IllegalArgumentException e) {
assertTrue(e.getMessage().contains("Property [classname] is missing"));
}
}
public void testReadFromPropertiesSitePlugin() throws Exception {
Path pluginDir = createTempDir().resolve("fake-plugin");
writeProperties(pluginDir,
"description", "fake desc",
"version", "1.0",
"elasticsearch.version", Version.CURRENT.toString(),
"site", "true");
PluginInfo info = PluginInfo.readFromProperties(pluginDir);
assertTrue(info.isSite());
assertFalse(info.isJvm());
assertEquals("NA", info.getClassname());
}
public void testPluginListSorted() {
PluginsInfo pluginsInfo = new PluginsInfo(5);
pluginsInfo.add(new PluginInfo("c", "foo", true, "dummy", true, "dummyclass", true));
pluginsInfo.add(new PluginInfo("b", "foo", true, "dummy", true, "dummyclass", true));
pluginsInfo.add(new PluginInfo("e", "foo", true, "dummy", true, "dummyclass", true));
pluginsInfo.add(new PluginInfo("a", "foo", true, "dummy", true, "dummyclass", true));
pluginsInfo.add(new PluginInfo("d", "foo", true, "dummy", true, "dummyclass", true));
final List<PluginInfo> infos = pluginsInfo.getInfos();
List<String> names = Lists.transform(infos, new Function<PluginInfo, String>() {
@Override
public String apply(PluginInfo input) {
return input.getName();
}
});
assertThat(names, contains("a", "b", "c", "d", "e"));
}
}

View File

@ -18,19 +18,14 @@
*/
package org.elasticsearch.plugins;
import com.google.common.base.Predicate;
import org.apache.http.impl.client.HttpClients;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
import org.elasticsearch.common.cli.CliTool;
import org.elasticsearch.common.cli.CliToolTestCase.CaptureOutputTerminal;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.elasticsearch.test.junit.annotations.Network;
@ -48,7 +43,6 @@ import java.nio.file.attribute.PosixFileAttributeView;
import java.nio.file.attribute.PosixFileAttributes;
import java.nio.file.attribute.PosixFilePermission;
import java.util.Locale;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.common.cli.CliTool.ExitStatus.USAGE;
import static org.elasticsearch.common.cli.CliToolTestCase.args;
@ -90,23 +84,10 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
@Test
public void testThatPluginNameMustBeSupplied() throws IOException {
String pluginUrl = getPluginUrlForResource("plugin_single_folder.zip");
String pluginUrl = getPluginUrlForResource("plugin_with_bin_and_config.zip");
assertStatus("install --url " + pluginUrl, USAGE);
}
@Test
public void testLocalPluginInstallSingleFolder() throws Exception {
//When we have only a folder in top-level (no files either) we remove that folder while extracting
String pluginName = "plugin-test";
String pluginUrl = getPluginUrlForResource("plugin_single_folder.zip");
String installCommand = String.format(Locale.ROOT, "install %s --url %s", pluginName, pluginUrl);
assertStatusOk(installCommand);
internalCluster().startNode(initialSettings.v1());
assertPluginLoaded(pluginName);
assertPluginAvailable(pluginName);
}
@Test
public void testLocalPluginInstallWithBinAndConfig() throws Exception {
String pluginName = "plugin-test";
@ -215,43 +196,6 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
assertDirectoryExists(pluginBinDir);
}
@Test
public void testLocalPluginInstallSiteFolder() throws Exception {
//When we have only a folder in top-level (no files either) but it's called _site, we make it work
//we can either remove the folder while extracting and then re-add it manually or just leave it as it is
String pluginName = "plugin-test";
assertStatusOk(String.format(Locale.ROOT, "install %s --url %s --verbose", pluginName, getPluginUrlForResource("plugin_folder_site.zip")));
internalCluster().startNode(initialSettings.v1());
assertPluginLoaded(pluginName);
assertPluginAvailable(pluginName);
}
@Test
public void testLocalPluginWithoutFolders() throws Exception {
//When we don't have folders at all in the top-level, but only files, we don't modify anything
String pluginName = "plugin-test";
assertStatusOk(String.format(Locale.ROOT, "install %s --url %s --verbose", pluginName, getPluginUrlForResource("plugin_without_folders.zip")));
internalCluster().startNode(initialSettings.v1());
assertPluginLoaded(pluginName);
assertPluginAvailable(pluginName);
}
@Test
public void testLocalPluginFolderAndFile() throws Exception {
//When we have a single top-level folder but also files in the top-level, we don't modify anything
String pluginName = "plugin-test";
assertStatusOk(String.format(Locale.ROOT, "install %s --url %s --verbose", pluginName, getPluginUrlForResource("plugin_folder_file.zip")));
internalCluster().startNode(initialSettings.v1());
assertPluginLoaded(pluginName);
assertPluginAvailable(pluginName);
}
@Test
public void testSitePluginWithSourceDoesNotInstall() throws Exception {
String pluginName = "plugin-with-source";
@ -261,54 +205,6 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
assertThat(terminal.getTerminalOutput(), hasItem(containsString("Plugin installation assumed to be site plugin, but contains source code, aborting installation")));
}
private void assertPluginLoaded(String pluginName) {
NodesInfoResponse nodesInfoResponse = client().admin().cluster().prepareNodesInfo().clear().setPlugins(true).get();
assertThat(nodesInfoResponse.getNodes().length, equalTo(1));
assertThat(nodesInfoResponse.getNodes()[0].getPlugins().getInfos(), notNullValue());
assertThat(nodesInfoResponse.getNodes()[0].getPlugins().getInfos().size(), not(0));
boolean pluginFound = false;
for (PluginInfo pluginInfo : nodesInfoResponse.getNodes()[0].getPlugins().getInfos()) {
if (pluginInfo.getName().equals(pluginName)) {
pluginFound = true;
break;
}
}
assertThat(pluginFound, is(true));
}
private void assertPluginAvailable(String pluginName) throws InterruptedException, IOException {
final HttpRequestBuilder httpRequestBuilder = httpClient();
//checking that the http connector is working properly
// We will try it for some seconds as it could happen that the REST interface is not yet fully started
assertThat(awaitBusy(new Predicate<Object>() {
@Override
public boolean apply(Object obj) {
try {
HttpResponse response = httpRequestBuilder.method("GET").path("/").execute();
if (response.getStatusCode() != RestStatus.OK.getStatus()) {
// We want to trace what's going on here before failing the test
logger.info("--> error caught [{}], headers [{}]", response.getStatusCode(), response.getHeaders());
logger.info("--> cluster state [{}]", internalCluster().clusterService().state());
return false;
}
return true;
} catch (IOException e) {
throw new ElasticsearchException("HTTP problem", e);
}
}
}, 5, TimeUnit.SECONDS), equalTo(true));
//checking now that the plugin is available
HttpResponse response = httpClient().method("GET").path("/_plugin/" + pluginName + "/").execute();
assertThat(response, notNullValue());
assertThat(response.getReasonPhrase(), response.getStatusCode(), equalTo(RestStatus.OK.getStatus()));
}
@Test
public void testListInstalledEmpty() throws IOException {
assertStatusOk("list");
@ -394,7 +290,7 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
* It should find it on github
*/
@Test
@Network
@Network @AwaitsFix(bugUrl = "needs to be adapted to 2.0")
public void testInstallPluginWithGithub() throws IOException {
assumeTrue("github.com is accessible", isDownloadServiceWorking("github.com", 443, "/"));
singlePluginInstallAndRemove("elasticsearch/kibana", "kibana", null);

View File

@ -1,40 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins.lucene.current;
import org.elasticsearch.plugins.AbstractPlugin;
public class CurrentLucenePlugin extends AbstractPlugin {
/**
* The name of the plugin.
*/
@Override
public String name() {
return "current-lucene";
}
/**
* The description of the plugin.
*/
@Override
public String description() {
return "current";
}
}

View File

@ -1,21 +0,0 @@
################################################################
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
################################################################
plugin=org.elasticsearch.plugins.lucene.current.CurrentLucenePlugin
version=2.0.0
lucene=${lucene.version}

View File

@ -1,40 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins.lucene.newer;
import org.elasticsearch.plugins.AbstractPlugin;
public class NewerLucenePlugin extends AbstractPlugin {
/**
* The name of the plugin.
*/
@Override
public String name() {
return "newer-lucene";
}
/**
* The description of the plugin.
*/
@Override
public String description() {
return "newer";
}
}

View File

@ -1,21 +0,0 @@
################################################################
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
################################################################
plugin=org.elasticsearch.plugins.lucene.newer.NewerLucenePlugin
version=3.0.0
lucene=99.0.0

View File

@ -1,40 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.plugins.lucene.old;
import org.elasticsearch.plugins.AbstractPlugin;
public class OldLucenePlugin extends AbstractPlugin {
/**
* The name of the plugin.
*/
@Override
public String name() {
return "old-lucene";
}
/**
* The description of the plugin.
*/
@Override
public String description() {
return "old";
}
}

View File

@ -1,21 +0,0 @@
################################################################
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
################################################################
plugin=org.elasticsearch.plugins.lucene.old.OldLucenePlugin
version=1.0.0
lucene=3.0.0

View File

@ -69,6 +69,14 @@ public class FunctionScoreFieldValueTests extends ElasticsearchIntegrationTest {
.get();
assertOrderedSearchHits(response, "2", "1");
// try again, but this time explicitly use the do-nothing modifier
response = client().prepareSearch("test")
.setExplain(randomBoolean())
.setQuery(functionScoreQuery(simpleQueryStringQuery("foo"),
fieldValueFactorFunction("test").modifier(FieldValueFactorFunction.Modifier.NONE)))
.get();
assertOrderedSearchHits(response, "2", "1");
// document 1 scores higher because 1/5 > 1/17
response = client().prepareSearch("test")
.setExplain(randomBoolean())

View File

@ -292,7 +292,6 @@ public final class InternalTestCluster extends TestCluster {
builder.put(InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING, true);
builder.put("node.mode", NODE_MODE);
builder.put("http.pipelining", enableHttpPipelining);
builder.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, false);
builder.put(NodeEnvironment.SETTING_CUSTOM_DATA_PATH_ENABLED, true);
if (Strings.hasLength(System.getProperty("es.logger.level"))) {
builder.put("logger.level", System.getProperty("es.logger.level"));
@ -908,7 +907,6 @@ public final class InternalTestCluster extends TestCluster {
.put("client.transport.nodes_sampler_interval", "1s")
.put("path.home", baseDir)
.put("name", TRANSPORT_CLIENT_PREFIX + node.settings().get("name"))
.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, false)
.put(ClusterName.SETTING, clusterName).put("client.transport.sniff", sniff)
.put("node.mode", nodeSettings.get("node.mode", NODE_MODE))
.put("node.local", nodeSettings.get("node.local", ""))

View File

@ -35,7 +35,7 @@ import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
import org.elasticsearch.plugins.PluginInfo;
import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo;
import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;

View File

@ -113,7 +113,7 @@ if __name__ == '__main__':
print('Find plugins:')
for name in os.listdir('plugins'):
if name not in ('target', 'pom.xml'):
url = 'file://%s/plugins/%s/target/releases/elasticsearch-%s-2.0.0-SNAPSHOT.zip' % (os.path.abspath('.'), name, name)
url = 'file://%s/plugins/%s/target/releases/elasticsearch-%s-2.0.0-beta1-SNAPSHOT.zip' % (os.path.abspath('.'), name, name)
print(' install plugin %s...' % name)
run('%s; %s install %s --url %s' % (JAVA_ENV, es_plugin_path, name, url))
installed_plugin_names.add(name)

View File

@ -5,6 +5,13 @@
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<files>
<file>
<source>${elasticsearch.tools.directory}/plugin-metadata/plugin-descriptor.properties</source>
<outputDirectory></outputDirectory>
<filtered>true</filtered>
</file>
</files>
<dependencySets>
<dependencySet>
<outputDirectory>/</outputDirectory>

View File

@ -0,0 +1,32 @@
# elasticsearch plugin descriptor file
#
# example:
# jvm=true
# classname=foo.bar.BazPlugin
# isolated=true
# site=false
# description=My cool plugin
# version=2.0
# elasticsearch.version=2.0
#
# A plugin can be 'jvm', 'site', or both
#
# 'jvm': true if the 'classname' class should be loaded
# from jar files in the root directory of the plugin
jvm=${elasticsearch.plugin.jvm}
# 'classname': the name of the class to load.
classname=${elasticsearch.plugin.classname}
# 'isolated': true if the plugin should have its own classloader.
# passing false is deprecated, and only intended to support plugins
# that have hard dependencies against each other
isolated=${elasticsearch.plugin.isolated}
#
# 'site': true if the contents of _site should be served
site=${elasticsearch.plugin.site}
#
# 'description': simple summary of the plugin
description=${project.description}
# 'version': plugin's version
version=${project.version}
# 'elasticsearch.version': version of elasticsearch this plugin was compiled against
elasticsearch.version=${elasticsearch.version}
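Filled in (after Maven resource filtering substitutes the ${...} placeholders), a descriptor for a hypothetical jvm plugin might look like the following; every key comes from the template above, while the concrete values are invented for illustration:

jvm=true
classname=org.example.plugin.ExamplePlugin
isolated=true
site=false
description=An example plugin
version=2.0.0
elasticsearch.version=2.0.0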

View File

@ -15,6 +15,7 @@
<description>The ICU Analysis plugin integrates Lucene ICU module into elasticsearch, adding ICU relates analysis components.</description>
<properties>
<elasticsearch.plugin.classname>org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin</elasticsearch.plugin.classname>
<tests.rest.suite>analysis_icu</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>
</properties>

View File

@ -1,3 +0,0 @@
plugin=org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin
version=${project.version}
lucene=${lucene.version}

View File

@ -16,6 +16,7 @@
<description>The Japanese (kuromoji) Analysis plugin integrates Lucene kuromoji analysis module into elasticsearch.</description>
<properties>
<elasticsearch.plugin.classname>org.elasticsearch.plugin.analysis.kuromoji.AnalysisKuromojiPlugin</elasticsearch.plugin.classname>
<tests.rest.suite>analysis_kuromoji</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>
</properties>

View File

@ -1,19 +0,0 @@
<?xml version="1.0"?>
<assembly>
<id>plugin</id>
<formats>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<dependencySets>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<useTransitiveDependencies>true</useTransitiveDependencies>
<excludes>
<exclude>org.elasticsearch:elasticsearch</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>

View File

@ -1,3 +0,0 @@
plugin=org.elasticsearch.plugin.analysis.kuromoji.AnalysisKuromojiPlugin
version=${project.version}
lucene=${lucene.version}

View File

@ -15,9 +15,10 @@
<description>The Phonetic Analysis plugin integrates phonetic token filter analysis with elasticsearch.</description>
<properties>
<elasticsearch.plugin.classname>org.elasticsearch.plugin.analysis.AnalysisPhoneticPlugin</elasticsearch.plugin.classname>
<tests.rest.suite>analysis_phonetic</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>
</properties>
</properties>
<dependencies>
<dependency>

View File

@ -1,19 +0,0 @@
<?xml version="1.0"?>
<assembly>
<id>plugin</id>
<formats>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<dependencySets>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<useTransitiveDependencies>true</useTransitiveDependencies>
<excludes>
<exclude>org.elasticsearch:elasticsearch</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>

View File

@ -1,3 +0,0 @@
plugin=org.elasticsearch.plugin.analysis.AnalysisPhoneticPlugin
version=${project.version}
lucene=${lucene.version}

View File

@ -15,6 +15,7 @@
<description>Smart Chinese Analysis plugin integrates Lucene Smart Chinese analysis module into elasticsearch.</description>
<properties>
<elasticsearch.plugin.classname>org.elasticsearch.plugin.analysis.smartcn.AnalysisSmartChinesePlugin</elasticsearch.plugin.classname>
<tests.rest.suite>analysis_smartcn</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>
</properties>

View File

@ -1,19 +0,0 @@
<?xml version="1.0"?>
<assembly>
<id>plugin</id>
<formats>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<dependencySets>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<useTransitiveDependencies>true</useTransitiveDependencies>
<excludes>
<exclude>org.elasticsearch:elasticsearch</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>

View File

@ -1,3 +0,0 @@
plugin=org.elasticsearch.plugin.analysis.smartcn.AnalysisSmartChinesePlugin
version=${project.version}
lucene=${lucene.version}

View File

@ -15,6 +15,7 @@
<description>The Stempel (Polish) Analysis plugin integrates Lucene stempel (polish) analysis module into elasticsearch.</description>
<properties>
<elasticsearch.plugin.classname>org.elasticsearch.plugin.analysis.stempel.AnalysisStempelPlugin</elasticsearch.plugin.classname>
<tests.rest.suite>analysis_stempel</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>
</properties>

View File

@ -1,19 +0,0 @@
<?xml version="1.0"?>
<assembly>
<id>plugin</id>
<formats>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<dependencySets>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<useTransitiveDependencies>true</useTransitiveDependencies>
<excludes>
<exclude>org.elasticsearch:elasticsearch</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>

View File

@ -1,3 +0,0 @@
plugin=org.elasticsearch.plugin.analysis.stempel.AnalysisStempelPlugin
version=${project.version}
lucene=${lucene.version}

View File

@ -15,6 +15,7 @@
<description>The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.</description>
<properties>
<elasticsearch.plugin.classname>org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin</elasticsearch.plugin.classname>
<amazonaws.version>1.10.0</amazonaws.version>
<tests.jvms>1</tests.jvms>
<tests.rest.suite>cloud_aws</tests.rest.suite>

View File

@ -1,19 +0,0 @@
<?xml version="1.0"?>
<assembly>
<id>plugin</id>
<formats>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<dependencySets>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<useTransitiveDependencies>true</useTransitiveDependencies>
<excludes>
<exclude>org.elasticsearch:elasticsearch</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>

View File

@ -1,3 +0,0 @@
plugin=org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin
version=${project.version}

View File

@ -25,6 +25,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.FailedToResolveConfigException;
import org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ThirdParty;
@ -81,7 +82,7 @@ public abstract class AbstractAwsTest extends ElasticsearchIntegrationTest {
Settings.Builder settings = Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put("path.home", createTempDir())
.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
.put("plugin.types", CloudAwsPlugin.class.getName())
.put(AwsModule.S3_SERVICE_TYPE_KEY, TestAwsS3Service.class)
.put("cloud.aws.test.random", randomInt())
.put("cloud.aws.test.write_failures", 0.1)
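
All of the test hunks in this commit follow the same migration: instead of flipping the classpath-scanning flag ("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH), each test now names the exact plugin class it needs through the plugin.types setting. A minimal sketch of the pattern, with MyPlugin as a hypothetical stand-in for any of the concrete plugin classes in these hunks:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.test.ElasticsearchIntegrationTest;

    public class MyPluginIntegrationTest extends ElasticsearchIntegrationTest {

        @Override
        protected Settings nodeSettings(int nodeOrdinal) {
            return Settings.builder()
                    .put(super.nodeSettings(nodeOrdinal))
                    // was: .put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
                    .put("plugin.types", MyPlugin.class.getName()) // MyPlugin is hypothetical
                    .build();
        }
    }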

View File

@ -22,6 +22,7 @@ package org.elasticsearch.discovery.ec2;
import org.elasticsearch.cloud.aws.AbstractAwsTest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
@ -40,7 +41,7 @@ public class Ec2DiscoveryITest extends AbstractAwsTest {
@Test
public void testStart() {
Settings nodeSettings = settingsBuilder()
.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
.put("plugin.types", CloudAwsPlugin.class.getName())
.put("cloud.enabled", true)
.put("discovery.type", "ec2")
.build();

View File

@ -23,6 +23,7 @@ package org.elasticsearch.discovery.ec2;
import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
import org.elasticsearch.cloud.aws.AbstractAwsTest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
@ -42,7 +43,7 @@ public class Ec2DiscoveryUpdateSettingsITest extends AbstractAwsTest {
@Test
public void testMinimumMasterNodesStart() {
Settings nodeSettings = settingsBuilder()
.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
.put("plugin.types", CloudAwsPlugin.class.getName())
.put("cloud.enabled", true)
.put("discovery.type", "ec2")
.build();

View File

@ -33,6 +33,7 @@ import org.elasticsearch.cloud.aws.AbstractAwsTest;
import org.elasticsearch.cloud.aws.AwsS3Service;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.repositories.RepositoryMissingException;
import org.elasticsearch.repositories.RepositoryVerificationException;
@ -63,7 +64,7 @@ abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTest {
.put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false)
.put(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE, false)
.put("cloud.enabled", true)
.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
.put("plugin.types", CloudAwsPlugin.class.getName())
.build();
}

View File

@ -26,7 +26,7 @@ governing permissions and limitations under the License. -->
<description>The Azure Cloud plugin allows to use Azure API for the unicast discovery mechanism and add Azure storage repositories.</description>
<properties>
<!-- You can add any specific project property here -->
<elasticsearch.plugin.classname>org.elasticsearch.plugin.cloud.azure.CloudAzurePlugin</elasticsearch.plugin.classname>
<tests.jvms>1</tests.jvms>
<tests.rest.suite>cloud_azure</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>

View File

@ -1,19 +0,0 @@
<?xml version="1.0"?>
<assembly>
<id>plugin</id>
<formats>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<dependencySets>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<useTransitiveDependencies>true</useTransitiveDependencies>
<excludes>
<exclude>org.elasticsearch:elasticsearch</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>

View File

@ -1,12 +0,0 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with this work for additional
# information regarding copyright ownership. ElasticSearch licenses this file to you
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
# applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
plugin=org.elasticsearch.plugin.cloud.azure.CloudAzurePlugin
version=${project.version}

View File

@ -23,7 +23,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.FailedToResolveConfigException;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugin.cloud.azure.CloudAzurePlugin;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ThirdParty;
@ -40,7 +40,7 @@ public abstract class AbstractAzureTest extends ElasticsearchIntegrationTest {
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put(PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
.put("plugin.types", CloudAzurePlugin.class.getName())
.put(readSettingsFromFile())
.build();
}

View File

@ -24,7 +24,7 @@ import org.elasticsearch.cloud.azure.management.AzureComputeService;
import org.elasticsearch.cloud.azure.management.AzureComputeService.Discovery;
import org.elasticsearch.cloud.azure.management.AzureComputeService.Management;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugin.cloud.azure.CloudAzurePlugin;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
public abstract class AbstractAzureComputeServiceTest extends ElasticsearchIntegrationTest {
@ -40,7 +40,7 @@ public abstract class AbstractAzureComputeServiceTest extends ElasticsearchInteg
protected Settings nodeSettings(int nodeOrdinal) {
Settings.Builder settings = Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put(PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true);
.put("plugin.types", CloudAzurePlugin.class.getName());
return settings.build();
}

View File

@ -20,11 +20,13 @@
package org.elasticsearch.repositories.azure;
import com.microsoft.azure.storage.StorageException;
import org.elasticsearch.cloud.azure.AbstractAzureTest;
import org.elasticsearch.cloud.azure.storage.AzureStorageService;
import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugin.cloud.azure.CloudAzurePlugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.repositories.RepositoryMissingException;
import org.elasticsearch.test.store.MockFSDirectoryService;
@ -65,7 +67,7 @@ public abstract class AbstractAzureRepositoryServiceTest extends AbstractAzureTe
@Override
protected Settings nodeSettings(int nodeOrdinal) {
Settings.Builder builder = Settings.settingsBuilder()
.put(PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
.put("plugin.types", CloudAzurePlugin.class.getName())
.put(Storage.API_IMPLEMENTATION, mock)
.put(Storage.CONTAINER, "snapshots");

View File

@ -26,6 +26,7 @@ governing permissions and limitations under the License. -->
<description>The Google Compute Engine (GCE) Cloud plugin allows to use GCE API for the unicast discovery mechanism.</description>
<properties>
<elasticsearch.plugin.classname>org.elasticsearch.plugin.cloud.gce.CloudGcePlugin</elasticsearch.plugin.classname>
<google.gce.version>v1-rev59-1.20.0</google.gce.version>
<es.plugin.port>9300</es.plugin.port>
<!-- currently has no unit tests -->

View File

@ -1,19 +0,0 @@
<?xml version="1.0"?>
<assembly>
<id>plugin</id>
<formats>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<dependencySets>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<useTransitiveDependencies>true</useTransitiveDependencies>
<excludes>
<exclude>org.elasticsearch:elasticsearch</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>

View File

@ -1,12 +0,0 @@
# Licensed to ElasticSearch under one or more contributor
# license agreements. See the NOTICE file distributed with this work for additional
# information regarding copyright ownership. ElasticSearch licenses this file to you
# under the Apache License, Version 2.0 (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
# applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
plugin=org.elasticsearch.plugin.cloud.gce.CloudGcePlugin
version=${project.version}

View File

@ -23,6 +23,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.FailedToResolveConfigException;
import org.elasticsearch.plugin.cloud.gce.CloudGcePlugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ThirdParty;
@ -38,7 +39,7 @@ public abstract class AbstractGceTest extends ElasticsearchIntegrationTest {
Settings.Builder settings = Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put("path.home", createTempDir())
.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true);
.put("plugin.types", CloudGcePlugin.class.getName());
Environment environment = new Environment(settings.build());

View File

@ -20,11 +20,13 @@
package org.elasticsearch.discovery.gce;
import com.google.common.collect.Lists;
import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.cloud.gce.GceComputeService;
import org.elasticsearch.cloud.gce.GceComputeService.Fields;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.gce.mock.*;
import org.elasticsearch.plugin.cloud.gce.CloudGcePlugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.junit.Ignore;
@ -73,7 +75,7 @@ public class GceComputeEngineTest extends ElasticsearchIntegrationTest {
// We disable http
.put("http.enabled", false)
// We force plugin loading
.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
.put("plugin.types", CloudGcePlugin.class.getName())
.put(settings)
.put(super.nodeSettings(nodeOrdinal));

View File

@ -22,6 +22,7 @@ package org.elasticsearch.gce.itest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.cloud.gce.AbstractGceTest;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugin.cloud.gce.CloudGcePlugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.hamcrest.Matchers;
@ -44,7 +45,7 @@ public class GceSimpleITest extends AbstractGceTest {
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
.put("plugin.types", CloudGcePlugin.class.getName())
.build();
}

plugins/delete-by-query/.gitignore (vendored, new file)
View File

@ -0,0 +1 @@
/bin/

View File

@ -26,6 +26,7 @@ governing permissions and limitations under the License. -->
<description>The Delete By Query plugin allows to delete documents in Elasticsearch with a single query.</description>
<properties>
<elasticsearch.plugin.classname>org.elasticsearch.plugin.deletebyquery.DeleteByQueryPlugin</elasticsearch.plugin.classname>
<tests.ifNoTests>warn</tests.ifNoTests>
<tests.rest.suite>delete_by_query</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>

View File

@ -1,34 +0,0 @@
<?xml version="1.0"?>
<!-- Licensed to ElasticSearch under one or more contributor
license agreements. See the NOTICE file distributed with this work for additional
information regarding copyright ownership. ElasticSearch licenses this file to you
under the Apache License, Version 2.0 (the "License"); you may not use this
file except in compliance with the License. You may obtain a copy of the
License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by
applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the specific language
governing permissions and limitations under the License. -->
<assembly>
<id>plugin</id>
<formats>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<dependencySets>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<excludes>
<exclude>org.elasticsearch:elasticsearch</exclude>
</excludes>
</dependencySet>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
</dependencySet>
</dependencySets>
</assembly>

View File

@ -1,3 +0,0 @@
plugin=org.elasticsearch.plugin.deletebyquery.DeleteByQueryPlugin
version=${project.version}
lucene=${lucene.version}

View File

@ -55,6 +55,13 @@ import static org.hamcrest.Matchers.nullValue;
@ClusterScope(scope = SUITE, transportClientRatio = 0)
public class DeleteByQueryTests extends ElasticsearchIntegrationTest {
protected Settings nodeSettings(int nodeOrdinal) {
Settings.Builder settings = Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put("plugin.types", DeleteByQueryPlugin.class.getName());
return settings.build();
}
@Test(expected = ActionRequestValidationException.class)
public void testDeleteByQueryWithNoSource() {
newDeleteByQuery().get();

View File

@ -21,7 +21,10 @@ package org.elasticsearch.plugin.deletebyquery.test.rest;
import com.carrotsearch.randomizedtesting.annotations.Name;
import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugin.deletebyquery.DeleteByQueryPlugin;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.elasticsearch.test.rest.ElasticsearchRestTestCase;
import org.elasticsearch.test.rest.ElasticsearchRestTestCase.Rest;
@ -46,5 +49,12 @@ public class DeleteByQueryRestTests extends ElasticsearchRestTestCase {
public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
return ElasticsearchRestTestCase.createParameters(0, 1);
}
protected Settings nodeSettings(int nodeOrdinal) {
Settings.Builder settings = Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put("plugin.types", DeleteByQueryPlugin.class.getName());
return settings.build();
}
}
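
None of these hunks show the consuming side of plugin.types. As a rough mental model only (a hedged sketch, not the actual PluginsService implementation, and the comma-separated multi-value format is an assumption), the setting can be read as a list of fully qualified class names that the node instantiates reflectively:

    import java.util.ArrayList;
    import java.util.List;

    public final class PluginTypesSketch {
        // Hypothetical helper: turns a "plugin.types" value into plugin instances.
        static List<Object> instantiate(String pluginTypes) throws Exception {
            List<Object> plugins = new ArrayList<>();
            if (pluginTypes == null || pluginTypes.trim().isEmpty()) {
                return plugins;
            }
            for (String className : pluginTypes.split(",")) {
                // Each entry is expected to be a fully qualified plugin class name,
                // e.g. the DeleteByQueryPlugin.class.getName() set in the test above.
                plugins.add(Class.forName(className.trim()).newInstance());
            }
            return plugins;
        }
    }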

View File

@ -15,7 +15,7 @@
<description>The JavaScript language plugin allows to have javascript as the language of scripts to execute.</description>
<properties>
<!-- You can add any specific project property here -->
<elasticsearch.plugin.classname>org.elasticsearch.plugin.javascript.JavaScriptPlugin</elasticsearch.plugin.classname>
<tests.rest.suite>lang_javascript</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>
</properties>

View File

@ -1,19 +0,0 @@
<?xml version="1.0"?>
<assembly>
<id>plugin</id>
<formats>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<dependencySets>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<useTransitiveDependencies>true</useTransitiveDependencies>
<excludes>
<exclude>org.elasticsearch:elasticsearch</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>

View File

@ -1,2 +0,0 @@
plugin=org.elasticsearch.plugin.javascript.JavaScriptPlugin
version=${project.version}

View File

@ -15,7 +15,7 @@
<description>The Python language plugin allows to have python as the language of scripts to execute.</description>
<properties>
<!-- You can add any specific project property here -->
<elasticsearch.plugin.classname>org.elasticsearch.plugin.python.PythonPlugin</elasticsearch.plugin.classname>
<tests.rest.suite>lang_python</tests.rest.suite>
<tests.rest.load_packaged>false</tests.rest.load_packaged>
</properties>

View File

@ -1,19 +0,0 @@
<?xml version="1.0"?>
<assembly>
<id>plugin</id>
<formats>
<format>zip</format>
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<dependencySets>
<dependencySet>
<outputDirectory>/</outputDirectory>
<useProjectArtifact>true</useProjectArtifact>
<useTransitiveFiltering>true</useTransitiveFiltering>
<useTransitiveDependencies>true</useTransitiveDependencies>
<excludes>
<exclude>org.elasticsearch:elasticsearch</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>

View File

@ -1,2 +0,0 @@
plugin=org.elasticsearch.plugin.python.PythonPlugin
version=${project.version}

View File

@ -19,8 +19,11 @@
</parent>
<properties>
<elasticsearch.assembly.descriptor>${basedir}/src/main/assemblies/plugin.xml</elasticsearch.assembly.descriptor>
<elasticsearch.assembly.descriptor>${elasticsearch.tools.directory}/plugin-metadata/plugin-assembly.xml</elasticsearch.assembly.descriptor>
<elasticsearch.assembly.appendId>false</elasticsearch.assembly.appendId>
<elasticsearch.plugin.jvm>true</elasticsearch.plugin.jvm>
<elasticsearch.plugin.isolated>true</elasticsearch.plugin.isolated>
<elasticsearch.plugin.site>false</elasticsearch.plugin.site>
</properties>
<dependencies>
@ -349,6 +352,25 @@
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<executions>
<execution>
<id>enforce-plugin-classname</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<requireProperty>
<property>elasticsearch.plugin.classname</property>
</requireProperty>
</rules>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</pluginManagement>
</build>

View File

@ -909,6 +909,7 @@
<goal>copy</goal>
</goals>
<configuration>
<skip>${skip.integ.tests}</skip>
<artifactItems>
<artifactItem>
<groupId>org.elasticsearch</groupId>
@ -1349,6 +1350,7 @@ org.eclipse.jdt.ui.text.custom_code_templates=<?xml version\="1.0" encoding\="UT
<properties>
<skip.unit.tests>true</skip.unit.tests>
<skip.integ.tests>true</skip.integ.tests>
<enforcer.skip>true</enforcer.skip>
</properties>
<build>
<plugins>