From c151f2137a23c36d59aed30153a6d989872a5262 Mon Sep 17 00:00:00 2001
From: Ryan Ernst
Date: Tue, 18 Aug 2015 21:32:51 -0700
Subject: [PATCH 01/51] Internal: Remove all uses of ImmutableList

We are in the process of getting rid of guava, and this removes a major
use. The replacements are mostly Collections.emptyList(), Arrays.asList(),
and Collections.unmodifiableList(). While it is questionable whether we
need the last one (these lists are usually assigned to final members), we
can continue to refactor later by removing unnecessary wrappings.
---
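As a reading aid, here is a small, self-contained sketch of the JDK idioms
this commit substitutes for Guava's ImmutableList; the class and variable
names are illustrative and do not appear in the patch itself:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class ImmutableListMigration {
        public static void main(String[] args) {
            // ImmutableList.of()            -> Collections.emptyList()
            List<String> empty = Collections.emptyList();

            // ImmutableList.of(element)     -> Collections.singletonList(element)
            // (as in TransportUpdateAction for the lone primary shard)
            List<String> single = Collections.singletonList("primary");

            // ImmutableList.copyOf(array)   -> Arrays.asList(array)
            // Arrays.asList returns a fixed-size list backed by the array.
            List<String> fromArray = Arrays.asList("a", "b", "c");

            // ImmutableList.Builder.build() -> ArrayList filled in a loop and
            // then wrapped, the pattern used by the readFrom(StreamInput)
            // methods throughout this patch.
            List<String> builder = new ArrayList<>();
            for (int i = 0; i < 3; i++) {
                builder.add("entry-" + i);
            }
            List<String> wrapped = Collections.unmodifiableList(builder);

            System.out.println(empty + " " + single + " " + fromArray + " " + wrapped);
        }
    }

Collections.unmodifiableList returns a read-only view rather than a copy, so
call sites whose backing list may still change make a defensive copy first
(TransportClientNodesService wraps new ArrayList<>(nodes)), while the
readFrom methods wrap their freshly built list directly because it never
escapes.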
 .../cluster/health/ClusterHealthResponse.java | 4 +-
 .../cluster/health/ClusterIndexHealth.java | 5 +-
 .../get/GetRepositoriesResponse.java | 11 +-
 .../get/TransportGetRepositoriesAction.java | 11 +-
 .../snapshots/get/GetSnapshotsResponse.java | 11 +-
 .../get/TransportGetSnapshotsAction.java | 7 +-
 .../snapshots/status/SnapshotStatus.java | 11 +-
 .../status/SnapshotsStatusResponse.java | 14 +-
 .../TransportSnapshotsStatusAction.java | 15 ++-
 .../indices/alias/IndicesAliasesRequest.java | 5 +-
 .../indices/alias/get/GetAliasesResponse.java | 4 +-
 .../alias/get/TransportGetAliasesAction.java | 2 +-
 .../admin/indices/get/GetIndexResponse.java | 36 ++---
 .../indices/get/TransportGetIndexAction.java | 7 +-
 .../admin/indices/segments/ShardSegments.java | 4 +-
 .../shards/IndicesShardStoresResponse.java | 14 +-
 .../TransportIndicesShardStoresAction.java | 17 ++-
 .../validate/query/ValidateQueryResponse.java | 6 +-
 .../warmer/get/GetWarmersResponse.java | 20 +--
 .../warmer/get/TransportGetWarmersAction.java | 5 +-
 .../percolate/PercolateShardResponse.java | 4 +-
 .../action/update/TransportUpdateAction.java | 6 +-
 .../bootstrap/JNAKernel32Library.java | 6 +-
 .../TransportClientNodesService.java | 27 ++--
 .../cluster/ClusterChangedEvent.java | 12 +-
 .../cluster/RestoreInProgress.java | 20 +--
 .../cluster/SnapshotsInProgress.java | 38 +++---
 .../metadata/IndexNameExpressionResolver.java | 5 +-
 .../cluster/metadata/MetaData.java | 13 +-
 .../metadata/RepositoriesMetaData.java | 9 +-
 .../cluster/node/DiscoveryNode.java | 5 +-
 .../cluster/node/DiscoveryNodes.java | 12 +-
 .../cluster/routing/IndexRoutingTable.java | 6 +-
 .../routing/IndexShardRoutingTable.java | 59 ++++-----
 .../routing/RoutingTableValidation.java | 12 +-
 .../routing/allocation/AllocationService.java | 4 +-
 .../common/blobstore/BlobPath.java | 16 ++-
 .../common/inject/EncounterImpl.java | 14 +-
 .../inject/InjectionRequestProcessor.java | 3 +-
 .../common/inject/InjectorImpl.java | 6 +-
 .../common/inject/MembersInjectorImpl.java | 13 +-
 .../common/inject/MembersInjectorStore.java | 12 +-
 .../common/inject/ProvisionException.java | 4 +-
 .../elasticsearch/common/inject/State.java | 4 +-
 .../common/inject/TypeLiteral.java | 5 +-
 .../assistedinject/FactoryProvider2.java | 12 +-
 .../common/inject/internal/Errors.java | 5 +-
 .../inject/internal/PrivateElementsImpl.java | 6 +-
 .../inject/multibindings/Multibinder.java | 3 +-
 .../common/inject/spi/Elements.java | 3 +-
 .../common/inject/spi/InjectionPoint.java | 13 +-
 .../common/inject/spi/Message.java | 11 +-
 .../common/logging/log4j/LogConfigurator.java | 4 +-
 .../common/util/CollectionUtils.java | 1 -
 .../common/util/concurrent/AtomicArray.java | 4 +-
 .../filtering/FilteringJsonGenerator.java | 7 +-
 .../discovery/zen/ping/ZenPingService.java | 16 ++-
 .../gateway/DanglingIndicesState.java | 4 +-
 .../index/fielddata/ScriptDocValues.java | 10 +-
 .../index/get/ShardGetService.java | 6 +-
 .../index/mapper/FieldMapper.java | 9 +-
 .../index/mapper/MapperService.java | 4 +-
 .../index/mapper/ObjectMappers.java | 124 ------------------
 .../index/query/IdsQueryParser.java | 4 +-
 .../search/stats/StatsGroupsParseElement.java | 4 +-
 .../index/shard/CommitPoint.java | 12 +-
 .../index/shard/CommitPoints.java | 6 +-
 .../BlobStoreIndexShardSnapshot.java | 10 +-
 .../BlobStoreIndexShardSnapshots.java | 29 ++--
 .../snapshots/blobstore/SnapshotFiles.java | 1 -
 .../org/elasticsearch/index/store/Store.java | 9 +-
 .../indices/recovery/RecoveryState.java | 5 +-
 .../internal/InternalSettingsPreparer.java | 4 +-
 .../percolator/PercolateContext.java | 3 +-
 .../elasticsearch/plugins/PluginsService.java | 13 +-
 .../blobstore/BlobStoreRepository.java | 13 +-
 .../indices/get/RestGetIndicesAction.java | 6 +-
 .../warmer/get/RestGetWarmerAction.java | 5 +-
 .../aggregations/InternalAggregation.java | 4 +-
 .../aggregations/InternalAggregations.java | 6 +-
 .../search/builder/SearchSourceBuilder.java | 3 +-
 .../search/fetch/FetchPhase.java | 4 +-
 .../search/highlight/HighlightPhase.java | 8 +-
 .../search/highlight/HighlightUtils.java | 4 +-
 .../search/internal/DefaultSearchContext.java | 3 +-
 .../search/internal/SubSearchContext.java | 4 +-
 .../search/warmer/IndexWarmersMetaData.java | 7 +-
 .../elasticsearch/snapshots/RestoreInfo.java | 11 +-
 .../snapshots/RestoreService.java | 5 +-
 .../org/elasticsearch/snapshots/Snapshot.java | 9 +-
 .../elasticsearch/snapshots/SnapshotInfo.java | 13 +-
 .../snapshots/SnapshotUtils.java | 8 +-
 .../snapshots/SnapshotsService.java | 29 ++--
 .../transport/netty/NettyTransport.java | 27 ++--
 .../action/admin/indices/get/GetIndexIT.java | 9 +-
 .../shards/IndicesShardStoreResponseTest.java | 5 +-
 .../GetIndexBackwardsCompatibilityIT.java | 11 +-
 .../cluster/ClusterStateDiffIT.java | 6 +-
 .../org/elasticsearch/cluster/ack/AckIT.java | 4 +-
 .../cluster/routing/UnassignedInfoTests.java | 4 +-
 .../allocation/FailedShardsRoutingTests.java | 4 +-
 .../structure/RoutingIteratorTests.java | 11 +-
 .../common/util/CollectionUtilsTests.java | 3 +-
 .../DiscoveryWithServiceDisruptionsIT.java | 8 +-
 .../TokenCountFieldMapperIntegrationIT.java | 8 +-
 .../indices/warmer/IndicesWarmerBlocksIT.java | 4 +-
 .../indices/warmer/SimpleIndicesWarmerIT.java | 4 +-
 .../search/suggest/SuggestSearchIT.java | 9 +-
 .../DedicatedClusterSnapshotRestoreIT.java | 5 +-
 .../SharedClusterSnapshotRestoreIT.java | 8 +-
 .../snapshots/SnapshotUtilsTests.java | 5 +-
 .../org/elasticsearch/test/VersionUtils.java | 5 +-
 .../test/disruption/NetworkPartition.java | 15 ++-
 .../test/rest/section/ApiCallSection.java | 4 +-
 114 files changed, 539 insertions(+), 628 deletions(-)
 delete mode 100644 core/src/main/java/org/elasticsearch/index/mapper/ObjectMappers.java

diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java
index 7172c254dd8..e227a97e5c9 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponse.java
@@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.health;
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Maps;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionResponse;
@@ -35,6 +34,7 @@ import org.elasticsearch.common.xcontent.*;
 import org.elasticsearch.rest.RestStatus;
 import java.io.IOException;
+import java.util.Collections;
 import java.util.Iterator;
import java.util.List; import java.util.Locale; @@ -267,7 +267,7 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable, Streama ClusterShardHealth shardHealth = readClusterShardHealth(in); shards.put(shardHealth.getId(), shardHealth); } - validationFailures = ImmutableList.copyOf(in.readStringArray()); + validationFailures = Arrays.asList(in.readStringArray()); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java index 2d930309d02..c933156fcb0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.repositories.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.io.stream.StreamInput; @@ -27,6 +26,8 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -35,13 +36,13 @@ import java.util.List; */ public class GetRepositoriesResponse extends ActionResponse implements Iterable { - private ImmutableList repositories = ImmutableList.of(); + private List repositories = Collections.emptyList(); GetRepositoriesResponse() { } - GetRepositoriesResponse(ImmutableList repositories) { + GetRepositoriesResponse(List repositories) { this.repositories = repositories; } @@ -59,7 +60,7 @@ public class GetRepositoriesResponse extends ActionResponse implements Iterable< public void readFrom(StreamInput in) throws IOException { super.readFrom(in); int size = in.readVInt(); - ImmutableList.Builder repositoryListBuilder = ImmutableList.builder(); + List repositoryListBuilder = new ArrayList<>(); for (int j = 0; j < size; j++) { repositoryListBuilder.add(new RepositoryMetaData( in.readString(), @@ -67,7 +68,7 @@ public class GetRepositoriesResponse extends ActionResponse implements Iterable< Settings.readSettingsFromStream(in)) ); } - repositories = repositoryListBuilder.build(); + repositories = Collections.unmodifiableList(repositoryListBuilder); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index bf7d7e4e9c1..1e2e2fd7335 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.repositories.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; @@ -37,6 +36,10 @@ import org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + /** * Transport action for get repositories operation */ @@ -71,11 +74,11 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadActio if (repositories != null) { listener.onResponse(new GetRepositoriesResponse(repositories.repositories())); } else { - listener.onResponse(new GetRepositoriesResponse(ImmutableList.of())); + listener.onResponse(new GetRepositoriesResponse(Collections.emptyList())); } } else { if (repositories != null) { - ImmutableList.Builder repositoryListBuilder = ImmutableList.builder(); + List repositoryListBuilder = new ArrayList<>(); for (String repository : request.repositories()) { RepositoryMetaData repositoryMetaData = repositories.repository(repository); if (repositoryMetaData == null) { @@ -84,7 +87,7 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadActio } repositoryListBuilder.add(repositoryMetaData); } - listener.onResponse(new GetRepositoriesResponse(repositoryListBuilder.build())); + listener.onResponse(new GetRepositoriesResponse(Collections.unmodifiableList(repositoryListBuilder))); } else { listener.onFailure(new RepositoryMissingException(request.repositories()[0])); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index 71b8fa34a2a..4ca88daad54 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -29,6 +28,8 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.snapshots.SnapshotInfo; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -36,12 +37,12 @@ import java.util.List; */ public class GetSnapshotsResponse extends ActionResponse implements ToXContent { - private ImmutableList snapshots = ImmutableList.of(); + private List snapshots = Collections.emptyList(); GetSnapshotsResponse() { } - GetSnapshotsResponse(ImmutableList snapshots) { + GetSnapshotsResponse(List snapshots) { this.snapshots = snapshots; } @@ -58,11 +59,11 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); int size = in.readVInt(); - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); for (int i = 0; i < size; i++) { builder.add(SnapshotInfo.readSnapshotInfo(in)); } - snapshots = builder.build(); + snapshots = Collections.unmodifiableList(builder); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 40a00c73d4f..b21e16d2d66 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ 
b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; @@ -37,6 +36,8 @@ import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -71,7 +72,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction listener) { try { - ImmutableList.Builder snapshotInfoBuilder = ImmutableList.builder(); + List snapshotInfoBuilder = new ArrayList<>(); if (isAllSnapshots(request.snapshots())) { List snapshots = snapshotsService.snapshots(request.repository()); for (Snapshot snapshot : snapshots) { @@ -88,7 +89,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction shards; + private List shards; private ImmutableMap indicesStatus; @@ -57,7 +58,7 @@ public class SnapshotStatus implements ToXContent, Streamable { private SnapshotStats stats; - SnapshotStatus(SnapshotId snapshotId, State state, ImmutableList shards) { + SnapshotStatus(SnapshotId snapshotId, State state, List shards) { this.snapshotId = snapshotId; this.state = state; this.shards = shards; @@ -127,11 +128,11 @@ public class SnapshotStatus implements ToXContent, Streamable { snapshotId = SnapshotId.readSnapshotId(in); state = State.fromValue(in.readByte()); int size = in.readVInt(); - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); for (int i = 0; i < size; i++) { builder.add(SnapshotIndexShardStatus.readShardSnapshotStatus(in)); } - shards = builder.build(); + shards = Collections.unmodifiableList(builder); updateShardStats(); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java index 6191a45d6b3..e5692374fcb 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -28,18 +27,21 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; /** * Snapshot status response */ public class SnapshotsStatusResponse extends ActionResponse implements ToXContent { - private ImmutableList snapshots = ImmutableList.of(); + private List snapshots = Collections.emptyList(); SnapshotsStatusResponse() { } - SnapshotsStatusResponse(ImmutableList snapshots) { + SnapshotsStatusResponse(List snapshots) { this.snapshots = snapshots; } @@ -48,7 +50,7 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten * * @return the 
list of snapshots */ - public ImmutableList getSnapshots() { + public List getSnapshots() { return snapshots; } @@ -56,11 +58,11 @@ public class SnapshotsStatusResponse extends ActionResponse implements ToXConten public void readFrom(StreamInput in) throws IOException { super.readFrom(in); int size = in.readVInt(); - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); for (int i = 0; i < size; i++) { builder.add(SnapshotStatus.readSnapshotStatus(in)); } - snapshots = builder.build(); + snapshots = Collections.unmodifiableList(builder); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 65ceaa2c533..12a8135cf44 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; @@ -42,6 +41,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -138,7 +139,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction currentSnapshots, TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) throws IOException { // First process snapshot that are currently processed - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); Set currentSnapshotIds = newHashSet(); if (!currentSnapshots.isEmpty()) { Map nodeSnapshotStatusMap; @@ -150,7 +151,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction shardStatusBuilder = ImmutableList.builder(); + List shardStatusBuilder = new ArrayList<>(); for (ImmutableMap.Entry shardEntry : entry.shards().entrySet()) { SnapshotsInProgress.ShardSnapshotStatus status = shardEntry.getValue(); if (status.nodeId() != null) { @@ -189,7 +190,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction shardStatusBuilder = ImmutableList.builder(); + List shardStatusBuilder = new ArrayList<>(); if (snapshot.state().completed()) { ImmutableMap shardStatues = snapshotsService.snapshotShards(snapshotId); for (ImmutableMap.Entry shardStatus : shardStatues.entrySet()) { @@ -222,13 +223,13 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction> aliasMetaData = metaData.findAliases(aliases, indexAsArray); + ImmutableOpenMap> aliasMetaData = metaData.findAliases(aliases, indexAsArray); List finalAliases = new ArrayList<>(); - for (ObjectCursor> curAliases : aliasMetaData.values()) { + for (ObjectCursor> curAliases : aliasMetaData.values()) { for (AliasMetaData aliasMeta: curAliases.value) { finalAliases.add(aliasMeta.alias()); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index 
106e864a367..e23faa1cbbf 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.alias.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -29,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -61,7 +61,7 @@ public class GetAliasesResponse extends ActionResponse { for (int j = 0; j < valueSize; j++) { value.add(AliasMetaData.Builder.readFrom(in)); } - aliasesBuilder.put(key, ImmutableList.copyOf(value)); + aliasesBuilder.put(key, Collections.unmodifiableList(value)); } aliases = aliasesBuilder.build(); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 496b8a3e8d1..7c7dfb039bf 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -64,7 +64,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction listener) { String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); - @SuppressWarnings("unchecked") // ImmutableList to List results incompatible type + @SuppressWarnings("unchecked") ImmutableOpenMap> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices); listener.onResponse(new GetAliasesResponse(result)); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index 3bc0ad0e1ff..0930f8f1d4e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.AliasMetaData; @@ -32,21 +31,24 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; /** * A response for a delete index action. 
*/ public class GetIndexResponse extends ActionResponse { - private ImmutableOpenMap> warmers = ImmutableOpenMap.of(); + private ImmutableOpenMap> warmers = ImmutableOpenMap.of(); private ImmutableOpenMap> mappings = ImmutableOpenMap.of(); - private ImmutableOpenMap> aliases = ImmutableOpenMap.of(); + private ImmutableOpenMap> aliases = ImmutableOpenMap.of(); private ImmutableOpenMap settings = ImmutableOpenMap.of(); private String[] indices; - GetIndexResponse(String[] indices, ImmutableOpenMap> warmers, + GetIndexResponse(String[] indices, ImmutableOpenMap> warmers, ImmutableOpenMap> mappings, - ImmutableOpenMap> aliases, ImmutableOpenMap settings) { + ImmutableOpenMap> aliases, ImmutableOpenMap settings) { this.indices = indices; if (warmers != null) { this.warmers = warmers; @@ -73,11 +75,11 @@ public class GetIndexResponse extends ActionResponse { return indices(); } - public ImmutableOpenMap> warmers() { + public ImmutableOpenMap> warmers() { return warmers; } - public ImmutableOpenMap> getWarmers() { + public ImmutableOpenMap> getWarmers() { return warmers(); } @@ -89,11 +91,11 @@ public class GetIndexResponse extends ActionResponse { return mappings(); } - public ImmutableOpenMap> aliases() { + public ImmutableOpenMap> aliases() { return aliases; } - public ImmutableOpenMap> getAliases() { + public ImmutableOpenMap> getAliases() { return aliases(); } @@ -110,11 +112,11 @@ public class GetIndexResponse extends ActionResponse { super.readFrom(in); this.indices = in.readStringArray(); int warmersSize = in.readVInt(); - ImmutableOpenMap.Builder> warmersMapBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder> warmersMapBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < warmersSize; i++) { String key = in.readString(); int valueSize = in.readVInt(); - ImmutableList.Builder warmerEntryBuilder = ImmutableList.builder(); + List warmerEntryBuilder = new ArrayList<>(); for (int j = 0; j < valueSize; j++) { warmerEntryBuilder.add(new IndexWarmersMetaData.Entry( in.readString(), @@ -123,7 +125,7 @@ public class GetIndexResponse extends ActionResponse { in.readBytesReference()) ); } - warmersMapBuilder.put(key, warmerEntryBuilder.build()); + warmersMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder)); } warmers = warmersMapBuilder.build(); int mappingsSize = in.readVInt(); @@ -139,15 +141,15 @@ public class GetIndexResponse extends ActionResponse { } mappings = mappingsMapBuilder.build(); int aliasesSize = in.readVInt(); - ImmutableOpenMap.Builder> aliasesMapBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder> aliasesMapBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < aliasesSize; i++) { String key = in.readString(); int valueSize = in.readVInt(); - ImmutableList.Builder aliasEntryBuilder = ImmutableList.builder(); + List aliasEntryBuilder = new ArrayList<>(); for (int j = 0; j < valueSize; j++) { aliasEntryBuilder.add(AliasMetaData.Builder.readFrom(in)); } - aliasesMapBuilder.put(key, aliasEntryBuilder.build()); + aliasesMapBuilder.put(key, Collections.unmodifiableList(aliasEntryBuilder)); } aliases = aliasesMapBuilder.build(); int settingsSize = in.readVInt(); @@ -164,7 +166,7 @@ public class GetIndexResponse extends ActionResponse { super.writeTo(out); out.writeStringArray(indices); out.writeVInt(warmers.size()); - for (ObjectObjectCursor> indexEntry : warmers) { + for (ObjectObjectCursor> indexEntry : warmers) { out.writeString(indexEntry.key); out.writeVInt(indexEntry.value.size()); for (IndexWarmersMetaData.Entry warmerEntry : 
indexEntry.value) { @@ -184,7 +186,7 @@ public class GetIndexResponse extends ActionResponse { } } out.writeVInt(aliases.size()); - for (ObjectObjectCursor> indexEntry : aliases) { + for (ObjectObjectCursor> indexEntry : aliases) { out.writeString(indexEntry.key); out.writeVInt(indexEntry.value.size()); for (AliasMetaData aliasEntry : indexEntry.value) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java index 89360ce42b0..e398541fa99 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; @@ -41,6 +40,8 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; + /** * Get index action. */ @@ -71,9 +72,9 @@ public class TransportGetIndexAction extends TransportClusterInfoAction listener) { - ImmutableOpenMap> warmersResult = ImmutableOpenMap.of(); + ImmutableOpenMap> warmersResult = ImmutableOpenMap.of(); ImmutableOpenMap> mappingsResult = ImmutableOpenMap.of(); - ImmutableOpenMap> aliasesResult = ImmutableOpenMap.of(); + ImmutableOpenMap> aliasesResult = ImmutableOpenMap.of(); ImmutableOpenMap settings = ImmutableOpenMap.of(); Feature[] features = request.features(); boolean doneAliases = false; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java index 6e754a26210..c901f533a3b 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.segments; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; @@ -28,6 +27,7 @@ import org.elasticsearch.index.engine.Segment; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -93,7 +93,7 @@ public class ShardSegments extends BroadcastShardResponse implements Iterable(size); for (int i = 0; i < size; i++) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index 50d305efe90..84b39d4c689 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.indices.shards; import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import 
org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ShardOperationFailedException; @@ -38,6 +37,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.StoreStatus.*; @@ -258,15 +258,15 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon } private ImmutableOpenMap>> storeStatuses; - private ImmutableList failures; + private List failures; - public IndicesShardStoresResponse(ImmutableOpenMap>> storeStatuses, ImmutableList failures) { + public IndicesShardStoresResponse(ImmutableOpenMap>> storeStatuses, List failures) { this.storeStatuses = storeStatuses; this.failures = failures; } IndicesShardStoresResponse() { - this(ImmutableOpenMap.>>of(), ImmutableList.of()); + this(ImmutableOpenMap.>>of(), Collections.emptyList()); } /** @@ -281,7 +281,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon * Returns node {@link Failure}s encountered * while executing the request */ - public ImmutableList getFailures() { + public List getFailures() { return failures; } @@ -306,12 +306,12 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon storeStatusesBuilder.put(index, shardEntries.build()); } int numFailure = in.readVInt(); - ImmutableList.Builder failureBuilder = ImmutableList.builder(); + List failureBuilder = new ArrayList<>(); for (int i = 0; i < numFailure; i++) { failureBuilder.add(Failure.readFailure(in)); } storeStatuses = storeStatusesBuilder.build(); - failures = failureBuilder.build(); + failures = Collections.unmodifiableList(failureBuilder); } @Override diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index b783ce112ac..01613d69086 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.action.admin.indices.shards; -import com.google.common.collect.ImmutableList; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; @@ -34,7 +33,11 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; @@ -48,7 +51,11 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.*; 
+import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Queue; +import java.util.Set; import java.util.concurrent.ConcurrentLinkedQueue; /** @@ -157,7 +164,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc void finish() { ImmutableOpenMap.Builder>> indicesStoreStatusesBuilder = ImmutableOpenMap.builder(); - ImmutableList.Builder failureBuilder = ImmutableList.builder(); + java.util.List failureBuilder = new ArrayList<>(); for (Response fetchResponse : fetchResponses) { ImmutableOpenIntMap> indexStoreStatuses = indicesStoreStatusesBuilder.get(fetchResponse.shardId.getIndex()); final ImmutableOpenIntMap.Builder> indexShardsBuilder; @@ -183,7 +190,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc failureBuilder.add(new IndicesShardStoresResponse.Failure(failure.nodeId(), fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), failure.getCause())); } } - listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), failureBuilder.build())); + listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder))); } private IndicesShardStoresResponse.StoreStatus.Allocation getAllocation(String index, int shardID, DiscoveryNode node) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java index 3d1ef78d2bf..2d3c0a0a90e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.validate.query; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.common.io.stream.StreamInput; @@ -27,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import static org.elasticsearch.action.admin.indices.validate.query.QueryExplanation.readQueryExplanation; @@ -51,7 +51,7 @@ public class ValidateQueryResponse extends BroadcastResponse { this.valid = valid; this.queryExplanations = queryExplanations; if (queryExplanations == null) { - this.queryExplanations = ImmutableList.of(); + this.queryExplanations = Collections.emptyList(); } } @@ -67,7 +67,7 @@ public class ValidateQueryResponse extends BroadcastResponse { */ public List getQueryExplanation() { if (queryExplanations == null) { - return ImmutableList.of(); + return Collections.emptyList(); } return queryExplanations; } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java index cb45d36d39b..3ed444c88dd 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/GetWarmersResponse.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.warmer.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import 
com.google.common.collect.ImmutableList; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.bytes.BytesReference; @@ -30,6 +29,9 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; /** * Holds a warmer-name to a list of {@link IndexWarmersMetaData} mapping for each warmer specified @@ -38,20 +40,20 @@ import java.io.IOException; */ public class GetWarmersResponse extends ActionResponse { - private ImmutableOpenMap> warmers = ImmutableOpenMap.of(); + private ImmutableOpenMap> warmers = ImmutableOpenMap.of(); - GetWarmersResponse(ImmutableOpenMap> warmers) { + GetWarmersResponse(ImmutableOpenMap> warmers) { this.warmers = warmers; } GetWarmersResponse() { } - public ImmutableOpenMap> warmers() { + public ImmutableOpenMap> warmers() { return warmers; } - public ImmutableOpenMap> getWarmers() { + public ImmutableOpenMap> getWarmers() { return warmers(); } @@ -59,11 +61,11 @@ public class GetWarmersResponse extends ActionResponse { public void readFrom(StreamInput in) throws IOException { super.readFrom(in); int size = in.readVInt(); - ImmutableOpenMap.Builder> indexMapBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder> indexMapBuilder = ImmutableOpenMap.builder(); for (int i = 0; i < size; i++) { String key = in.readString(); int valueSize = in.readVInt(); - ImmutableList.Builder warmerEntryBuilder = ImmutableList.builder(); + List warmerEntryBuilder = new ArrayList<>(); for (int j = 0; j < valueSize; j++) { String name = in.readString(); String[] types = in.readStringArray(); @@ -77,7 +79,7 @@ public class GetWarmersResponse extends ActionResponse { source) ); } - indexMapBuilder.put(key, warmerEntryBuilder.build()); + indexMapBuilder.put(key, Collections.unmodifiableList(warmerEntryBuilder)); } warmers = indexMapBuilder.build(); } @@ -86,7 +88,7 @@ public class GetWarmersResponse extends ActionResponse { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVInt(warmers.size()); - for (ObjectObjectCursor> indexEntry : warmers) { + for (ObjectObjectCursor> indexEntry : warmers) { out.writeString(indexEntry.key); out.writeVInt(indexEntry.value.size()); for (IndexWarmersMetaData.Entry warmerEntry : indexEntry.value) { diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java index 0504e329a1e..50d972b3e61 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.warmer.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.info.TransportClusterInfoAction; @@ -35,6 +34,8 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.List; + /** * Internal Actions executed on the master fetching the warmer from the cluster state metadata. 
* @@ -66,7 +67,7 @@ public class TransportGetWarmersAction extends TransportClusterInfoAction listener) { - ImmutableOpenMap> result = state.metaData().findWarmers( + ImmutableOpenMap> result = state.metaData().findWarmers( concreteIndices, request.types(), request.warmers() ); listener.onResponse(new GetWarmersResponse(result)); diff --git a/core/src/main/java/org/elasticsearch/action/percolate/PercolateShardResponse.java b/core/src/main/java/org/elasticsearch/action/percolate/PercolateShardResponse.java index c626cda581e..5416e2f66d7 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/PercolateShardResponse.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/PercolateShardResponse.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.action.percolate; -import com.google.common.collect.ImmutableList; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; import org.elasticsearch.common.bytes.BytesReference; @@ -35,6 +34,7 @@ import org.elasticsearch.search.query.QuerySearchResult; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -45,7 +45,7 @@ public class PercolateShardResponse extends BroadcastShardResponse { private static final BytesRef[] EMPTY_MATCHES = new BytesRef[0]; private static final float[] EMPTY_SCORES = new float[0]; - private static final List> EMPTY_HL = ImmutableList.of(); + private static final List> EMPTY_HL = Collections.emptyList(); private long count; private float[] scores; diff --git a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index 1c77bed9747..c2840441ac1 100644 --- a/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/core/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.update; -import com.google.common.collect.ImmutableList; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -58,6 +57,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.Collections; import java.util.Map; /** @@ -153,10 +153,10 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio ShardRouting shard; while ((shard = shardIterator.nextOrNull()) != null) { if (shard.primary()) { - return new PlainShardIterator(shardIterator.shardId(), ImmutableList.of(shard)); + return new PlainShardIterator(shardIterator.shardId(), Collections.singletonList(shard)); } } - return new PlainShardIterator(shardIterator.shardId(), ImmutableList.of()); + return new PlainShardIterator(shardIterator.shardId(), Collections.emptyList()); } @Override diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java index 57af6b145df..a500e1784dd 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java @@ -19,7 +19,6 @@ package org.elasticsearch.bootstrap; -import com.google.common.collect.ImmutableList; import com.sun.jna.*; import com.sun.jna.win32.StdCallLibrary; @@ -29,6 +28,7 @@ 
import org.elasticsearch.common.logging.Loggers; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; @@ -85,8 +85,8 @@ class JNAKernel32Library { return result; } - ImmutableList getCallbacks() { - return ImmutableList.builder().addAll(callbacks).build(); + List getCallbacks() { + return Collections.unmodifiableList(new ArrayList(callbacks)); } /** diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index 4ffc2abae2b..118229d8a96 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -20,7 +20,6 @@ package org.elasticsearch.client.transport; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.elasticsearch.ExceptionsHelper; @@ -73,12 +72,12 @@ public class TransportClientNodesService extends AbstractComponent { private final Headers headers; // nodes that are added to be discovered - private volatile ImmutableList listedNodes = ImmutableList.of(); + private volatile List listedNodes = Collections.emptyList(); private final Object mutex = new Object(); - private volatile List nodes = ImmutableList.of(); - private volatile List filteredNodes = ImmutableList.of(); + private volatile List nodes = Collections.emptyList(); + private volatile List filteredNodes = Collections.emptyList(); private final AtomicInteger tempNodeIdGenerator = new AtomicInteger(); @@ -119,11 +118,11 @@ public class TransportClientNodesService extends AbstractComponent { } public List transportAddresses() { - ImmutableList.Builder lstBuilder = ImmutableList.builder(); + List lstBuilder = new ArrayList<>(); for (DiscoveryNode listedNode : listedNodes) { lstBuilder.add(listedNode.address()); } - return lstBuilder.build(); + return Collections.unmodifiableList(lstBuilder); } public List connectedNodes() { @@ -160,14 +159,14 @@ public class TransportClientNodesService extends AbstractComponent { if (filtered.isEmpty()) { return this; } - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); builder.addAll(listedNodes()); for (TransportAddress transportAddress : filtered) { DiscoveryNode node = new DiscoveryNode("#transport#-" + tempNodeIdGenerator.incrementAndGet(), transportAddress, minCompatibilityVersion); logger.debug("adding address [{}]", node); builder.add(node); } - listedNodes = builder.build(); + listedNodes = Collections.unmodifiableList(builder); nodesSampler.sample(); } return this; @@ -178,7 +177,7 @@ public class TransportClientNodesService extends AbstractComponent { if (closed) { throw new IllegalStateException("transport client is closed, can't remove an address"); } - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); for (DiscoveryNode otherNode : listedNodes) { if (!otherNode.address().equals(transportAddress)) { builder.add(otherNode); @@ -186,7 +185,7 @@ public class TransportClientNodesService extends AbstractComponent { logger.debug("removing address [{}]", otherNode); } } - listedNodes = builder.build(); + listedNodes = Collections.unmodifiableList(builder); nodesSampler.sample(); } return this; @@ -261,7 +260,7 @@ public class TransportClientNodesService 
extends AbstractComponent { for (DiscoveryNode listedNode : listedNodes) { transportService.disconnectFromNode(listedNode); } - nodes = ImmutableList.of(); + nodes = Collections.emptyList(); } } @@ -311,7 +310,7 @@ public class TransportClientNodesService extends AbstractComponent { } } - return new ImmutableList.Builder().addAll(nodes).build(); + return Collections.unmodifiableList(new ArrayList<>(nodes)); } } @@ -376,7 +375,7 @@ public class TransportClientNodesService extends AbstractComponent { } nodes = validateNewNodes(newNodes); - filteredNodes = ImmutableList.copyOf(newFilteredNodes); + filteredNodes = Collections.unmodifiableList(new ArrayList<>(newFilteredNodes)); } } @@ -476,7 +475,7 @@ public class TransportClientNodesService extends AbstractComponent { } nodes = validateNewNodes(newNodes); - filteredNodes = ImmutableList.copyOf(newFilteredNodes); + filteredNodes = Collections.unmodifiableList(new ArrayList<>(newFilteredNodes)); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index 1ceb822abfd..b63b59087b6 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -20,13 +20,13 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import java.util.Arrays; +import java.util.Collections; import java.util.List; /** @@ -86,7 +86,7 @@ public class ClusterChangedEvent { return Arrays.asList(state.metaData().indices().keys().toArray(String.class)); } if (!metaDataChanged()) { - return ImmutableList.of(); + return Collections.emptyList(); } List created = null; for (ObjectCursor cursor : state.metaData().indices().keys()) { @@ -98,7 +98,7 @@ public class ClusterChangedEvent { created.add(index); } } - return created == null ? ImmutableList.of() : created; + return created == null ? Collections.emptyList() : created; } /** @@ -116,10 +116,10 @@ public class ClusterChangedEvent { // See discussion on https://github.com/elastic/elasticsearch/pull/9952 and // https://github.com/elastic/elasticsearch/issues/11665 if (hasNewMaster() || previousState == null) { - return ImmutableList.of(); + return Collections.emptyList(); } if (!metaDataChanged()) { - return ImmutableList.of(); + return Collections.emptyList(); } List deleted = null; for (ObjectCursor cursor : previousState.metaData().indices().keys()) { @@ -131,7 +131,7 @@ public class ClusterChangedEvent { deleted.add(index); } } - return deleted == null ? ImmutableList.of() : deleted; + return deleted == null ? 
Collections.emptyList() : deleted; } public boolean metaDataChanged() { diff --git a/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java b/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java index 857f1a34d3f..eabe615d587 100644 --- a/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.cluster.ClusterState.Custom; import org.elasticsearch.cluster.metadata.SnapshotId; @@ -30,6 +29,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -42,14 +44,14 @@ public class RestoreInProgress extends AbstractDiffable implements Custo public static final RestoreInProgress PROTO = new RestoreInProgress(); - private final ImmutableList entries; + private final List entries; /** * Constructs new restore metadata * * @param entries list of currently running restore processes */ - public RestoreInProgress(ImmutableList entries) { + public RestoreInProgress(List entries) { this.entries = entries; } @@ -59,7 +61,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo * @param entries list of currently running restore processes */ public RestoreInProgress(Entry... entries) { - this.entries = ImmutableList.copyOf(entries); + this.entries = Arrays.asList(entries); } /** @@ -111,7 +113,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo private final State state; private final SnapshotId snapshotId; private final ImmutableMap shards; - private final ImmutableList indices; + private final List indices; /** * Creates new restore metadata @@ -121,7 +123,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo * @param indices list of indices being restored * @param shards list of shards being restored and thier current restore status */ - public Entry(SnapshotId snapshotId, State state, ImmutableList indices, ImmutableMap shards) { + public Entry(SnapshotId snapshotId, State state, List indices, ImmutableMap shards) { this.snapshotId = snapshotId; this.state = state; this.indices = indices; @@ -164,7 +166,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo * * @return list of indices */ - public ImmutableList indices() { + public List indices() { return indices; } @@ -413,7 +415,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo SnapshotId snapshotId = SnapshotId.readSnapshotId(in); State state = State.fromValue(in.readByte()); int indices = in.readVInt(); - ImmutableList.Builder indexBuilder = ImmutableList.builder(); + List indexBuilder = new ArrayList<>(); for (int j = 0; j < indices; j++) { indexBuilder.add(in.readString()); } @@ -424,7 +426,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in); builder.put(shardId, shardState); } - entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build()); + entries[i] = new Entry(snapshotId, state, Collections.unmodifiableList(indexBuilder), builder.build()); } return new RestoreInProgress(entries); } diff --git 
a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index a315e682c3b..a6babbbd9fc 100644 --- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.cluster.ClusterState.Custom; import org.elasticsearch.cluster.metadata.SnapshotId; @@ -31,7 +30,10 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -67,11 +69,11 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus private final SnapshotId snapshotId; private final boolean includeGlobalState; private final ImmutableMap shards; - private final ImmutableList indices; - private final ImmutableMap> waitingIndices; + private final List indices; + private final ImmutableMap> waitingIndices; private final long startTime; - public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, ImmutableList indices, long startTime, ImmutableMap shards) { + public Entry(SnapshotId snapshotId, boolean includeGlobalState, State state, List indices, long startTime, ImmutableMap shards) { this.state = state; this.snapshotId = snapshotId; this.includeGlobalState = includeGlobalState; @@ -106,11 +108,11 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus return state; } - public ImmutableList indices() { + public List indices() { return indices; } - public ImmutableMap> waitingIndices() { + public ImmutableMap> waitingIndices() { return waitingIndices; } @@ -152,22 +154,22 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus return result; } - private ImmutableMap> findWaitingIndices(ImmutableMap shards) { - Map> waitingIndicesMap = newHashMap(); + private ImmutableMap> findWaitingIndices(ImmutableMap shards) { + Map> waitingIndicesMap = newHashMap(); for (ImmutableMap.Entry entry : shards.entrySet()) { if (entry.getValue().state() == State.WAITING) { - ImmutableList.Builder waitingShards = waitingIndicesMap.get(entry.getKey().getIndex()); + List waitingShards = waitingIndicesMap.get(entry.getKey().getIndex()); if (waitingShards == null) { - waitingShards = ImmutableList.builder(); + waitingShards = new ArrayList<>(); waitingIndicesMap.put(entry.getKey().getIndex(), waitingShards); } waitingShards.add(entry.getKey()); } } if (!waitingIndicesMap.isEmpty()) { - ImmutableMap.Builder> waitingIndicesBuilder = ImmutableMap.builder(); - for (Map.Entry> entry : waitingIndicesMap.entrySet()) { - waitingIndicesBuilder.put(entry.getKey(), entry.getValue().build()); + ImmutableMap.Builder> waitingIndicesBuilder = ImmutableMap.builder(); + for (Map.Entry> entry : waitingIndicesMap.entrySet()) { + waitingIndicesBuilder.put(entry.getKey(), Collections.unmodifiableList(entry.getValue())); } return waitingIndicesBuilder.build(); } else { @@ -324,15 +326,15 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus } } - private final ImmutableList entries; + private final List entries; - public SnapshotsInProgress(ImmutableList entries) { + public SnapshotsInProgress(List entries) 
{ this.entries = entries; } public SnapshotsInProgress(Entry... entries) { - this.entries = ImmutableList.copyOf(entries); + this.entries = Arrays.asList(entries); } public List entries() { @@ -361,7 +363,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus boolean includeGlobalState = in.readBoolean(); State state = State.fromValue(in.readByte()); int indices = in.readVInt(); - ImmutableList.Builder indexBuilder = ImmutableList.builder(); + List indexBuilder = new ArrayList<>(); for (int j = 0; j < indices; j++) { indexBuilder.add(in.readString()); } @@ -374,7 +376,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus State shardState = State.fromValue(in.readByte()); builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); } - entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), startTime, builder.build()); + entries[i] = new Entry(snapshotId, includeGlobalState, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build()); } return new SnapshotsInProgress(entries); } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 2f28dcaf77d..c930f2949d1 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.metadata; import com.google.common.base.Predicate; -import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -50,13 +49,13 @@ import static com.google.common.collect.Maps.newHashMap; public class IndexNameExpressionResolver extends AbstractComponent { - private final ImmutableList expressionResolvers; + private final List expressionResolvers; private final DateMathExpressionResolver dateMathExpressionResolver; @Inject public IndexNameExpressionResolver(Settings settings) { super(settings); - expressionResolvers = ImmutableList.of( + expressionResolvers = Arrays.asList( dateMathExpressionResolver = new DateMathExpressionResolver(settings), new WildcardExpressionResolver() ); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 32098e58c83..a06c71f8ecb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -234,7 +234,7 @@ public class MetaData implements Iterable, Diffable, Fr * @param concreteIndices The concrete indexes the index aliases must point to order to be returned. 
* @return the found index aliases grouped by index */ - public ImmutableOpenMap> findAliases(final String[] aliases, String[] concreteIndices) { + public ImmutableOpenMap> findAliases(final String[] aliases, String[] concreteIndices) { assert aliases != null; assert concreteIndices != null; if (concreteIndices.length == 0) { @@ -242,7 +242,7 @@ public class MetaData implements Iterable, Diffable, Fr } boolean matchAllAliases = matchAllAliases(aliases); - ImmutableOpenMap.Builder> mapBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder> mapBuilder = ImmutableOpenMap.builder(); Iterable intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys()); for (String index : intersection) { IndexMetaData indexMetaData = indices.get(index); @@ -262,7 +262,7 @@ public class MetaData implements Iterable, Diffable, Fr return o1.alias().compareTo(o2.alias()); } }); - mapBuilder.put(index, ImmutableList.copyOf(filteredValues)); + mapBuilder.put(index, Collections.unmodifiableList(filteredValues)); } } return mapBuilder.build(); @@ -345,7 +345,7 @@ public class MetaData implements Iterable, Diffable, Fr return indexMapBuilder.build(); } - public ImmutableOpenMap> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) { + public ImmutableOpenMap> findWarmers(String[] concreteIndices, final String[] types, final String[] uncheckedWarmers) { assert uncheckedWarmers != null; assert concreteIndices != null; if (concreteIndices.length == 0) { @@ -354,7 +354,7 @@ public class MetaData implements Iterable, Diffable, Fr // special _all check to behave the same like not specifying anything for the warmers (not for the indices) final String[] warmers = Strings.isAllOrWildcard(uncheckedWarmers) ? Strings.EMPTY_ARRAY : uncheckedWarmers; - ImmutableOpenMap.Builder> mapBuilder = ImmutableOpenMap.builder(); + ImmutableOpenMap.Builder> mapBuilder = ImmutableOpenMap.builder(); Iterable intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys()); for (String index : intersection) { IndexMetaData indexMetaData = indices.get(index); @@ -363,6 +363,7 @@ public class MetaData implements Iterable, Diffable, Fr continue; } + // TODO: make this a List so we don't have to copy below Collection filteredWarmers = Collections2.filter(indexWarmersMetaData.entries(), new Predicate() { @Override @@ -380,7 +381,7 @@ public class MetaData implements Iterable, Diffable, Fr }); if (!filteredWarmers.isEmpty()) { - mapBuilder.put(index, ImmutableList.copyOf(filteredWarmers)); + mapBuilder.put(index, Collections.unmodifiableList(new ArrayList<>(filteredWarmers))); } } return mapBuilder.build(); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java index 48e40d1a54f..23a4c32017d 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.metadata; -import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.MetaData.Custom; @@ -33,9 +32,9 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumSet; import 
java.util.List;
-import java.util.Map;
 
 /**
  * Contains metadata about registered snapshot repositories
@@ -46,7 +45,7 @@ public class RepositoriesMetaData extends AbstractDiffable<Custom> implements Me
 
     public static final RepositoriesMetaData PROTO = new RepositoriesMetaData();
 
-    private final ImmutableList<RepositoryMetaData> repositories;
+    private final List<RepositoryMetaData> repositories;
 
     /**
      * Constructs new repository metadata
@@ -54,7 +53,7 @@ public class RepositoriesMetaData extends AbstractDiffable<Custom> implements Me
      * @param repositories list of repositories
      */
     public RepositoriesMetaData(RepositoryMetaData... repositories) {
-        this.repositories = ImmutableList.copyOf(repositories);
+        this.repositories = Arrays.asList(repositories);
     }
 
     /**
@@ -62,7 +61,7 @@ public class RepositoriesMetaData extends AbstractDiffable<Custom> implements Me
      *
      * @return list of repositories
      */
-    public ImmutableList<RepositoryMetaData> repositories() {
+    public List<RepositoryMetaData> repositories() {
         return this.repositories;
     }
 
diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
index cc5ff81e8d8..84e6b57660c 100644
--- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
+++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.cluster.node;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 
 import org.elasticsearch.Version;
@@ -35,6 +34,8 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.net.InetAddress;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 
 import static org.elasticsearch.common.transport.TransportAddressSerializers.addressToStream;
@@ -92,7 +93,7 @@ public class DiscoveryNode implements Streamable, ToXContent {
         return Booleans.isExplicitTrue(data);
     }
 
-    public static final ImmutableList<DiscoveryNode> EMPTY_LIST = ImmutableList.of();
+    public static final List<DiscoveryNode> EMPTY_LIST = Collections.emptyList();
 
     private String nodeName = "";
     private String nodeId;
diff --git a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
index 1b95aed4636..5151d662b24 100644
--- a/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
+++ b/core/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java
@@ -22,7 +22,6 @@ package org.elasticsearch.cluster.node;
 import com.carrotsearch.hppc.ObjectHashSet;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.UnmodifiableIterator;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.AbstractDiffable;
@@ -35,6 +34,7 @@ import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.transport.TransportAddress;
 
 import java.io.IOException;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -434,7 +434,7 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
                 newMasterNode = masterNode();
             }
         }
-        return new Delta(previousMasterNode, newMasterNode, localNodeId, ImmutableList.copyOf(removed), ImmutableList.copyOf(added));
+        return new Delta(previousMasterNode, newMasterNode, localNodeId, Collections.unmodifiableList(removed), Collections.unmodifiableList(added));
     }
 
     @Override
@@ -473,14 +473,14 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes>
implements private final String localNodeId; private final DiscoveryNode previousMasterNode; private final DiscoveryNode newMasterNode; - private final ImmutableList removed; - private final ImmutableList added; + private final List removed; + private final List added; - public Delta(String localNodeId, ImmutableList removed, ImmutableList added) { + public Delta(String localNodeId, List removed, List added) { this(null, null, localNodeId, removed, added); } - public Delta(@Nullable DiscoveryNode previousMasterNode, @Nullable DiscoveryNode newMasterNode, String localNodeId, ImmutableList removed, ImmutableList added) { + public Delta(@Nullable DiscoveryNode previousMasterNode, @Nullable DiscoveryNode newMasterNode, String localNodeId, List removed, List added) { this.previousMasterNode = previousMasterNode; this.newMasterNode = newMasterNode; this.localNodeId = localNodeId; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 85a14e23008..5dfff2dfce3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.IntSet; import com.carrotsearch.hppc.cursors.IntCursor; import com.carrotsearch.hppc.cursors.IntObjectCursor; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import com.google.common.collect.UnmodifiableIterator; @@ -37,6 +36,7 @@ import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Set; @@ -76,7 +76,7 @@ public class IndexRoutingTable extends AbstractDiffable imple this.index = index; this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt()); this.shards = shards; - ImmutableList.Builder allActiveShards = ImmutableList.builder(); + List allActiveShards = new ArrayList<>(); for (IntObjectCursor cursor : shards) { for (ShardRouting shardRouting : cursor.value) { shardRouting.freeze(); @@ -85,7 +85,7 @@ public class IndexRoutingTable extends AbstractDiffable imple } } } - this.allActiveShards = allActiveShards.build(); + this.allActiveShards = Collections.unmodifiableList(allActiveShards); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index bc13e081563..e1aa7a085d3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Sets; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -48,30 +47,30 @@ public class IndexShardRoutingTable implements Iterable { final ShardId shardId; final ShardRouting primary; - final ImmutableList primaryAsList; - final ImmutableList replicas; - final ImmutableList shards; - final ImmutableList activeShards; - final ImmutableList assignedShards; - final static ImmutableList NO_SHARDS = ImmutableList.of(); + final List primaryAsList; + final List replicas; + final List shards; + final List 
activeShards; + final List assignedShards; + final static List NO_SHARDS = Collections.emptyList(); final boolean allShardsStarted; /** * The initializing list, including ones that are initializing on a target node because of relocation. * If we can come up with a better variable name, it would be nice... */ - final ImmutableList allInitializingShards; + final List allInitializingShards; IndexShardRoutingTable(ShardId shardId, List shards) { this.shardId = shardId; this.shuffler = new RotationShardShuffler(ThreadLocalRandom.current().nextInt()); - this.shards = ImmutableList.copyOf(shards); + this.shards = Collections.unmodifiableList(shards); ShardRouting primary = null; - ImmutableList.Builder replicas = ImmutableList.builder(); - ImmutableList.Builder activeShards = ImmutableList.builder(); - ImmutableList.Builder assignedShards = ImmutableList.builder(); - ImmutableList.Builder allInitializingShards = ImmutableList.builder(); + List replicas = new ArrayList<>(); + List activeShards = new ArrayList<>(); + List assignedShards = new ArrayList<>(); + List allInitializingShards = new ArrayList<>(); boolean allShardsStarted = true; for (ShardRouting shard : shards) { if (shard.primary()) { @@ -100,14 +99,14 @@ public class IndexShardRoutingTable implements Iterable { this.primary = primary; if (primary != null) { - this.primaryAsList = ImmutableList.of(primary); + this.primaryAsList = Collections.singletonList(primary); } else { - this.primaryAsList = ImmutableList.of(); + this.primaryAsList = Collections.emptyList(); } - this.replicas = replicas.build(); - this.activeShards = activeShards.build(); - this.assignedShards = assignedShards.build(); - this.allInitializingShards = allInitializingShards.build(); + this.replicas = Collections.unmodifiableList(replicas); + this.activeShards = Collections.unmodifiableList(activeShards); + this.assignedShards = Collections.unmodifiableList(assignedShards); + this.allInitializingShards = Collections.unmodifiableList(allInitializingShards); } /** @@ -141,7 +140,7 @@ public class IndexShardRoutingTable implements Iterable { shardRoutings.add(new ShardRouting(shards.get(i), highestVersion)); } } - return new IndexShardRoutingTable(shardId, ImmutableList.copyOf(shardRoutings)); + return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(shardRoutings)); } /** @@ -464,11 +463,11 @@ public class IndexShardRoutingTable implements Iterable { static class AttributesRoutings { - public final ImmutableList withSameAttribute; - public final ImmutableList withoutSameAttribute; + public final List withSameAttribute; + public final List withoutSameAttribute; public final int totalSize; - AttributesRoutings(ImmutableList withSameAttribute, ImmutableList withoutSameAttribute) { + AttributesRoutings(List withSameAttribute, List withoutSameAttribute) { this.withSameAttribute = withSameAttribute; this.withoutSameAttribute = withoutSameAttribute; this.totalSize = withoutSameAttribute.size() + withSameAttribute.size(); @@ -484,9 +483,9 @@ public class IndexShardRoutingTable implements Iterable { if (shardRoutings == null) { synchronized (shardsByAttributeMutex) { ArrayList from = new ArrayList<>(activeShards); - ImmutableList to = collectAttributeShards(key, nodes, from); + List to = collectAttributeShards(key, nodes, from); - shardRoutings = new AttributesRoutings(to, ImmutableList.copyOf(from)); + shardRoutings = new AttributesRoutings(to, Collections.unmodifiableList(from)); activeShardsByAttributes = 
MapBuilder.newMapBuilder(activeShardsByAttributes).put(key, shardRoutings).immutableMap(); } } @@ -498,15 +497,15 @@ public class IndexShardRoutingTable implements Iterable { if (shardRoutings == null) { synchronized (shardsByAttributeMutex) { ArrayList from = new ArrayList<>(allInitializingShards); - ImmutableList to = collectAttributeShards(key, nodes, from); - shardRoutings = new AttributesRoutings(to, ImmutableList.copyOf(from)); + List to = collectAttributeShards(key, nodes, from); + shardRoutings = new AttributesRoutings(to, Collections.unmodifiableList(from)); initializingShardsByAttributes = MapBuilder.newMapBuilder(initializingShardsByAttributes).put(key, shardRoutings).immutableMap(); } } return shardRoutings; } - private static ImmutableList collectAttributeShards(AttributesKey key, DiscoveryNodes nodes, ArrayList from) { + private static List collectAttributeShards(AttributesKey key, DiscoveryNodes nodes, ArrayList from) { final ArrayList to = new ArrayList<>(); for (final String attribute : key.attributes) { final String localAttributeValue = nodes.localNode().attributes().get(attribute); @@ -523,7 +522,7 @@ public class IndexShardRoutingTable implements Iterable { } } } - return ImmutableList.copyOf(to); + return Collections.unmodifiableList(to); } public ShardIterator preferAttributesActiveInitializingShardsIt(String[] attributes, DiscoveryNodes nodes) { @@ -612,7 +611,7 @@ public class IndexShardRoutingTable implements Iterable { } public IndexShardRoutingTable build() { - return new IndexShardRoutingTable(shardId, ImmutableList.copyOf(shards)); + return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(shards)); } public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java index bd1e283f436..32f565459de 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingTableValidation.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import org.elasticsearch.common.io.stream.StreamInput; @@ -27,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import java.io.IOException; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -55,7 +55,7 @@ public class RoutingTableValidation implements Streamable { public List allFailures() { if (failures().isEmpty() && indicesFailures().isEmpty()) { - return ImmutableList.of(); + return Collections.emptyList(); } List allFailures = newArrayList(failures()); for (Map.Entry> entry : indicesFailures().entrySet()) { @@ -68,7 +68,7 @@ public class RoutingTableValidation implements Streamable { public List failures() { if (failures == null) { - return ImmutableList.of(); + return Collections.emptyList(); } return failures; } @@ -82,11 +82,11 @@ public class RoutingTableValidation implements Streamable { public List indexFailures(String index) { if (indicesFailures == null) { - return ImmutableList.of(); + return Collections.emptyList(); } List indexFailures = indicesFailures.get(index); if (indexFailures == null) { - return ImmutableList.of(); + return Collections.emptyList(); } return indexFailures; 
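// A quick sketch of the pattern at work in this class (illustrative only,
// assuming the declarations shown above): Collections.emptyList() returns a
// shared immutable singleton, and the compiler infers its element type from
// the expected return type, so it is a drop-in replacement for Guava's
// ImmutableList.of() on these empty paths, e.g.:
//
//     public List<String> failures() {
//         if (failures == null) {
//             return Collections.emptyList(); // inferred as List<String>
//         }
//         return failures;
//     }
//
// Where the target type is not visible to inference, the patch uses an
// explicit type witness instead, as with Collections.<Binding<T>>emptyList()
// in InjectorImpl further below.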
} @@ -122,7 +122,7 @@ public class RoutingTableValidation implements Streamable { valid = in.readBoolean(); int size = in.readVInt(); if (size == 0) { - failures = ImmutableList.of(); + failures = Collections.emptyList(); } else { failures = Lists.newArrayListWithCapacity(size); for (int i = 0; i < size; i++) { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index e5a46855e0c..e93b8460fbd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; @@ -36,6 +35,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -86,7 +86,7 @@ public class AllocationService extends AbstractComponent { } public RoutingAllocation.Result applyFailedShard(ClusterState clusterState, ShardRouting failedShard) { - return applyFailedShards(clusterState, ImmutableList.of(new FailedRerouteAllocation.FailedShard(failedShard, null, null))); + return applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(failedShard, null, null))); } /** diff --git a/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java b/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java index 5c1ff00601c..7636097e288 100644 --- a/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java +++ b/core/src/main/java/org/elasticsearch/common/blobstore/BlobPath.java @@ -19,26 +19,28 @@ package org.elasticsearch.common.blobstore; -import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; +import java.util.List; /** * */ public class BlobPath implements Iterable { - private final ImmutableList paths; + private final List paths; public BlobPath() { - this.paths = ImmutableList.of(); + this.paths = Collections.emptyList(); } public static BlobPath cleanPath() { return new BlobPath(); } - private BlobPath(ImmutableList paths) { + private BlobPath(List paths) { this.paths = paths; } @@ -52,8 +54,10 @@ public class BlobPath implements Iterable { } public BlobPath add(String path) { - ImmutableList.Builder builder = ImmutableList.builder(); - return new BlobPath(builder.addAll(paths).add(path).build()); + List paths = new ArrayList<>(); + paths.addAll(this.paths); + paths.add(path); + return new BlobPath(Collections.unmodifiableList(paths)); } public String buildAsString(String separator) { diff --git a/core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java b/core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java index 4a06a48c9cf..3a60676999d 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java +++ b/core/src/main/java/org/elasticsearch/common/inject/EncounterImpl.java @@ -16,13 +16,13 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import 
com.google.common.collect.Lists; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.spi.InjectionListener; import org.elasticsearch.common.inject.spi.Message; import org.elasticsearch.common.inject.spi.TypeEncounter; +import java.util.Collections; import java.util.List; import static com.google.common.base.Preconditions.checkState; @@ -47,16 +47,16 @@ final class EncounterImpl implements TypeEncounter { valid = false; } - public ImmutableList> getMembersInjectors() { + public List> getMembersInjectors() { return membersInjectors == null - ? ImmutableList.>of() - : ImmutableList.copyOf(membersInjectors); + ? Collections.>emptyList() + : Collections.unmodifiableList(membersInjectors); } - public ImmutableList> getInjectionListeners() { + public List> getInjectionListeners() { return injectionListeners == null - ? ImmutableList.>of() - : ImmutableList.copyOf(injectionListeners); + ? Collections.>emptyList() + : Collections.unmodifiableList(injectionListeners); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java b/core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java index 35e69d33195..5af88fd46e7 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java +++ b/core/src/main/java/org/elasticsearch/common/inject/InjectionRequestProcessor.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.ErrorsException; @@ -85,7 +84,7 @@ class InjectionRequestProcessor extends AbstractProcessor { final InjectorImpl injector; final Object source; final StaticInjectionRequest request; - ImmutableList memberInjectors; + List memberInjectors; public StaticInjection(InjectorImpl injector, StaticInjectionRequest request) { this.injector = injector; diff --git a/core/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java b/core/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java index 6f2540c97be..5e576b31d62 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java +++ b/core/src/main/java/org/elasticsearch/common/inject/InjectorImpl.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -27,6 +26,7 @@ import org.elasticsearch.common.inject.util.Providers; import java.lang.annotation.Annotation; import java.lang.reflect.*; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; @@ -142,7 +142,7 @@ class InjectorImpl implements Injector, Lookups { @Override public Injector createChildInjector(Module... modules) { - return createChildInjector(ImmutableList.copyOf(modules)); + return createChildInjector(Arrays.asList(modules)); } /** @@ -694,7 +694,7 @@ class InjectorImpl implements Injector, Lookups { List> bindings = multimap.get(type); return bindings != null ? 
Collections.<Binding<T>>unmodifiableList((List) multimap.get(type))
-                : ImmutableList.<Binding<T>>of();
+                : Collections.<Binding<T>>emptyList();
     }
 }
diff --git a/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java b/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java
index 0bac3d8acde..399a231461d 100644
--- a/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java
+++ b/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorImpl.java
@@ -16,7 +16,6 @@
 
 package org.elasticsearch.common.inject;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import org.elasticsearch.common.inject.internal.Errors;
 import org.elasticsearch.common.inject.internal.ErrorsException;
@@ -24,6 +23,8 @@ import org.elasticsearch.common.inject.internal.InternalContext;
 import org.elasticsearch.common.inject.spi.InjectionListener;
 import org.elasticsearch.common.inject.spi.InjectionPoint;
 
+import java.util.List;
+
 /**
  * Injects members of instances of a given type.
  *
@@ -32,12 +33,12 @@ import org.elasticsearch.common.inject.spi.InjectionPoint;
 class MembersInjectorImpl<T> implements MembersInjector<T> {
     private final TypeLiteral<T> typeLiteral;
     private final InjectorImpl injector;
-    private final ImmutableList<SingleMemberInjector> memberInjectors;
-    private final ImmutableList<MembersInjector<? super T>> userMembersInjectors;
-    private final ImmutableList<InjectionListener<? super T>> injectionListeners;
+    private final List<SingleMemberInjector> memberInjectors;
+    private final List<MembersInjector<? super T>> userMembersInjectors;
+    private final List<InjectionListener<? super T>> injectionListeners;
 
     MembersInjectorImpl(InjectorImpl injector, TypeLiteral<T> typeLiteral,
-                        EncounterImpl<T> encounter, ImmutableList<SingleMemberInjector> memberInjectors) {
+                        EncounterImpl<T> encounter, List<SingleMemberInjector> memberInjectors) {
         this.injector = injector;
         this.typeLiteral = typeLiteral;
         this.memberInjectors = memberInjectors;
@@ -45,7 +46,7 @@ class MembersInjectorImpl<T> implements MembersInjector<T> {
         this.injectionListeners = encounter.getInjectionListeners();
     }
 
-    public ImmutableList<SingleMemberInjector> getMemberInjectors() {
+    public List<SingleMemberInjector> getMemberInjectors() {
         return memberInjectors;
     }
 
diff --git a/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java b/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java
index 99d0924576d..fdb66530ce4 100644
--- a/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java
+++ b/core/src/main/java/org/elasticsearch/common/inject/MembersInjectorStore.java
@@ -16,7 +16,6 @@
 
 package org.elasticsearch.common.inject;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import org.elasticsearch.common.inject.internal.Errors;
 import org.elasticsearch.common.inject.internal.ErrorsException;
@@ -25,6 +24,7 @@ import org.elasticsearch.common.inject.spi.InjectionPoint;
 import org.elasticsearch.common.inject.spi.TypeListenerBinding;
 
 import java.lang.reflect.Field;
+import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 
@@ -35,7 +35,7 @@ import java.util.Set;
  */
 class MembersInjectorStore {
     private final InjectorImpl injector;
-    private final ImmutableList<TypeListenerBinding> typeListenerBindings;
+    private final List<TypeListenerBinding> typeListenerBindings;
 
     private final FailableCache<TypeLiteral<?>, MembersInjectorImpl<?>> cache
             = new FailableCache<TypeLiteral<?>, MembersInjectorImpl<?>>() {
@@ -49,7 +49,7 @@ class MembersInjectorStore {
 
     MembersInjectorStore(InjectorImpl injector, List<TypeListenerBinding> typeListenerBindings) {
         this.injector = injector;
-        this.typeListenerBindings = ImmutableList.copyOf(typeListenerBindings);
+        this.typeListenerBindings = Collections.unmodifiableList(typeListenerBindings);
     }
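// A minimal sketch of the semantic shift this constructor accepts
// (illustrative names; not code from the patch): ImmutableList.copyOf(input)
// takes a snapshot, while Collections.unmodifiableList(input) only wraps the
// caller's list, so later mutations of the backing list remain visible
// through the wrapper:
//
//     List<TypeListenerBinding> backing = new ArrayList<>(bindings);
//     List<TypeListenerBinding> view = Collections.unmodifiableList(backing);
//     backing.add(extra);   // still visible through 'view'
//     view.add(extra);      // throws UnsupportedOperationException
//
// The replacement is behavior-preserving only when callers hand over lists
// they do not mutate afterwards, which these private final fields appear to
// guarantee.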
/** @@ -82,7 +82,7 @@ class MembersInjectorStore { errors.merge(e.getErrorMessages()); injectionPoints = e.getPartialValue(); } - ImmutableList injectors = getInjectors(injectionPoints, errors); + List injectors = getInjectors(injectionPoints, errors); errors.throwIfNewErrors(numErrorsBefore); EncounterImpl encounter = new EncounterImpl<>(errors, injector.lookups); @@ -104,7 +104,7 @@ class MembersInjectorStore { /** * Returns the injectors for the specified injection points. */ - ImmutableList getInjectors( + List getInjectors( Set injectionPoints, Errors errors) { List injectors = Lists.newArrayList(); for (InjectionPoint injectionPoint : injectionPoints) { @@ -120,6 +120,6 @@ class MembersInjectorStore { // ignored for now } } - return ImmutableList.copyOf(injectors); + return Collections.unmodifiableList(injectors); } } diff --git a/core/src/main/java/org/elasticsearch/common/inject/ProvisionException.java b/core/src/main/java/org/elasticsearch/common/inject/ProvisionException.java index 4c0c3652128..b124dfc5ad6 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/ProvisionException.java +++ b/core/src/main/java/org/elasticsearch/common/inject/ProvisionException.java @@ -16,12 +16,12 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.spi.Message; import java.util.Collection; +import java.util.Collections; import static com.google.common.base.Preconditions.checkArgument; @@ -47,7 +47,7 @@ public final class ProvisionException extends RuntimeException { public ProvisionException(String message, Throwable cause) { super(cause); - this.messages = ImmutableSet.of(new Message(ImmutableList.of(), message, cause)); + this.messages = ImmutableSet.of(new Message(Collections.emptyList(), message, cause)); } public ProvisionException(String message) { diff --git a/core/src/main/java/org/elasticsearch/common/inject/State.java b/core/src/main/java/org/elasticsearch/common/inject/State.java index b3f662c5fd8..53d1bddd313 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/State.java +++ b/core/src/main/java/org/elasticsearch/common/inject/State.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.elasticsearch.common.inject.internal.BindingImpl; import org.elasticsearch.common.inject.internal.Errors; @@ -24,6 +23,7 @@ import org.elasticsearch.common.inject.internal.MatcherAndConverter; import org.elasticsearch.common.inject.spi.TypeListenerBinding; import java.lang.annotation.Annotation; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -89,7 +89,7 @@ interface State { @Override public List getTypeListenerBindings() { - return ImmutableList.of(); + return Collections.emptyList(); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java b/core/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java index 37dfebbe98b..b83df0914d6 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java +++ b/core/src/main/java/org/elasticsearch/common/inject/TypeLiteral.java @@ -16,11 +16,12 @@ package org.elasticsearch.common.inject; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.inject.internal.MoreTypes; import org.elasticsearch.common.inject.util.Types; import 
java.lang.reflect.*; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import static com.google.common.base.Preconditions.checkArgument; @@ -174,7 +175,7 @@ public class TypeLiteral { for (int t = 0; t < types.length; t++) { result[t] = resolve(types[t]); } - return ImmutableList.copyOf(result); + return Arrays.asList(result); } /** diff --git a/core/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java b/core/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java index 2bfbcefe6b7..21c094b5a23 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java +++ b/core/src/main/java/org/elasticsearch/common/inject/assistedinject/FactoryProvider2.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject.assistedinject; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; @@ -31,6 +30,7 @@ import java.lang.reflect.InvocationHandler; import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static com.google.common.base.Preconditions.checkState; @@ -81,7 +81,7 @@ final class FactoryProvider2 implements InvocationHandler, Provider { */ private final Key producedType; private final ImmutableMap> returnTypesByMethod; - private final ImmutableMap>> paramTypes; + private final ImmutableMap>> paramTypes; /** * the hosting injector, or null if we haven't been initialized yet @@ -108,7 +108,7 @@ final class FactoryProvider2 implements InvocationHandler, Provider { try { ImmutableMap.Builder> returnTypesBuilder = ImmutableMap.builder(); - ImmutableMap.Builder>> paramTypesBuilder + ImmutableMap.Builder>> paramTypesBuilder = ImmutableMap.builder(); // TODO: also grab methods from superinterfaces for (Method method : factoryRawType.getMethods()) { @@ -123,7 +123,7 @@ final class FactoryProvider2 implements InvocationHandler, Provider { Key paramKey = getKey(param, method, paramAnnotations[p++], errors); keys.add(assistKey(method, paramKey, errors)); } - paramTypesBuilder.put(method, ImmutableList.copyOf(keys)); + paramTypesBuilder.put(method, Collections.unmodifiableList(keys)); } returnTypesByMethod = returnTypesBuilder.build(); paramTypes = paramTypesBuilder.build(); @@ -165,8 +165,8 @@ final class FactoryProvider2 implements InvocationHandler, Provider { @Inject void initialize(Injector injector) { if (this.injector != null) { - throw new ConfigurationException(ImmutableList.of(new Message(FactoryProvider2.class, - "Factories.create() factories may only be used in one Injector!"))); + throw new ConfigurationException(Collections.singletonList(new Message(FactoryProvider2.class, + "Factories.create() factories may only be used in one Injector!"))); } this.injector = injector; diff --git a/core/src/main/java/org/elasticsearch/common/inject/internal/Errors.java b/core/src/main/java/org/elasticsearch/common/inject/internal/Errors.java index e898007231c..5bd9fd066c7 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/internal/Errors.java +++ b/core/src/main/java/org/elasticsearch/common/inject/internal/Errors.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject.internal; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import 
org.apache.lucene.util.CollectionUtil; @@ -436,7 +435,7 @@ public final class Errors implements Serializable { public List getMessages() { if (root.errors == null) { - return ImmutableList.of(); + return Collections.emptyList(); } List result = Lists.newArrayList(root.errors); @@ -553,7 +552,7 @@ public final class Errors implements Serializable { abstract String toString(T t); } - private static final Collection> converters = ImmutableList.of( + private static final Collection> converters = Arrays.asList( new Converter(Class.class) { @Override public String toString(Class c) { diff --git a/core/src/main/java/org/elasticsearch/common/inject/internal/PrivateElementsImpl.java b/core/src/main/java/org/elasticsearch/common/inject/internal/PrivateElementsImpl.java index 0e6a33f199f..4d0a3158a4f 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/internal/PrivateElementsImpl.java +++ b/core/src/main/java/org/elasticsearch/common/inject/internal/PrivateElementsImpl.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject.internal; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -28,6 +27,7 @@ import org.elasticsearch.common.inject.spi.Element; import org.elasticsearch.common.inject.spi.ElementVisitor; import org.elasticsearch.common.inject.spi.PrivateElements; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -53,7 +53,7 @@ public final class PrivateElementsImpl implements PrivateElements { /** * lazily instantiated */ - private ImmutableList elements; + private List elements; /** * lazily instantiated @@ -73,7 +73,7 @@ public final class PrivateElementsImpl implements PrivateElements { @Override public List getElements() { if (elements == null) { - elements = ImmutableList.copyOf(elementsMutable); + elements = Collections.unmodifiableList(elementsMutable); elementsMutable = null; } diff --git a/core/src/main/java/org/elasticsearch/common/inject/multibindings/Multibinder.java b/core/src/main/java/org/elasticsearch/common/inject/multibindings/Multibinder.java index fac73d3ef57..d8874886d47 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/multibindings/Multibinder.java +++ b/core/src/main/java/org/elasticsearch/common/inject/multibindings/Multibinder.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject.multibindings; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import org.elasticsearch.common.inject.*; @@ -320,6 +319,6 @@ public abstract class Multibinder { NullPointerException npe = new NullPointerException(name); throw new ConfigurationException(ImmutableSet.of( - new Message(ImmutableList.of(), npe.toString(), npe))); + new Message(Collections.emptyList(), npe.toString(), npe))); } } diff --git a/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java b/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java index 217024deb02..42844f28bf3 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java +++ b/core/src/main/java/org/elasticsearch/common/inject/spi/Elements.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject.spi; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.elasticsearch.bootstrap.Bootstrap; @@ -239,7 +238,7 @@ public final 
class Elements { @Override public void addError(Throwable t) { String message = "An exception was caught and reported. Message: " + t.getMessage(); - elements.add(new Message(ImmutableList.of(getSource()), message, t)); + elements.add(new Message(Collections.singletonList(getSource()), message, t)); } @Override diff --git a/core/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java b/core/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java index 8aef647f994..daf4e5fee0b 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java +++ b/core/src/main/java/org/elasticsearch/common/inject/spi/InjectionPoint.java @@ -16,7 +16,6 @@ package org.elasticsearch.common.inject.spi; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import org.elasticsearch.common.inject.ConfigurationException; @@ -43,10 +42,10 @@ public final class InjectionPoint { private final boolean optional; private final Member member; - private final ImmutableList> dependencies; + private final List> dependencies; private InjectionPoint(Member member, - ImmutableList> dependencies, boolean optional) { + List> dependencies, boolean optional) { this.member = member; this.dependencies = dependencies; this.optional = optional; @@ -84,11 +83,11 @@ public final class InjectionPoint { } errors.throwConfigurationExceptionIfErrorsExist(); - this.dependencies = ImmutableList.>of( - newDependency(key, Nullability.allowsNull(annotations), -1)); + this.dependencies = Collections.>singletonList( + newDependency(key, Nullability.allowsNull(annotations), -1)); } - private ImmutableList> forMember(Member member, TypeLiteral type, + private List> forMember(Member member, TypeLiteral type, Annotation[][] parameterAnnotations) { Errors errors = new Errors(member); Iterator annotationsIterator = Arrays.asList(parameterAnnotations).iterator(); @@ -108,7 +107,7 @@ public final class InjectionPoint { } errors.throwConfigurationExceptionIfErrorsExist(); - return ImmutableList.copyOf(dependencies); + return Collections.unmodifiableList(dependencies); } // This metohd is necessary to create a Dependency with proper generic type information diff --git a/core/src/main/java/org/elasticsearch/common/inject/spi/Message.java b/core/src/main/java/org/elasticsearch/common/inject/spi/Message.java index fb778e136f7..0723c0e1377 100644 --- a/core/src/main/java/org/elasticsearch/common/inject/spi/Message.java +++ b/core/src/main/java/org/elasticsearch/common/inject/spi/Message.java @@ -17,13 +17,14 @@ package org.elasticsearch.common.inject.spi; import com.google.common.base.Objects; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.inject.Binder; import org.elasticsearch.common.inject.internal.Errors; import org.elasticsearch.common.inject.internal.SourceProvider; import java.io.ObjectStreamException; import java.io.Serializable; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import static com.google.common.base.Preconditions.checkNotNull; @@ -50,17 +51,17 @@ public final class Message implements Serializable, Element { * @since 2.0 */ public Message(List sources, String message, Throwable cause) { - this.sources = ImmutableList.copyOf(sources); + this.sources = Collections.unmodifiableList(sources); this.message = checkNotNull(message, "message"); this.cause = cause; } public Message(Object source, String message) { - this(ImmutableList.of(source), message, 
null); + this(Collections.singletonList(source), message, null); } public Message(String message) { - this(ImmutableList.of(), message, null); + this(Collections.emptyList(), message, null); } @Override @@ -138,7 +139,7 @@ public final class Message implements Serializable, Element { for (int i = 0; i < sourcesAsStrings.length; i++) { sourcesAsStrings[i] = Errors.convert(sourcesAsStrings[i]).toString(); } - return new Message(ImmutableList.copyOf(sourcesAsStrings), message, cause); + return new Message(Arrays.asList(sourcesAsStrings), message, cause); } private static final long serialVersionUID = 0; diff --git a/core/src/main/java/org/elasticsearch/common/logging/log4j/LogConfigurator.java b/core/src/main/java/org/elasticsearch/common/logging/log4j/LogConfigurator.java index 0b4cdbdbd44..2e86beff192 100644 --- a/core/src/main/java/org/elasticsearch/common/logging/log4j/LogConfigurator.java +++ b/core/src/main/java/org/elasticsearch/common/logging/log4j/LogConfigurator.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.logging.log4j; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.log4j.PropertyConfigurator; import org.elasticsearch.ElasticsearchException; @@ -35,6 +34,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; +import java.util.Arrays; import java.util.EnumSet; import java.util.List; import java.util.Map; @@ -47,7 +47,7 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; */ public class LogConfigurator { - static final List ALLOWED_SUFFIXES = ImmutableList.of(".yml", ".yaml", ".json", ".properties"); + static final List ALLOWED_SUFFIXES = Arrays.asList(".yml", ".yaml", ".json", ".properties"); private static boolean loaded; diff --git a/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 48150c704f7..3ac810dc514 100644 --- a/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/core/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -24,7 +24,6 @@ import com.carrotsearch.hppc.FloatArrayList; import com.carrotsearch.hppc.LongArrayList; import com.carrotsearch.hppc.ObjectArrayList; import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterators; import com.google.common.collect.Lists; import org.apache.lucene.util.*; diff --git a/core/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java b/core/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java index 38953c51b02..2278220d9dd 100644 --- a/core/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java +++ b/core/src/main/java/org/elasticsearch/common/util/concurrent/AtomicArray.java @@ -19,10 +19,10 @@ package org.elasticsearch.common.util.concurrent; -import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchGenerationException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicReferenceArray; @@ -93,7 +93,7 @@ public class AtomicArray { public List> asList() { if (nonNullList == null) { if (array == null || array.length() == 0) { - nonNullList = ImmutableList.of(); + nonNullList = Collections.emptyList(); } else { List> list = new ArrayList<>(array.length()); for (int i = 0; i < 
array.length(); i++) { diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilteringJsonGenerator.java b/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilteringJsonGenerator.java index 2748b4b5097..b70a1ae9365 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilteringJsonGenerator.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/support/filtering/FilteringJsonGenerator.java @@ -23,7 +23,6 @@ import com.fasterxml.jackson.core.Base64Variant; import com.fasterxml.jackson.core.JsonGenerator; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.SerializableString; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.json.BaseJsonGenerator; @@ -34,6 +33,8 @@ import java.io.OutputStream; import java.math.BigDecimal; import java.math.BigInteger; import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Queue; @@ -61,7 +62,7 @@ public class FilteringJsonGenerator extends BaseJsonGenerator { public FilteringJsonGenerator(JsonGenerator generator, String[] filters) { super(generator); - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new ArrayList<>(); if (filters != null) { for (String filter : filters) { String[] matcher = Strings.delimitedListToStringArray(filter, "."); @@ -72,7 +73,7 @@ public class FilteringJsonGenerator extends BaseJsonGenerator { } // Creates a root context that matches all filtering rules - this.context = get(null, null, builder.build()); + this.context = get(null, null, Collections.unmodifiableList(builder)); } /** diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java b/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java index 00fb59e94ba..9d3fa1c2a30 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.zen.ping; -import com.google.common.collect.ImmutableList; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.Nullable; @@ -36,6 +35,9 @@ import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; @@ -46,23 +48,23 @@ import java.util.concurrent.atomic.AtomicReference; */ public class ZenPingService extends AbstractLifecycleComponent implements ZenPing { - private volatile ImmutableList zenPings = ImmutableList.of(); + private volatile List zenPings = Collections.emptyList(); @Inject public ZenPingService(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterName clusterName, NetworkService networkService, Version version, ElectMasterService electMasterService, @Nullable Set unicastHostsProviders) { super(settings); - ImmutableList.Builder zenPingsBuilder = ImmutableList.builder(); + List zenPingsBuilder = new ArrayList<>(); if (this.settings.getAsBoolean("discovery.zen.ping.multicast.enabled", true)) 
{ zenPingsBuilder.add(new MulticastZenPing(settings, threadPool, transportService, clusterName, networkService, version)); } // always add the unicast hosts, so it will be able to receive unicast requests even when working in multicast zenPingsBuilder.add(new UnicastZenPing(settings, threadPool, transportService, clusterName, version, electMasterService, unicastHostsProviders)); - this.zenPings = zenPingsBuilder.build(); + this.zenPings = Collections.unmodifiableList(zenPingsBuilder); } - public ImmutableList zenPings() { + public List zenPings() { return this.zenPings; } @@ -118,7 +120,7 @@ public class ZenPingService extends AbstractLifecycleComponent implemen @Override public void ping(PingListener listener, TimeValue timeout) { - ImmutableList zenPings = this.zenPings; + List zenPings = this.zenPings; CompoundPingListener compoundPingListener = new CompoundPingListener(listener, zenPings); for (ZenPing zenPing : zenPings) { try { @@ -138,7 +140,7 @@ public class ZenPingService extends AbstractLifecycleComponent implemen private PingCollection responses = new PingCollection(); - private CompoundPingListener(PingListener listener, ImmutableList zenPings) { + private CompoundPingListener(PingListener listener, List zenPings) { this.listener = listener; this.counter = new AtomicInteger(zenPings.size()); } diff --git a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index 869f2dc3e2f..983fcf73b3f 100644 --- a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -28,6 +28,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.env.NodeEnvironment; +import java.util.ArrayList; +import java.util.Collections; import java.util.Map; import java.util.Set; @@ -139,7 +141,7 @@ public class DanglingIndicesState extends AbstractComponent { return; } try { - allocateDangledIndices.allocateDangled(ImmutableList.copyOf(danglingIndices.values()), new LocalAllocateDangledIndices.Listener() { + allocateDangledIndices.allocateDangled(Collections.unmodifiableCollection(danglingIndices.values()), new LocalAllocateDangledIndices.Listener() { @Override public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) { logger.trace("allocated dangled"); diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java b/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java index 254b8b344e9..da120657806 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ScriptDocValues.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.fielddata; -import com.google.common.collect.ImmutableList; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.BytesRef; @@ -30,6 +29,7 @@ import org.joda.time.DateTimeZone; import org.joda.time.MutableDateTime; import java.util.AbstractList; +import java.util.Collections; import java.util.List; @@ -85,7 +85,7 @@ public interface ScriptDocValues extends List { @Override public List getValues() { - return ImmutableList.copyOf(this); + return Collections.unmodifiableList(this); } @Override @@ -128,7 +128,7 @@ public interface ScriptDocValues extends List { @Override public List getValues() { - return 
ImmutableList.copyOf(this); + return Collections.unmodifiableList(this); } public MutableDateTime getDate() { @@ -175,7 +175,7 @@ public interface ScriptDocValues extends List { @Override public List getValues() { - return ImmutableList.copyOf(this); + return Collections.unmodifiableList(this); } @Override @@ -238,7 +238,7 @@ public interface ScriptDocValues extends List { @Override public List getValues() { - return ImmutableList.copyOf(this); + return Collections.unmodifiableList(this); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 632284d2eb8..8331e0af7fe 100644 --- a/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/core/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.get; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import org.apache.lucene.index.Term; @@ -49,6 +48,7 @@ import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -258,7 +258,7 @@ public final class ShardGetService extends AbstractIndexShardComponent { if (value instanceof List) { fields.put(field, new GetField(field, (List) value)); } else { - fields.put(field, new GetField(field, ImmutableList.of(value))); + fields.put(field, new GetField(field, Collections.singletonList(value))); } } } @@ -383,7 +383,7 @@ public final class ShardGetService extends AbstractIndexShardComponent { if (value instanceof List) { fields.put(field, new GetField(field, (List) value)); } else { - fields.put(field, new GetField(field, ImmutableList.of(value))); + fields.put(field, new GetField(field, Collections.singletonList(value))); } } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index 5f6893ed2aa..2d2ac6c75f8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Function; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterators; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -695,9 +694,9 @@ public abstract class FieldMapper extends Mapper { */ public static class CopyTo { - private final ImmutableList copyToFields; + private final List copyToFields; - private CopyTo(ImmutableList copyToFields) { + private CopyTo(List copyToFields) { this.copyToFields = copyToFields; } @@ -713,7 +712,7 @@ public abstract class FieldMapper extends Mapper { } public static class Builder { - private final ImmutableList.Builder copyToBuilders = ImmutableList.builder(); + private final List copyToBuilders = new ArrayList<>(); public Builder add(String field) { copyToBuilders.add(field); @@ -721,7 +720,7 @@ public abstract class FieldMapper extends Mapper { } public CopyTo build() { - return new CopyTo(copyToBuilders.build()); + return new CopyTo(Collections.unmodifiableList(copyToBuilders)); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java 
b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 8386932465f..73694d497f8 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.hppc.ObjectHashSet; import com.google.common.base.Function; import com.google.common.base.Predicate; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterators; @@ -68,6 +67,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -527,7 +527,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { public Collection simpleMatchToIndexNames(String pattern) { if (Regex.isSimpleMatchPattern(pattern) == false) { // no wildcards - return ImmutableList.of(pattern); + return Collections.singletonList(pattern); } return fieldTypes.simpleMatchToIndexNames(pattern); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ObjectMappers.java b/core/src/main/java/org/elasticsearch/index/mapper/ObjectMappers.java deleted file mode 100644 index 52d4ea33f44..00000000000 --- a/core/src/main/java/org/elasticsearch/index/mapper/ObjectMappers.java +++ /dev/null @@ -1,124 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.UnmodifiableIterator; -import org.elasticsearch.index.mapper.object.ObjectMapper; - -/** - * A holder for several {@link org.elasticsearch.index.mapper.object.ObjectMapper}. - */ -public class ObjectMappers implements Iterable { - - private final ImmutableList objectMappers; - private final boolean hasNested; - - public ObjectMappers() { - this(ImmutableList.of()); - } - - public ObjectMappers(ObjectMapper objectMapper) { - this(new ObjectMapper[]{objectMapper}); - } - - public ObjectMappers(ObjectMapper[] objectMappers) { - this(ImmutableList.copyOf(objectMappers)); - } - - public ObjectMappers(ImmutableList objectMappers) { - this.objectMappers = objectMappers; - boolean hasNested = false; - for (ObjectMapper objectMapper : objectMappers) { - if (objectMapper.nested().isNested()) { - hasNested = true; - break; - } - } - this.hasNested = hasNested; - } - - /** - * Is one of the object mappers has a nested mapping set? 
- */ - public boolean hasNested() { - return this.hasNested; - } - - public ObjectMapper mapper() { - if (objectMappers.isEmpty()) { - return null; - } - return objectMappers.get(0); - } - - public boolean isEmpty() { - return objectMappers.isEmpty(); - } - - public ImmutableList mappers() { - return this.objectMappers; - } - - @Override - public UnmodifiableIterator iterator() { - return objectMappers.iterator(); - } - - /** - * Concats and returns a new {@link org.elasticsearch.index.mapper.ObjectMappers}. - */ - public ObjectMappers concat(ObjectMapper mapper) { - return new ObjectMappers(new ImmutableList.Builder().addAll(objectMappers).add(mapper).build()); - } - - /** - * Concats and returns a new {@link org.elasticsearch.index.mapper.ObjectMappers}. - */ - public ObjectMappers concat(ObjectMappers mappers) { - return new ObjectMappers(new ImmutableList.Builder().addAll(objectMappers).addAll(mappers).build()); - } - - public ObjectMappers remove(Iterable mappers) { - ImmutableList.Builder builder = new ImmutableList.Builder<>(); - for (ObjectMapper objectMapper : objectMappers) { - boolean found = false; - for (ObjectMapper mapper : mappers) { - if (objectMapper == mapper) { // identify equality - found = true; - } - } - if (!found) { - builder.add(objectMapper); - } - } - return new ObjectMappers(builder.build()); - } - - public ObjectMappers remove(ObjectMapper mapper) { - ImmutableList.Builder builder = new ImmutableList.Builder<>(); - for (ObjectMapper objectMapper : objectMappers) { - if (objectMapper != mapper) { // identify equality - builder.add(objectMapper); - } - } - return new ObjectMappers(builder.build()); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java index 340eb81520c..dcbb19f7bb7 100644 --- a/core/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/IdsQueryParser.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import org.apache.lucene.queries.TermsQuery; @@ -34,6 +33,7 @@ import org.elasticsearch.index.mapper.internal.UidFieldMapper; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.List; /** @@ -96,7 +96,7 @@ public class IdsQueryParser implements QueryParser { } } else if (token.isValue()) { if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) { - types = ImmutableList.of(parser.text()); + types = Collections.singletonList(parser.text()); } else if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); } else if ("_name".equals(currentFieldName)) { diff --git a/core/src/main/java/org/elasticsearch/index/search/stats/StatsGroupsParseElement.java b/core/src/main/java/org/elasticsearch/index/search/stats/StatsGroupsParseElement.java index e35607ac4bf..f4dcebd9d54 100644 --- a/core/src/main/java/org/elasticsearch/index/search/stats/StatsGroupsParseElement.java +++ b/core/src/main/java/org/elasticsearch/index/search/stats/StatsGroupsParseElement.java @@ -19,12 +19,12 @@ package org.elasticsearch.index.search.stats; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.internal.SearchContext; import java.util.ArrayList; +import 
java.util.Collections; import java.util.List; /** @@ -35,7 +35,7 @@ public class StatsGroupsParseElement implements SearchParseElement { public void parse(XContentParser parser, SearchContext context) throws Exception { XContentParser.Token token = parser.currentToken(); if (token.isValue()) { - context.groupStats(ImmutableList.of(parser.text())); + context.groupStats(Collections.singletonList(parser.text())); } else if (token == XContentParser.Token.START_ARRAY) { List groupStats = new ArrayList<>(4); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java b/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java index bae4ae8baf5..6afa0c96e5f 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java +++ b/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java @@ -19,10 +19,10 @@ package org.elasticsearch.index.shard; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.store.StoreFileMetaData; +import java.util.Collections; import java.util.List; /** @@ -30,7 +30,7 @@ import java.util.List; */ public class CommitPoint { - public static final CommitPoint NULL = new CommitPoint(-1, "_null_", Type.GENERATED, ImmutableList.of(), ImmutableList.of()); + public static final CommitPoint NULL = new CommitPoint(-1, "_null_", Type.GENERATED, Collections.emptyList(), Collections.emptyList()); public static class FileInfo { private final String name; @@ -81,16 +81,16 @@ public class CommitPoint { private final Type type; - private final ImmutableList indexFiles; + private final List indexFiles; - private final ImmutableList translogFiles; + private final List translogFiles; public CommitPoint(long version, String name, Type type, List indexFiles, List translogFiles) { this.version = version; this.name = name; this.type = type; - this.indexFiles = ImmutableList.copyOf(indexFiles); - this.translogFiles = ImmutableList.copyOf(translogFiles); + this.indexFiles = Collections.unmodifiableList(indexFiles); + this.translogFiles = Collections.unmodifiableList(translogFiles); } public long version() { diff --git a/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java b/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java index a56a0f62c24..9f4a2aa96ce 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java +++ b/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.shard; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -28,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; +import java.util.Collections; import java.util.Comparator; import java.util.Iterator; import java.util.List; @@ -37,7 +37,7 @@ import java.util.List; */ public class CommitPoints implements Iterable { - private final ImmutableList commitPoints; + private final List commitPoints; public CommitPoints(List commitPoints) { CollectionUtil.introSort(commitPoints, new Comparator() { @@ -46,7 +46,7 @@ public class CommitPoints implements Iterable { return (o2.version() < o1.version() ? -1 : (o2.version() == o1.version() ? 
0 : 1)); } }); - this.commitPoints = ImmutableList.copyOf(commitPoints); + this.commitPoints = Collections.unmodifiableList(commitPoints); } public List commits() { diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 0e997c14ec0..282e869ef01 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.snapshots.blobstore; -import com.google.common.collect.ImmutableList; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.elasticsearch.ElasticsearchParseException; @@ -33,6 +32,7 @@ import org.elasticsearch.common.xcontent.*; import org.elasticsearch.index.store.StoreFileMetaData; import java.io.IOException; +import java.util.Collections; import java.util.List; import static com.google.common.collect.Lists.newArrayList; @@ -323,7 +323,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil private final long totalSize; - private final ImmutableList indexFiles; + private final List indexFiles; /** * Constructs new shard snapshot metadata from snapshot metadata @@ -342,7 +342,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil assert indexVersion >= 0; this.snapshot = snapshot; this.indexVersion = indexVersion; - this.indexFiles = ImmutableList.copyOf(indexFiles); + this.indexFiles = Collections.unmodifiableList(indexFiles); this.startTime = startTime; this.time = time; this.numberOfFiles = numberOfFiles; @@ -355,7 +355,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil private BlobStoreIndexShardSnapshot() { this.snapshot = ""; this.indexVersion = 0; - this.indexFiles = ImmutableList.of(); + this.indexFiles = Collections.emptyList(); this.startTime = 0; this.time = 0; this.numberOfFiles = 0; @@ -520,7 +520,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil } } } - return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, ImmutableList.copyOf(indexFiles), + return new BlobStoreIndexShardSnapshot(snapshot, indexVersion, Collections.unmodifiableList(indexFiles), startTime, time, numberOfFiles, totalSize); } } diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java index 19bf4ee3932..fa06dff558d 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.snapshots.blobstore; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; @@ -28,6 +27,8 @@ import org.elasticsearch.common.xcontent.*; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -45,12 +46,12 @@ public class 
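
A related note on the constructor changes above (CommitPoint, CommitPoints, and BlobStoreIndexShardSnapshot below): Collections.unmodifiableList(indexFiles) only wraps the list the caller passed in, where ImmutableList.copyOf(indexFiles) also made a private copy. If a caller keeps a reference and mutates it later, the change shows through the wrapper. Where the old defensive-copy guarantee matters, copy first and then wrap; a trimmed, illustrative sketch of that idiom (not the patch's code):

    // copy-then-wrap: the field can no longer be affected by,
    // or leak mutations to, the caller's list
    public CommitPointSketch(List<FileInfo> indexFiles) {
        this.indexFiles = Collections.unmodifiableList(new ArrayList<>(indexFiles));
    }
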
BlobStoreIndexShardSnapshots implements Iterable, To public static final BlobStoreIndexShardSnapshots PROTO = new BlobStoreIndexShardSnapshots(); - private final ImmutableList shardSnapshots; + private final List shardSnapshots; private final ImmutableMap files; - private final ImmutableMap> physicalFiles; + private final ImmutableMap> physicalFiles; public BlobStoreIndexShardSnapshots(List shardSnapshots) { - this.shardSnapshots = ImmutableList.copyOf(shardSnapshots); + this.shardSnapshots = Collections.unmodifiableList(shardSnapshots); // Map between blob names and file info Map newFiles = newHashMap(); // Map between original physical names and file info @@ -74,15 +75,15 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To physicalFileList.add(newFiles.get(fileInfo.name())); } } - ImmutableMap.Builder> mapBuilder = ImmutableMap.builder(); + ImmutableMap.Builder> mapBuilder = ImmutableMap.builder(); for (Map.Entry> entry : physicalFiles.entrySet()) { - mapBuilder.put(entry.getKey(), ImmutableList.copyOf(entry.getValue())); + mapBuilder.put(entry.getKey(), Collections.unmodifiableList(entry.getValue())); } this.physicalFiles = mapBuilder.build(); this.files = ImmutableMap.copyOf(newFiles); } - private BlobStoreIndexShardSnapshots(ImmutableMap files, ImmutableList shardSnapshots) { + private BlobStoreIndexShardSnapshots(ImmutableMap files, List shardSnapshots) { this.shardSnapshots = shardSnapshots; this.files = files; Map> physicalFiles = newHashMap(); @@ -96,15 +97,15 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To physicalFileList.add(files.get(fileInfo.name())); } } - ImmutableMap.Builder> mapBuilder = ImmutableMap.builder(); + ImmutableMap.Builder> mapBuilder = ImmutableMap.builder(); for (Map.Entry> entry : physicalFiles.entrySet()) { - mapBuilder.put(entry.getKey(), ImmutableList.copyOf(entry.getValue())); + mapBuilder.put(entry.getKey(), Collections.unmodifiableList(entry.getValue())); } this.physicalFiles = mapBuilder.build(); } private BlobStoreIndexShardSnapshots() { - shardSnapshots = ImmutableList.of(); + shardSnapshots = Collections.emptyList(); files = ImmutableMap.of(); physicalFiles = ImmutableMap.of(); } @@ -285,17 +286,17 @@ public class BlobStoreIndexShardSnapshots implements Iterable, To } ImmutableMap files = filesBuilder.build(); - ImmutableList.Builder snapshots = ImmutableList.builder(); + List snapshots = new ArrayList<>(); for (Map.Entry> entry : snapshotsMap.entrySet()) { - ImmutableList.Builder fileInfosBuilder = ImmutableList.builder(); + List fileInfosBuilder = new ArrayList<>(); for (String file : entry.getValue()) { FileInfo fileInfo = files.get(file); assert fileInfo != null; fileInfosBuilder.add(fileInfo); } - snapshots.add(new SnapshotFiles(entry.getKey(), fileInfosBuilder.build())); + snapshots.add(new SnapshotFiles(entry.getKey(), Collections.unmodifiableList(fileInfosBuilder))); } - return new BlobStoreIndexShardSnapshots(files, snapshots.build()); + return new BlobStoreIndexShardSnapshots(files, Collections.unmodifiableList(snapshots)); } } diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/SnapshotFiles.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/SnapshotFiles.java index aa265f17fbd..44d40c99f37 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/SnapshotFiles.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/SnapshotFiles.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.snapshots.blobstore; -import 
com.google.common.collect.ImmutableList; import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; import java.util.List; diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java index 27367177b6f..847f3e3774f 100644 --- a/core/src/main/java/org/elasticsearch/index/store/Store.java +++ b/core/src/main/java/org/elasticsearch/index/store/Store.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.store; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import org.apache.lucene.codecs.CodecUtil; @@ -1005,9 +1004,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref * NOTE: this diff will not contain the segments.gen file. This file is omitted on recovery. */ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { - final ImmutableList.Builder identical = ImmutableList.builder(); - final ImmutableList.Builder different = ImmutableList.builder(); - final ImmutableList.Builder missing = ImmutableList.builder(); + final List identical = new ArrayList<>(); + final List different = new ArrayList<>(); + final List missing = new ArrayList<>(); final Map> perSegment = new HashMap<>(); final List perCommitStoreFiles = new ArrayList<>(); @@ -1053,7 +1052,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref different.addAll(identicalFiles); } } - RecoveryDiff recoveryDiff = new RecoveryDiff(identical.build(), different.build(), missing.build()); + RecoveryDiff recoveryDiff = new RecoveryDiff(Collections.unmodifiableList(identical), Collections.unmodifiableList(different), Collections.unmodifiableList(missing)); assert recoveryDiff.size() == this.metadata.size() - (metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) ? 
1 : 0) : "some files are missing recoveryDiff size: [" + recoveryDiff.size() + "] metadata size: [" + this.metadata.size() + "] contains segments.gen: [" + metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) + "]"; return recoveryDiff; diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java index a0ef2eff76c..cc58305a49d 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.recovery; -import com.google.common.collect.ImmutableList; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RestoreSource; import org.elasticsearch.common.Nullable; @@ -34,6 +33,8 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -712,7 +713,7 @@ public class RecoveryState implements ToXContent, Streamable { private long targetThrottleTimeInNanos = UNKNOWN; public synchronized List fileDetails() { - return ImmutableList.copyOf(fileDetails.values()); + return Collections.unmodifiableList(new ArrayList<>(fileDetails.values())); } public synchronized void reset() { diff --git a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java index 0f460274fc1..4355c9fa98d 100644 --- a/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java +++ b/core/src/main/java/org/elasticsearch/node/internal/InternalSettingsPreparer.java @@ -20,7 +20,6 @@ package org.elasticsearch.node.internal; import com.google.common.base.Charsets; -import com.google.common.collect.ImmutableList; import com.google.common.collect.UnmodifiableIterator; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.Booleans; @@ -38,6 +37,7 @@ import java.io.InputStreamReader; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.concurrent.ThreadLocalRandom; @@ -50,7 +50,7 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder; */ public class InternalSettingsPreparer { - static final List ALLOWED_SUFFIXES = ImmutableList.of(".yml", ".yaml", ".json", ".properties"); + static final List ALLOWED_SUFFIXES = Arrays.asList(".yml", ".yaml", ".json", ".properties"); public static final String SECRET_PROMPT_VALUE = "${prompt.secret}"; public static final String TEXT_PROMPT_VALUE = "${prompt.text}"; diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java index 3e79ef4e783..dd2c5e8024e 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java @@ -19,7 +19,6 @@ package org.elasticsearch.percolator; import com.carrotsearch.hppc.ObjectObjectAssociativeContainer; -import com.google.common.collect.ImmutableList; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; @@ -156,7 +155,7 @@ public class PercolateContext extends SearchContext 
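
The RecoveryState.fileDetails() change above is the opposite case: there the patch does copy (new ArrayList<>(fileDetails.values())) before wrapping, and for good reason. The method is synchronized and the underlying map keeps changing during recovery, so returning a live view would hand callers a collection they could only iterate safely while holding the lock. The idiom, roughly (File is RecoveryState's per-file detail type):

    public synchronized List<File> fileDetails() {
        // snapshot under the lock, then wrap: callers get a stable,
        // read-only list that cannot race with later updates
        return Collections.unmodifiableList(new ArrayList<>(fileDetails.values()));
    }
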
{ Map fields = new HashMap<>(); for (IndexableField field : parsedDocument.rootDoc().getFields()) { - fields.put(field.name(), new InternalSearchHitField(field.name(), ImmutableList.of())); + fields.put(field.name(), new InternalSearchHitField(field.name(), Collections.emptyList())); } hitContext().reset( new InternalSearchHit(0, "unknown", new StringText(parsedDocument.type()), fields), diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java index e20418ce01b..bbe238bb40b 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -19,7 +19,6 @@ package org.elasticsearch.plugins; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchException; @@ -65,7 +64,7 @@ public class PluginsService extends AbstractComponent { /** * We keep around a list of plugins */ - private final ImmutableList> plugins; + private final List> plugins; private final PluginsInfo info; private final ImmutableMap> onModuleReferences; @@ -88,7 +87,7 @@ public class PluginsService extends AbstractComponent { public PluginsService(Settings settings, Environment environment) { super(settings); - ImmutableList.Builder> tupleBuilder = ImmutableList.builder(); + List> tupleBuilder = new ArrayList<>(); // first we load specified plugins via 'plugin.types' settings parameter. // this is a hack for what is between unit and integration tests... @@ -110,7 +109,7 @@ public class PluginsService extends AbstractComponent { throw new IllegalStateException(ex); } - plugins = tupleBuilder.build(); + plugins = Collections.unmodifiableList(tupleBuilder); info = new PluginsInfo(); for (Tuple tuple : plugins) { info.add(tuple.v1()); @@ -171,7 +170,7 @@ public class PluginsService extends AbstractComponent { this.onModuleReferences = onModuleReferences.immutableMap(); } - public ImmutableList> plugins() { + public List> plugins() { return plugins; } @@ -323,7 +322,7 @@ public class PluginsService extends AbstractComponent { } private List> loadBundles(List bundles) { - ImmutableList.Builder> plugins = ImmutableList.builder(); + List> plugins = new ArrayList<>(); for (Bundle bundle : bundles) { // jar-hell check the bundle against the parent classloader @@ -355,7 +354,7 @@ public class PluginsService extends AbstractComponent { } } - return plugins.build(); + return Collections.unmodifiableList(plugins); } private Plugin loadPlugin(String className, Settings settings, ClassLoader loader) { diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 3e9fbaaad09..ff67456c845 100644 --- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories.blobstore; -import com.google.common.collect.ImmutableList; import com.google.common.io.ByteStreams; import org.apache.lucene.store.RateLimiter; import org.elasticsearch.ElasticsearchParseException; @@ -318,13 +317,13 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent snapshotIds = snapshots(); if (snapshotIds.contains(snapshotId)) { - ImmutableList.Builder builder = ImmutableList.builder(); + List builder = new 
ArrayList<>(); for (SnapshotId id : snapshotIds) { if (!snapshotId.equals(id)) { builder.add(id); } } - snapshotIds = builder.build(); + snapshotIds = Collections.unmodifiableList(builder); } writeSnapshotList(snapshotIds); // Now delete all indices @@ -365,7 +364,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent snapshotIds = snapshots(); if (!snapshotIds.contains(snapshotId)) { - snapshotIds = ImmutableList.builder().addAll(snapshotIds).add(snapshotId).build(); + snapshotIds = new ArrayList<>(snapshotIds); + snapshotIds.add(snapshotId); + snapshotIds = Collections.unmodifiableList(snapshotIds); } writeSnapshotList(snapshotIds); return blobStoreSnapshot; @@ -404,7 +405,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent aliases, XContentBuilder builder, Params params) throws IOException { + private void writeAliases(List aliases, XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.ALIASES); if (aliases != null) { for (AliasMetaData alias : aliases) { @@ -144,7 +144,7 @@ public class RestGetIndicesAction extends BaseRestHandler { builder.endObject(); } - private void writeWarmers(ImmutableList warmers, XContentBuilder builder, Params params) throws IOException { + private void writeWarmers(List warmers, XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.WARMERS); if (warmers != null) { for (IndexWarmersMetaData.Entry warmer : warmers) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java index be83ccbe4b5..67e01017678 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.admin.indices.warmer.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequest; import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -32,6 +31,8 @@ import org.elasticsearch.rest.*; import org.elasticsearch.rest.action.support.RestBuilderListener; import org.elasticsearch.search.warmer.IndexWarmersMetaData; +import java.util.List; + import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestStatus.OK; @@ -68,7 +69,7 @@ public class RestGetWarmerAction extends BaseRestHandler { } builder.startObject(); - for (ObjectObjectCursor> entry : response.warmers()) { + for (ObjectObjectCursor> entry : response.warmers()) { builder.startObject(entry.key, XContentBuilder.FieldCaseConversion.NONE); builder.startObject(IndexWarmersMetaData.TYPE, XContentBuilder.FieldCaseConversion.NONE); for (IndexWarmersMetaData.Entry warmerEntry : entry.value) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 1f32c0a1687..d92e9599e70 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -18,7 +18,6 @@ */ package 
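
The BlobStoreRepository hunks just above replace ImmutableList.Builder round-trips with plain copy-on-write on the snapshot id list: copy the current list, mutate the copy, republish it wrapped. Both directions in one condensed sketch (local names follow the patch):

    // append a new snapshot id
    List<SnapshotId> added = new ArrayList<>(snapshotIds);
    added.add(snapshotId);
    snapshotIds = Collections.unmodifiableList(added);

    // drop a deleted snapshot id
    List<SnapshotId> remaining = new ArrayList<>();
    for (SnapshotId id : snapshotIds) {
        if (!snapshotId.equals(id)) {
            remaining.add(id);
        }
    }
    snapshotIds = Collections.unmodifiableList(remaining);
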
org.elasticsearch.search.aggregations; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.elasticsearch.common.bytes.BytesArray; @@ -36,6 +35,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams; import org.elasticsearch.search.aggregations.support.AggregationPath; import java.io.IOException; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -226,7 +226,7 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St metaData = in.readMap(); int size = in.readVInt(); if (size == 0) { - pipelineAggregators = ImmutableList.of(); + pipelineAggregators = Collections.emptyList(); } else { pipelineAggregators = Lists.newArrayListWithCapacity(size); for (int i = 0; i < size; i++) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index ceefcae41b6..960c382dfb8 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations; import com.google.common.base.Function; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterators; import com.google.common.collect.Lists; @@ -37,6 +36,7 @@ import org.elasticsearch.search.aggregations.support.AggregationPath; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -57,7 +57,7 @@ public class InternalAggregations implements Aggregations, ToXContent, Streamabl } }; - private List aggregations = ImmutableList.of(); + private List aggregations = Collections.emptyList(); private Map aggregationsAsMap; @@ -212,7 +212,7 @@ public class InternalAggregations implements Aggregations, ToXContent, Streamabl public void readFrom(StreamInput in) throws IOException { int size = in.readVInt(); if (size == 0) { - aggregations = ImmutableList.of(); + aggregations = Collections.emptyList(); aggregationsAsMap = ImmutableMap.of(); } else { aggregations = Lists.newArrayListWithCapacity(size); diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 0eb4c5cc882..a70dd8d3703 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.builder; import com.carrotsearch.hppc.ObjectFloatHashMap; import com.google.common.base.Charsets; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.elasticsearch.ElasticsearchGenerationException; @@ -539,7 +538,7 @@ public class SearchSourceBuilder extends ToXContentToBytes { * per field. 
*/
     public SearchSourceBuilder noFields() {
-        this.fieldNames = ImmutableList.of();
+        this.fieldNames = Collections.emptyList();
         return this;
     }
 
diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
index 81f13c32727..ee391adb8bb 100644
--- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.search.fetch;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.ReaderUtil;
@@ -67,6 +66,7 @@ import org.elasticsearch.search.lookup.SourceLookup;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -294,7 +294,7 @@ public class FetchPhase implements SearchPhase {
                 nestedParsedSource = (List<Map<String, Object>>) extractedValue;
             } else if (extractedValue instanceof Map) {
                 // nested field has an object value in the _source. This just means the nested field has just one inner object, which is valid, but uncommon.
-                nestedParsedSource = ImmutableList.of((Map<String, Object>) extractedValue);
+                nestedParsedSource = Collections.singletonList((Map<String, Object>) extractedValue);
             } else {
                 throw new IllegalStateException("extracted source isn't an object or an array");
             }
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java
index 5b9ab72641a..07e931c722a 100644
--- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java
+++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.search.highlight;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import org.apache.lucene.search.Query;
 import org.elasticsearch.common.component.AbstractComponent;
@@ -34,7 +33,10 @@ import org.elasticsearch.search.fetch.FetchSubPhase;
 import org.elasticsearch.search.internal.InternalSearchHit;
 import org.elasticsearch.search.internal.SearchContext;
 
+import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 
 import static com.google.common.collect.Maps.newHashMap;
 
@@ -44,7 +46,7 @@
  */
 public class HighlightPhase extends AbstractComponent implements FetchSubPhase {
 
-    private static final ImmutableList<String> STANDARD_HIGHLIGHTERS_BY_PRECEDENCE = ImmutableList.of("fvh", "postings", "plain");
+    private static final List<String> STANDARD_HIGHLIGHTERS_BY_PRECEDENCE = Arrays.asList("fvh", "postings", "plain");
 
     private final Highlighters highlighters;
 
@@ -82,7 +84,7 @@ public class HighlightPhase extends AbstractComponent implements FetchSubPhase {
                 DocumentMapper documentMapper = context.mapperService().documentMapper(hitContext.hit().type());
                 fieldNamesToHighlight = documentMapper.mappers().simpleMatchToFullName(field.field());
             } else {
-                fieldNamesToHighlight = ImmutableList.of(field.field());
+                fieldNamesToHighlight = Collections.singletonList(field.field());
             }
 
             if (context.highlight().forceSource(field)) {
diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightUtils.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightUtils.java 
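
One caveat on the two list factories used above: Collections.singletonList(x) is a close match for ImmutableList.of(x) (apart from tolerating a null element), but Arrays.asList(...), now backing STANDARD_HIGHLIGHTERS_BY_PRECEDENCE, is only fixed-size, not immutable. add and remove throw, yet set(i, v) succeeds and writes through to the backing array:

    List<String> fixed = Arrays.asList("fvh", "postings", "plain");
    fixed.set(0, "plain");     // allowed: fixed-size, not read-only
    // fixed.add("x");         // UnsupportedOperationException

    // for a constant that escapes its class, wrapping restores immutability
    List<String> safe = Collections.unmodifiableList(Arrays.asList("fvh", "postings", "plain"));

For a private constant that is never handed out, as here, the lighter Arrays.asList is enough.
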
index 3358aec0ed7..b26be721d93 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightUtils.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightUtils.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.highlight; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import org.apache.lucene.search.highlight.DefaultEncoder; import org.apache.lucene.search.highlight.Encoder; @@ -30,6 +29,7 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; +import java.util.Collections; import java.util.List; public final class HighlightUtils { @@ -52,7 +52,7 @@ public final class HighlightUtils { textsToHighlight = fieldVisitor.fields().get(mapper.fieldType().names().indexName()); if (textsToHighlight == null) { // Can happen if the document doesn't have the field to highlight - textsToHighlight = ImmutableList.of(); + textsToHighlight = Collections.emptyList(); } } else { SourceLookup sourceLookup = searchContext.lookup().source(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index 03c13c61e93..aa855ab5dd8 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.internal; import com.carrotsearch.hppc.ObjectObjectAssociativeContainer; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.apache.lucene.search.BooleanClause.Occur; @@ -575,7 +574,7 @@ public class DefaultSearchContext extends SearchContext { @Override public void emptyFieldNames() { - this.fieldNames = ImmutableList.of(); + this.fieldNames = Collections.emptyList(); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index f70c9a7e1a4..f6a6faac3ab 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.internal; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; @@ -39,6 +38,7 @@ import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; +import java.util.Collections; import java.util.List; /** @@ -243,7 +243,7 @@ public class SubSearchContext extends FilteredSearchContext { @Override public void emptyFieldNames() { - this.fieldNames = ImmutableList.of(); + this.fieldNames = Collections.emptyList(); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java b/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java index f2a996d2566..b976f0a27d1 100644 --- a/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java +++ b/core/src/main/java/org/elasticsearch/search/warmer/IndexWarmersMetaData.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.warmer; import com.google.common.base.Objects; -import 
com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import org.elasticsearch.cluster.AbstractDiffable; @@ -118,14 +117,14 @@ public class IndexWarmersMetaData extends AbstractDiffable } } - private final ImmutableList entries; + private final List entries; public IndexWarmersMetaData(Entry... entries) { - this.entries = ImmutableList.copyOf(entries); + this.entries = Arrays.asList(entries); } - public ImmutableList entries() { + public List entries() { return this.entries; } diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java index 548a8d2a55b..cc9eeb04130 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.snapshots; -import com.google.common.collect.ImmutableList; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -28,6 +27,8 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; /** @@ -39,7 +40,7 @@ public class RestoreInfo implements ToXContent, Streamable { private String name; - private ImmutableList indices; + private List indices; private int totalShards; @@ -49,7 +50,7 @@ public class RestoreInfo implements ToXContent, Streamable { } - public RestoreInfo(String name, ImmutableList indices, int totalShards, int successfulShards) { + public RestoreInfo(String name, List indices, int totalShards, int successfulShards) { this.name = name; this.indices = indices; this.totalShards = totalShards; @@ -147,11 +148,11 @@ public class RestoreInfo implements ToXContent, Streamable { public void readFrom(StreamInput in) throws IOException { name = in.readString(); int size = in.readVInt(); - ImmutableList.Builder indicesListBuilder = ImmutableList.builder(); + List indicesListBuilder = new ArrayList<>(); for (int i = 0; i < size; i++) { indicesListBuilder.add(in.readString()); } - indices = indicesListBuilder.build(); + indices = Collections.unmodifiableList(indicesListBuilder); totalShards = in.readVInt(); successfulShards = in.readVInt(); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index d8c6fdc8b07..4f57940bf6b 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -22,7 +22,6 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; @@ -271,7 +270,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis } shards = shardsBuilder.build(); - RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshotId, RestoreInProgress.State.INIT, ImmutableList.copyOf(renamedIndices.keySet()), shards); + RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshotId, RestoreInProgress.State.INIT, Collections.unmodifiableList(new 
ArrayList<>(renamedIndices.keySet())), shards); builder.putCustom(RestoreInProgress.TYPE, new RestoreInProgress(restoreEntry)); } else { shards = ImmutableMap.of(); @@ -284,7 +283,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis if (completed(shards)) { // We don't have any indices to restore - we are done - restoreInfo = new RestoreInfo(request.name(), ImmutableList.copyOf(renamedIndices.keySet()), + restoreInfo = new RestoreInfo(request.name(), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards.size(), shards.size() - failedShards(shards)); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java b/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java index 05429eab850..75abc40af61 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java +++ b/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java @@ -19,7 +19,6 @@ package org.elasticsearch.snapshots; -import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.ParseFieldMatcher; @@ -57,7 +56,7 @@ public class Snapshot implements Comparable, ToXContent, FromXContentB private final List shardFailures; - private final static List NO_FAILURES = ImmutableList.of(); + private final static List NO_FAILURES = Collections.emptyList(); public final static Snapshot PROTO = new Snapshot(); @@ -287,7 +286,7 @@ public class Snapshot implements Comparable, ToXContent, FromXContentB Version version = Version.CURRENT; SnapshotState state = SnapshotState.IN_PROGRESS; String reason = null; - ImmutableList indices = ImmutableList.of(); + List indices = Collections.emptyList(); long startTime = 0; long endTime = 0; int totalShard = 0; @@ -331,13 +330,13 @@ public class Snapshot implements Comparable, ToXContent, FromXContentB while (parser.nextToken() != XContentParser.Token.END_ARRAY) { indicesArray.add(parser.text()); } - indices = ImmutableList.copyOf(indicesArray); + indices = Collections.unmodifiableList(indicesArray); } else if ("failures".equals(currentFieldName)) { ArrayList shardFailureArrayList = new ArrayList<>(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { shardFailureArrayList.add(SnapshotShardFailure.fromXContent(parser)); } - shardFailures = ImmutableList.copyOf(shardFailureArrayList); + shardFailures = Collections.unmodifiableList(shardFailureArrayList); } else { // It was probably created by newer version - ignoring parser.skipChildren(); diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index a54b1b3b78e..e7b6ce13f12 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -19,6 +19,8 @@ package org.elasticsearch.snapshots; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; import java.util.List; import org.elasticsearch.Version; @@ -32,7 +34,6 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.rest.RestStatus; -import com.google.common.collect.ImmutableList; /** * Information about snapshot @@ -260,11 +261,11 @@ public class SnapshotInfo implements ToXContent, Streamable { public void readFrom(StreamInput in) throws IOException { 
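
The readFrom implementations touched here (RestoreInfo above, SnapshotInfo whose body continues below) all follow the same wire-decoding shape once the Guava builder is gone: read a count, fill an ArrayList, publish it wrapped. A generic sketch based on RestoreInfo (readVInt and readString are the real StreamInput calls; presizing the list with the count is an optional touch the patch leaves out):

    public void readFrom(StreamInput in) throws IOException {
        name = in.readString();
        int size = in.readVInt();                  // element count on the wire
        List<String> list = new ArrayList<>(size); // presized; the patch uses new ArrayList<>()
        for (int i = 0; i < size; i++) {
            list.add(in.readString());
        }
        indices = Collections.unmodifiableList(list);
    }
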
name = in.readString(); int size = in.readVInt(); - ImmutableList.Builder indicesListBuilder = ImmutableList.builder(); + List indicesListBuilder = new ArrayList<>(); for (int i = 0; i < size; i++) { indicesListBuilder.add(in.readString()); } - indices = indicesListBuilder.build(); + indices = Collections.unmodifiableList(indicesListBuilder); state = SnapshotState.fromValue(in.readByte()); reason = in.readOptionalString(); startTime = in.readVLong(); @@ -273,13 +274,13 @@ public class SnapshotInfo implements ToXContent, Streamable { successfulShards = in.readVInt(); size = in.readVInt(); if (size > 0) { - ImmutableList.Builder failureBuilder = ImmutableList.builder(); + List failureBuilder = new ArrayList<>(); for (int i = 0; i < size; i++) { failureBuilder.add(SnapshotShardFailure.readSnapshotShardFailure(in)); } - shardFailures = failureBuilder.build(); + shardFailures = Collections.unmodifiableList(failureBuilder); } else { - shardFailures = ImmutableList.of(); + shardFailures = Collections.emptyList(); } version = Version.readVersion(in); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java index 84e29063ddc..feda0276bd9 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java @@ -18,11 +18,13 @@ */ package org.elasticsearch.snapshots; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.IndexNotFoundException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -114,8 +116,8 @@ public class SnapshotUtils { } } if (result == null) { - return ImmutableList.copyOf(selectedIndices); + return Arrays.asList(selectedIndices); } - return ImmutableList.copyOf(result); + return Collections.unmodifiableList(new ArrayList<>(result)); } } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index da796d1b502..ab241306f7f 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -19,7 +19,6 @@ package org.elasticsearch.snapshots; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; @@ -136,7 +135,7 @@ public class SnapshotsService extends AbstractLifecycleComponent snapshotList = newArrayList(snapshotSet); CollectionUtil.timSort(snapshotList); - return ImmutableList.copyOf(snapshotList); + return Collections.unmodifiableList(snapshotList); } /** @@ -152,7 +151,7 @@ public class SnapshotsService extends AbstractLifecycleComponent indices = ImmutableList.copyOf(indexNameExpressionResolver.concreteIndices(currentState, request.indicesOptions(), request.indices())); + List indices = Arrays.asList(indexNameExpressionResolver.concreteIndices(currentState, request.indicesOptions(), request.indices())); logger.trace("[{}][{}] creating snapshot for indices [{}]", request.repository(), request.name(), indices); newSnapshot = new SnapshotsInProgress.Entry(snapshotId, request.includeGlobalState(), State.INIT, indices, System.currentTimeMillis(), null); snapshots = 
new SnapshotsInProgress(newSnapshot); @@ -297,7 +296,7 @@ public class SnapshotsService extends AbstractLifecycleComponent entries = ImmutableList.builder(); + List entries = new ArrayList<>(); for (SnapshotsInProgress.Entry entry : snapshots.entries()) { if (entry.snapshotId().equals(snapshot.snapshotId())) { // Replace the snapshot that was just created @@ -334,7 +333,7 @@ public class SnapshotsService extends AbstractLifecycleComponentof()); + snapshot.snapshotId(), snapshot.indices(), snapshot.startTime(), ExceptionsHelper.detailedMessage(t), 0, Collections.emptyList()); } catch (Throwable t2) { logger.warn("[{}] failed to close snapshot in repository", snapshot.snapshotId()); } @@ -373,7 +372,7 @@ public class SnapshotsService extends AbstractLifecycleComponentof()); + ExceptionsHelper.detailedMessage(t), 0, Collections.emptyList()); } catch (Throwable t2) { logger.warn("[{}] failed to close snapshot in repository", snapshot.snapshotId()); } @@ -399,7 +398,7 @@ public class SnapshotsService extends AbstractLifecycleComponent currentSnapshots(String repository, String[] snapshots) { SnapshotsInProgress snapshotsInProgress = clusterService.state().custom(SnapshotsInProgress.TYPE); if (snapshotsInProgress == null || snapshotsInProgress.entries().isEmpty()) { - return ImmutableList.of(); + return Collections.emptyList(); } if ("_all".equals(repository)) { return snapshotsInProgress.entries(); @@ -409,7 +408,7 @@ public class SnapshotsService extends AbstractLifecycleComponent 0) { for (String snapshot : snapshots) { @@ -417,12 +416,12 @@ public class SnapshotsService extends AbstractLifecycleComponent builder = ImmutableList.builder(); + List builder = new ArrayList<>(); for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) { if (!entry.snapshotId().getRepository().equals(repository)) { continue; @@ -438,7 +437,7 @@ public class SnapshotsService extends AbstractLifecycleComponent shards(ClusterState clusterState, ImmutableList indices) { + private ImmutableMap shards(ClusterState clusterState, List indices) { ImmutableMap.Builder builder = ImmutableMap.builder(); MetaData metaData = clusterState.metaData(); for (String index : indices) { diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index 8a43e0581bd..de7f91aa094 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -20,7 +20,6 @@ package org.elasticsearch.transport.netty; import com.google.common.base.Charsets; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -896,13 +895,15 @@ public class NettyTransport extends AbstractLifecycleComponent implem } } catch (RuntimeException e) { // clean the futures - for (ChannelFuture future : ImmutableList.builder().add(connectRecovery).add(connectBulk).add(connectReg).add(connectState).add(connectPing).build()) { - future.cancel(); - if (future.getChannel() != null && future.getChannel().isOpen()) { - try { - future.getChannel().close(); - } catch (Exception e1) { - // ignore + for (ChannelFuture[] futures : Arrays.asList(connectRecovery, connectBulk, connectReg, connectState, connectPing)) { + for (ChannelFuture future : futures) { + future.cancel(); + if (future.getChannel() != null && future.getChannel().isOpen()) { + try { + 
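
For readability, the added NettyTransport cleanup block that continues below iterates Arrays.asList over the five ChannelFuture arrays (a List of arrays) where the Guava builder had flattened everything into a single list; re-wrapped, the added lines read:

    for (ChannelFuture[] futures : Arrays.asList(connectRecovery, connectBulk, connectReg, connectState, connectPing)) {
        for (ChannelFuture future : futures) {
            future.cancel();
            if (future.getChannel() != null && future.getChannel().isOpen()) {
                try {
                    future.getChannel().close();
                } catch (Exception e1) {
                    // ignore: best-effort cleanup after a failed connect
                }
            }
        }
    }
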
future.getChannel().close();
+                        } catch (Exception e1) {
+                            // ignore
+                        }
                     }
                 }
             }
@@ -1079,7 +1080,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem
 
     public static class NodeChannels {
 
-        ImmutableList<Channel> allChannels = ImmutableList.of();
+        List<Channel> allChannels = Collections.emptyList();
         private Channel[] recovery;
         private final AtomicInteger recoveryCounter = new AtomicInteger();
         private Channel[] bulk;
@@ -1100,7 +1101,13 @@ public class NettyTransport extends AbstractLifecycleComponent implem
         }
 
         public void start() {
-            this.allChannels = ImmutableList.builder().add(recovery).add(bulk).add(reg).add(state).add(ping).build();
+            List<Channel> newAllChannels = new ArrayList<>();
+            newAllChannels.addAll(Arrays.asList(recovery));
+            newAllChannels.addAll(Arrays.asList(bulk));
+            newAllChannels.addAll(Arrays.asList(reg));
+            newAllChannels.addAll(Arrays.asList(state));
+            newAllChannels.addAll(Arrays.asList(ping));
+            this.allChannels = Collections.unmodifiableList(newAllChannels);
         }
 
         public boolean hasChannel(Channel channel) {
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
index ce214dd5a9d..9484c5e07f1 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java
@@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.get;
 
-import com.google.common.collect.ImmutableList;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
 import org.elasticsearch.cluster.metadata.AliasMetaData;
@@ -239,10 +238,10 @@ public class GetIndexIT extends ESIntegTestCase {
     }
 
     private void assertWarmers(GetIndexResponse response, String indexName) {
-        ImmutableOpenMap<String, ImmutableList<Entry>> warmers = response.warmers();
+        ImmutableOpenMap<String, List<Entry>> warmers = response.warmers();
         assertThat(warmers, notNullValue());
         assertThat(warmers.size(), equalTo(1));
-        ImmutableList<Entry> indexWarmers = warmers.get(indexName);
+        List<Entry> indexWarmers = warmers.get(indexName);
         assertThat(indexWarmers, notNullValue());
         assertThat(indexWarmers.size(), equalTo(1));
         Entry warmer = indexWarmers.get(0);
@@ -297,10 +296,10 @@ public class GetIndexIT extends ESIntegTestCase {
     }
 
     private void assertAliases(GetIndexResponse response, String indexName) {
-        ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliases = response.aliases();
+        ImmutableOpenMap<String, List<AliasMetaData>> aliases = response.aliases();
         assertThat(aliases, notNullValue());
         assertThat(aliases.size(), equalTo(1));
-        ImmutableList<AliasMetaData> indexAliases = aliases.get(indexName);
+        List<AliasMetaData> indexAliases = aliases.get(indexName);
         assertThat(indexAliases, notNullValue());
         assertThat(indexAliases.size(), equalTo(1));
         AliasMetaData alias = indexAliases.get(0);
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTest.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTest.java
index 12b61e2c2a5..925e01e8f2e 100644
--- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTest.java
+++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTest.java
@@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.shards;
 
-import com.google.common.collect.ImmutableList;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -42,7 
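
NodeChannels.start() above is the easiest place for this mechanical rewrite to go wrong: the unmodifiable wrapper must be taken over the freshly built list, not over the field it is about to replace. Wrapping the field would republish the stale empty list:

    this.allChannels = Collections.unmodifiableList(allChannels);    // bug: wraps the old, empty list
    this.allChannels = Collections.unmodifiableList(newAllChannels); // correct: wraps the new contents
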
diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java index ce214dd5a9d..9484c5e07f1 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.get; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.cluster.metadata.AliasMetaData; @@ -239,10 +238,10 @@ public class GetIndexIT extends ESIntegTestCase { } private void assertWarmers(GetIndexResponse response, String indexName) { - ImmutableOpenMap<String, ImmutableList<Entry>> warmers = response.warmers(); + ImmutableOpenMap<String, List<Entry>> warmers = response.warmers(); assertThat(warmers, notNullValue()); assertThat(warmers.size(), equalTo(1)); - ImmutableList<Entry> indexWarmers = warmers.get(indexName); + List<Entry> indexWarmers = warmers.get(indexName); assertThat(indexWarmers, notNullValue()); assertThat(indexWarmers.size(), equalTo(1)); Entry warmer = indexWarmers.get(0); @@ -297,10 +296,10 @@ public class GetIndexIT extends ESIntegTestCase { } private void assertAliases(GetIndexResponse response, String indexName) { - ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliases = response.aliases(); + ImmutableOpenMap<String, List<AliasMetaData>> aliases = response.aliases(); assertThat(aliases, notNullValue()); assertThat(aliases.size(), equalTo(1)); - ImmutableList<AliasMetaData> indexAliases = aliases.get(indexName); + List<AliasMetaData> indexAliases = aliases.get(indexName); assertThat(indexAliases, notNullValue()); assertThat(indexAliases.size(), equalTo(1)); AliasMetaData alias = indexAliases.get(0); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTest.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTest.java index 12b61e2c2a5..925e01e8f2e 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTest.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.shards; -import com.google.common.collect.ImmutableList; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -42,7 +41,7 @@ public class IndicesShardStoreResponseTest extends ESTestCase { @Test public void testBasicSerialization() throws Exception { ImmutableOpenMap.Builder<String, ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>>> indexStoreStatuses = ImmutableOpenMap.builder(); - ImmutableList.Builder<IndicesShardStoresResponse.Failure> failures = ImmutableList.builder(); + List<IndicesShardStoresResponse.Failure> failures = new ArrayList<>(); ImmutableOpenIntMap.Builder<List<IndicesShardStoresResponse.StoreStatus>> storeStatuses = ImmutableOpenIntMap.builder(); DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT); @@ -59,7 +58,7 @@ public class IndicesShardStoreResponseTest extends ESTestCase { failures.add(new IndicesShardStoresResponse.Failure("node1", "test", 3, new NodeDisconnectedException(node1, ""))); - IndicesShardStoresResponse storesResponse = new IndicesShardStoresResponse(indexStoreStatuses.build(), failures.build()); + IndicesShardStoresResponse storesResponse = new IndicesShardStoresResponse(indexStoreStatuses.build(), Collections.unmodifiableList(failures)); XContentBuilder contentBuilder = XContentFactory.jsonBuilder(); contentBuilder.startObject(); storesResponse.toXContent(contentBuilder, ToXContent.EMPTY_PARAMS); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityIT.java index d26d0491095..a7e9380d02a 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/GetIndexBackwardsCompatibilityIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.bwcompat; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; @@ -33,6 +32,8 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData.Entry; import org.elasticsearch.test.ESBackcompatTestCase; import org.junit.Test; +import java.util.List; + import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; @@ -46,10 +47,10 @@ public class GetIndexBackwardsCompatibilityIT extends ESBackcompatTestCase { assertAcked(createIndexResponse); GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.ALIASES) .execute().actionGet(); - ImmutableOpenMap<String, ImmutableList<AliasMetaData>> aliasesMap = getIndexResponse.aliases(); + ImmutableOpenMap<String, List<AliasMetaData>> aliasesMap = getIndexResponse.aliases(); assertThat(aliasesMap, notNullValue()); assertThat(aliasesMap.size(), equalTo(1)); - ImmutableList<AliasMetaData> aliasesList = aliasesMap.get("test"); + List<AliasMetaData> aliasesList = aliasesMap.get("test"); assertThat(aliasesList, notNullValue()); assertThat(aliasesList.size(), equalTo(1)); AliasMetaData alias = aliasesList.get(0); @@ -100,10 +101,10 @@ public class GetIndexBackwardsCompatibilityIT extends ESBackcompatTestCase { ensureSearchable("test"); GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices("test").addFeatures(Feature.WARMERS) .execute().actionGet(); - ImmutableOpenMap<String, ImmutableList<Entry>> warmersMap = getIndexResponse.warmers(); + ImmutableOpenMap<String, List<Entry>> warmersMap = getIndexResponse.warmers(); assertThat(warmersMap, notNullValue()); assertThat(warmersMap.size(), equalTo(1)); - ImmutableList<Entry> warmersList = warmersMap.get("test"); + List<Entry> warmersList = warmersMap.get("test"); assertThat(warmersList, notNullValue()); assertThat(warmersList.size(), equalTo(1)); Entry warmer = warmersList.get(0);
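A side note on the types in these tests: ImmutableOpenMap is untouched by the de-Guava effort because it is Elasticsearch's own hppc-backed class in org.elasticsearch.common.collect, not a Guava collection; only the List values stored inside it change. A rough sketch of its builder-and-lookup pattern as the tests use it (assumes elasticsearch core on the classpath; alias stands in for a real AliasMetaData instance):

    ImmutableOpenMap.Builder<String, List<AliasMetaData>> builder = ImmutableOpenMap.builder();
    builder.put("test", Collections.singletonList(alias));          // one alias for index "test"
    ImmutableOpenMap<String, List<AliasMetaData>> aliases = builder.build();
    List<AliasMetaData> forTest = aliases.get("test");              // map-style lookup, null when absent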
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index edd6254b511..1aa1602b7e5 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.cursors.ObjectCursor; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.elasticsearch.Version; import org.elasticsearch.cluster.block.ClusterBlock; @@ -44,6 +43,7 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; +import java.util.Collections; import java.util.List; import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder; @@ -659,14 +659,14 @@ public class ClusterStateDiffIT extends ESIntegTestCase { new SnapshotId(randomName("repo"), randomName("snap")), randomBoolean(), SnapshotsInProgress.State.fromValue((byte) randomIntBetween(0, 6)), - ImmutableList.of(), + Collections.emptyList(), Math.abs(randomLong()), ImmutableMap.of())); case 1: return new RestoreInProgress(new RestoreInProgress.Entry( new SnapshotId(randomName("repo"), randomName("snap")), RestoreInProgress.State.fromValue((byte) randomIntBetween(0, 3)), - ImmutableList.of(), + Collections.emptyList(), ImmutableMap.of())); default: throw new IllegalArgumentException("Shouldn't be here"); diff --git a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java index 84b31e3be30..1ec4c23adda 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java @@ -46,8 +46,8 @@ import org.elasticsearch.test.ESIntegTestCase; import org.junit.Test; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Predicate; -import com.google.common.collect.ImmutableList; +import java.util.List; import java.util.concurrent.TimeUnit; import static org.elasticsearch.cluster.metadata.IndexMetaData.*; @@ -100,7 +100,7 @@ public class AckIT extends ESIntegTestCase { for (Client client : clients()) { GetWarmersResponse getWarmersResponse = client.admin().indices().prepareGetWarmers().setLocal(true).get(); assertThat(getWarmersResponse.warmers().size(), equalTo(1)); - ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next(); + ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next(); assertThat(entry.key, equalTo("test")); assertThat(entry.value.size(), equalTo(1)); assertThat(entry.value.get(0).name(), equalTo("custom_warmer")); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index 657d33f1c85..c6929d89540 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import com.google.common.collect.ImmutableList; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -38,6 +37,7 @@ import
org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Test; +import java.util.Collections; import java.util.EnumSet; import static org.elasticsearch.cluster.routing.ShardRoutingState.*; @@ -251,7 +251,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase { assertThat(clusterState.getRoutingNodes().hasUnassigned(), equalTo(false)); // fail shard ShardRouting shardToFail = clusterState.getRoutingNodes().shardsWithState(STARTED).get(0); - clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyFailedShards(clusterState, ImmutableList.of(new FailedRerouteAllocation.FailedShard(shardToFail, "test fail", null)))).build(); + clusterState = ClusterState.builder(clusterState).routingResult(allocation.applyFailedShards(clusterState, Collections.singletonList(new FailedRerouteAllocation.FailedShard(shardToFail, "test fail", null)))).build(); // verify the reason and details assertThat(clusterState.getRoutingNodes().hasUnassigned(), equalTo(true)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java index 78083a6b902..ff2ae1051ed 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing.allocation; -import com.google.common.collect.ImmutableList; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -35,6 +34,7 @@ import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Test; import java.util.ArrayList; +import java.util.Collections; import static org.elasticsearch.cluster.routing.ShardRoutingState.*; import static org.elasticsearch.common.settings.Settings.settingsBuilder; @@ -562,7 +562,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); // start another replica shard, while keep one initializing - clusterState = ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, ImmutableList.of(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0))).routingTable()).build(); + clusterState = ClusterState.builder(clusterState).routingTable(allocation.applyStartedShards(clusterState, Collections.singletonList(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).get(0))).routingTable()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); diff --git a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java index 4e6eabc589c..d405fb1d42c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.structure; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; 
import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -37,6 +36,8 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Test; +import java.util.Collections; + import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.hamcrest.Matchers.*; @@ -46,28 +47,28 @@ public class RoutingIteratorTests extends ESAllocationTestCase { @Test public void testEmptyIterator() { ShardShuffler shuffler = new RotationShardShuffler(0); - ShardIterator shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.of())); + ShardIterator shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(Collections.emptyList())); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); - shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.of())); + shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(Collections.emptyList())); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); - shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.of())); + shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(Collections.emptyList())); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); - shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(ImmutableList.of())); + shardIterator = new PlainShardIterator(new ShardId("test1", 0), shuffler.shuffle(Collections.emptyList())); assertThat(shardIterator.remaining(), equalTo(0)); assertThat(shardIterator.nextOrNull(), nullValue()); assertThat(shardIterator.remaining(), equalTo(0)); diff --git a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java index 09c4b800568..da2f918720f 100644 --- a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util; -import com.google.common.collect.ImmutableList; import com.google.common.collect.Iterables; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefArray; @@ -37,7 +36,7 @@ public class CollectionUtilsTests extends ESTestCase { @Test public void rotateEmpty() { - assertTrue(CollectionUtils.rotate(ImmutableList.of(), randomInt()).isEmpty()); + assertTrue(CollectionUtils.rotate(Collections.emptyList(), randomInt()).isEmpty()); } @Test diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index 84b206dec68..fecb3d9208c 100644 
--- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -286,8 +286,10 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { NetworkPartition networkPartition = addRandomPartition(); - final String isolatedNode = networkPartition.getMinoritySide().get(0); - final String nonIsolatedNode = networkPartition.getMajoritySide().get(0); + assertEquals(1, networkPartition.getMinoritySide().size()); + final String isolatedNode = networkPartition.getMinoritySide().iterator().next(); + assertEquals(2, networkPartition.getMajoritySide().size()); + final String nonIsolatedNode = networkPartition.getMajoritySide().iterator().next(); // Simulate a network issue between the unlucky node and the rest of the cluster. networkPartition.startDisrupting(); @@ -364,7 +366,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { NetworkPartition networkPartition = addRandomIsolation(isolatedNode); networkPartition.startDisrupting(); - String nonIsolatedNode = networkPartition.getMajoritySide().get(0); + String nonIsolatedNode = networkPartition.getMajoritySide().iterator().next(); // make sure cluster reforms ensureStableCluster(2, nonIsolatedNode); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationIT.java index 837a7a060c1..613cdde97e6 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationIT.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.mapper.core; import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -36,6 +35,7 @@ import org.junit.Test; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; @@ -98,8 +98,8 @@ public class TokenCountFieldMapperIntegrationIT extends ESIntegTestCase { public void facetByTokenCount() throws IOException { init(); - String facetField = randomFrom(ImmutableList.of( - "foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values")); + String facetField = randomFrom(Arrays.asList( + "foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values")); SearchResponse result = searchByNumericRange(1, 10) .addAggregation(AggregationBuilders.terms("facet").field(facetField)).get(); assertSearchReturns(result, "single", "bulk1", "bulk2", "multi", "multibulk1", "multibulk2"); @@ -166,7 +166,7 @@ public class TokenCountFieldMapperIntegrationIT extends ESIntegTestCase { private SearchRequestBuilder searchByNumericRange(int low, int high) { return prepareSearch().setQuery(QueryBuilders.rangeQuery(randomFrom( - ImmutableList.of("foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values") + Arrays.asList("foo.token_count", "foo.token_count_unstored", "foo.token_count_with_doc_values") )).gte(low).lte(high)); } diff --git 
a/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksIT.java b/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksIT.java index e6afa4c259c..0ee4ab6329f 100644 --- a/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksIT.java +++ b/core/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksIT.java @@ -21,7 +21,6 @@ package org.elasticsearch.indices.warmer; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.warmer.IndexWarmersMetaData; @@ -30,6 +29,7 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.junit.Test; import java.util.Arrays; +import java.util.List; import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.cluster.metadata.MetaData.CLUSTER_READ_ONLY_BLOCK; @@ -106,7 +106,7 @@ public class IndicesWarmerBlocksIT extends ESIntegTestCase { GetWarmersResponse response = client().admin().indices().prepareGetWarmers("test-blocks").get(); assertThat(response.warmers().size(), equalTo(1)); - ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = response.warmers().iterator().next(); + ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = response.warmers().iterator().next(); assertThat(entry.key, equalTo("test-blocks")); assertThat(entry.value.size(), equalTo(1)); assertThat(entry.value.iterator().next().name(), equalTo("warmer_block")); diff --git a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java index 6179af72d34..687da409b40 100644 --- a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.warmer; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.segments.IndexSegments; @@ -46,6 +45,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; import org.junit.Test; +import java.util.List; import java.util.Locale; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -185,7 +185,7 @@ public class SimpleIndicesWarmerIT extends ESIntegTestCase { GetWarmersResponse getWarmersResponse = client().admin().indices().prepareGetWarmers("test").get(); assertThat(getWarmersResponse.warmers().size(), equalTo(1)); - ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next(); + ObjectObjectCursor<String, List<IndexWarmersMetaData.Entry>> entry = getWarmersResponse.warmers().iterator().next(); assertThat(entry.key, equalTo("test")); assertThat(entry.value.size(), equalTo(1)); assertThat(entry.value.iterator().next().name(), equalTo("custom_warmer"));
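For readers unfamiliar with the hppc-style cursors in these assertions: iterating an ImmutableOpenMap yields ObjectObjectCursor objects whose key and value are plain public fields rather than getters, which is why the tests read entry.key and entry.value directly. A minimal sketch (assumes elasticsearch core and hppc on the classpath; contents invented for illustration):

    ImmutableOpenMap.Builder<String, List<String>> warmers = ImmutableOpenMap.builder();
    warmers.put("test", Arrays.asList("custom_warmer"));
    for (ObjectObjectCursor<String, List<String>> cursor : warmers.build()) {
        System.out.println(cursor.key + " -> " + cursor.value);   // test -> [custom_warmer]
    }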
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java index d00658709bb..aae7800c086 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/SuggestSearchIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.suggest; import com.google.common.base.Charsets; -import com.google.common.collect.ImmutableList; import com.google.common.io.Resources; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; @@ -967,7 +966,7 @@ public class SuggestSearchIT extends ESIntegTestCase { assertAcked(builder.addMapping("type1", mapping)); ensureGreen(); - ImmutableList.Builder<String> titles = ImmutableList.builder(); + List<String> titles = new ArrayList<>(); // We're going to be searching for: // united states house of representatives elections in washington 2006 @@ -1058,7 +1057,7 @@ public class SuggestSearchIT extends ESIntegTestCase { } List<IndexRequestBuilder> builders = new ArrayList<>(); - for (String title: titles.build()) { + for (String title: titles) { builders.add(client().prepareIndex("test", "type1").setSource("title", title)); } indexRandom(true, builders); @@ -1113,7 +1112,7 @@ public class SuggestSearchIT extends ESIntegTestCase { assertAcked(builder.addMapping("type1", mapping)); ensureGreen(); - ImmutableList.Builder<String> titles = ImmutableList.builder(); + List<String> titles = new ArrayList<>(); titles.add("United States House of Representatives Elections in Washington 2006"); titles.add("United States House of Representatives Elections in Washington 2005"); @@ -1123,7 +1122,7 @@ public class SuggestSearchIT extends ESIntegTestCase { titles.add("Election"); List<IndexRequestBuilder> builders = new ArrayList<>(); - for (String title: titles.build()) { + for (String title: titles) { builders.add(client().prepareIndex("test", "type1").setSource("title", title)); } indexRandom(true, builders); diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index cea3233b03d..1cfecf3be15 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -21,7 +21,6 @@ package org.elasticsearch.snapshots; import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; -import com.google.common.collect.ImmutableList; import com.google.common.util.concurrent.ListenableFuture; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ListenableActionFuture; @@ -474,14 +473,14 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest @Override public void run() { SnapshotsStatusResponse snapshotsStatusResponse = client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap-2").get(); - ImmutableList<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots(); + List<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots(); assertEquals(snapshotStatuses.size(), 1); logger.trace("current snapshot status [{}]", snapshotStatuses.get(0)); assertTrue(snapshotStatuses.get(0).getState().completed()); } }, 1, TimeUnit.MINUTES); SnapshotsStatusResponse snapshotsStatusResponse = client().admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("test-snap-2").get(); - ImmutableList<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots(); + List<SnapshotStatus> snapshotStatuses = snapshotsStatusResponse.getSnapshots(); assertThat(snapshotStatuses.size(), equalTo(1)); SnapshotStatus snapshotStatus = snapshotStatuses.get(0); logger.info("State: [{}], Reason: [{}]", createSnapshotResponse.getSnapshotInfo().state(), createSnapshotResponse.getSnapshotInfo().reason()); diff --git
a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 7e282305c89..eb388387ea3 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -20,7 +20,6 @@ package org.elasticsearch.snapshots; import com.google.common.base.Predicate; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; @@ -73,6 +72,7 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -1791,9 +1791,9 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas shards.put(new ShardId("test-idx", 0), new ShardSnapshotStatus("unknown-node", State.ABORTED)); shards.put(new ShardId("test-idx", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED)); shards.put(new ShardId("test-idx", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED)); - ImmutableList.Builder<Entry> entries = ImmutableList.builder(); - entries.add(new Entry(new SnapshotId("test-repo", "test-snap"), true, State.ABORTED, ImmutableList.of("test-idx"), System.currentTimeMillis(), shards.build())); - return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(entries.build())).build(); + List<Entry> entries = new ArrayList<>(); + entries.add(new Entry(new SnapshotId("test-repo", "test-snap"), true, State.ABORTED, Collections.singletonList("test-idx"), System.currentTimeMillis(), shards.build())); + return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(Collections.unmodifiableList(entries))).build(); } @Override diff --git a/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java b/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java index 76f057a60ff..8e9d7cb8428 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/snapshots/SnapshotUtilsTests.java @@ -18,14 +18,13 @@ */ package org.elasticsearch.snapshots; -import com.google.common.collect.ImmutableList; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.test.ESTestCase; import org.junit.Test; +import java.util.Arrays; import java.util.List; -import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsInAnyOrder; /** @@ -52,7 +51,7 @@ public class SnapshotUtilsTests extends ESTestCase { } private void assertIndexNameFiltering(String[] indices, String[] filter, IndicesOptions indicesOptions, String[] expected) { - List<String> indicesList = ImmutableList.copyOf(indices); + List<String> indicesList = Arrays.asList(indices); List<String> actual = SnapshotUtils.filterIndices(indicesList, filter, indicesOptions); assertThat(actual, containsInAnyOrder(expected)); } diff --git a/core/src/test/java/org/elasticsearch/test/VersionUtils.java b/core/src/test/java/org/elasticsearch/test/VersionUtils.java index 316a3926d5d..a3efe63a02a 100644 --- a/core/src/test/java/org/elasticsearch/test/VersionUtils.java +++ b/core/src/test/java/org/elasticsearch/test/VersionUtils.java @@ -19,7 +19,6 @@
package org.elasticsearch.test; -import com.google.common.collect.ImmutableList; import org.elasticsearch.Version; import java.lang.reflect.Field; @@ -53,11 +52,11 @@ public class VersionUtils { } List<Integer> idList = new ArrayList<>(ids); Collections.sort(idList); - ImmutableList.Builder<Version> version = ImmutableList.builder(); + List<Version> version = new ArrayList<>(); for (Integer integer : idList) { version.add(Version.fromId(integer)); } - SORTED_VERSIONS = version.build(); + SORTED_VERSIONS = Collections.unmodifiableList(version); } /** Returns immutable list of all known versions. */ diff --git a/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartition.java b/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartition.java index 174e83e15a4..88bcb9024a1 100644 --- a/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartition.java +++ b/core/src/test/java/org/elasticsearch/test/disruption/NetworkPartition.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.test.disruption; -import com.google.common.collect.ImmutableList; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; @@ -27,6 +26,8 @@ import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.TransportService; +import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Random; @@ -68,15 +69,15 @@ public abstract class NetworkPartition implements ServiceDisruptionScheme { } - public List<String> getNodesSideOne() { - return ImmutableList.copyOf(nodesSideOne); + public Collection<String> getNodesSideOne() { + return Collections.unmodifiableCollection(nodesSideOne); } - public List<String> getNodesSideTwo() { - return ImmutableList.copyOf(nodesSideTwo); + public Collection<String> getNodesSideTwo() { + return Collections.unmodifiableCollection(nodesSideTwo); } - public List<String> getMajoritySide() { + public Collection<String> getMajoritySide() { if (nodesSideOne.size() >= nodesSideTwo.size()) { return getNodesSideOne(); } else { @@ -84,7 +85,7 @@ public abstract class NetworkPartition implements ServiceDisruptionScheme { } } - public List<String> getMinoritySide() { + public Collection<String> getMinoritySide() { if (nodesSideOne.size() >= nodesSideTwo.size()) { return getNodesSideTwo(); } else { diff --git a/core/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java b/core/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java index 2a49cd4c594..ae53366657e 100644 --- a/core/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java +++ b/core/src/test/java/org/elasticsearch/test/rest/section/ApiCallSection.java @@ -19,11 +19,11 @@ package org.elasticsearch.test.rest.section; import com.google.common.base.Joiner; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -58,7 +58,7 @@ public class ApiCallSection { } public List<Map<String, Object>> getBodies() { - return ImmutableList.copyOf(bodies); + return Collections.unmodifiableList(bodies); } public void addBody(Map<String, Object> body) { From 6520395c1cc8f6cab4895047f8d0f21fc4a5f686 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 19 Aug 2015 09:37:35 -0700 Subject: [PATCH 02/51] tweaks --- .../action/admin/cluster/health/ClusterIndexHealth.java | 2 --
.../java/org/elasticsearch/bootstrap/JNAKernel32Library.java | 2 +- .../java/org/elasticsearch/transport/netty/NettyTransport.java | 2 +- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterIndexHealth.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterIndexHealth.java index bd65211d2d3..5637871547e 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterIndexHealth.java +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterIndexHealth.java @@ -23,7 +23,6 @@ import com.google.common.collect.Maps; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -33,7 +32,6 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; import java.util.Arrays; -import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Locale; diff --git a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java index a500e1784dd..17923433ba0 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/JNAKernel32Library.java @@ -86,7 +86,7 @@ class JNAKernel32Library { } List getCallbacks() { - return Collections.unmodifiableList(new ArrayList<>(callbacks)); + return Collections.unmodifiableList(callbacks); } /** diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index de7f91aa094..fdf2552eb41 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -1107,7 +1107,7 @@ public class NettyTransport extends AbstractLifecycleComponent<Transport> implem newAllChannels.addAll(Arrays.asList(reg)); newAllChannels.addAll(Arrays.asList(state)); newAllChannels.addAll(Arrays.asList(ping)); - this.allChannels = Collections.unmodifiableList(allChannels); + this.allChannels = Collections.unmodifiableList(newAllChannels); } public boolean hasChannel(Channel channel) { From 0a4d6b3a1d40335415340e753ee48ccbff8ac32c Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 20 Aug 2015 16:13:14 -0400 Subject: [PATCH 03/51] Enforce supported Maven versions Our builds are currently not compatible with Maven versions < 3.1.0 and >= 3.3.0. This commit will enforce Maven 3 versions that our builds are known to be compatible with. We will consider these our supported Maven versions. Closes #13022 --- pom.xml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pom.xml b/pom.xml index d82b7cab6f9..636ad1fd844 100644 --- a/pom.xml +++ b/pom.xml @@ -542,6 +542,19 @@ + <execution> + <id>enforce-maven-version</id> + <goals> + <goal>enforce</goal> + </goals> + <configuration> + <rules> + <requireMavenVersion> + <version>[3.1.0,3.3.0)</version> + </requireMavenVersion> + </rules> + </configuration> + </execution> <id>print-versions</id> <phase>validate</phase> From 57a83fce07bac77ccf787c682e5644999fefcc5b Mon Sep 17 00:00:00 2001 From: Jim Hooker Date: Wed, 19 Aug 2015 22:15:45 +0100 Subject: [PATCH 04/51] Turn DestructiveOperations.java into a Guice module. https://github.com/elastic/elasticsearch/issues/4665 Inject DestructiveOperations object rather than use new. Use constant rather than hard-coded string --- .../java/org/elasticsearch/action/ActionModule.java | 2 ++ .../indices/close/TransportCloseIndexAction.java | 4 ++-- .../indices/delete/TransportDeleteIndexAction.java | 4 ++-- .../indices/open/TransportOpenIndexAction.java | 5 +++-- .../action/support/DestructiveOperations.java | 13 ++++++------- 5 files changed, 15 insertions(+), 13 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index a4b71626fa8..4196c695bff 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -156,6 +156,7 @@ import org.elasticsearch.action.suggest.TransportSuggestAction; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; +import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.termvectors.*; import org.elasticsearch.action.termvectors.dfs.TransportDfsOnlyAction; @@ -223,6 +224,7 @@ public class ActionModule extends AbstractModule { } bind(ActionFilters.class).asEagerSingleton(); bind(AutoCreateIndex.class).asEagerSingleton(); + bind(DestructiveOperations.class).asEagerSingleton(); registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class); registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class); registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index 6eb0c0665c1..e4793027559 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -48,10 +48,10 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction Date: Sat, 22 Aug 2015 23:36:13 -0300 Subject: [PATCH 05/51] FunctionScore should work on unmapped fields when missing parameter is specified --- .../function/FieldValueFactorFunction.java | 17 ++++++++++++++--- .../FieldValueFactorFunctionParser.java | 12 +++++++++--- .../FunctionScoreFieldValueIT.java | 9 +++++++++ 3 files changed, 32 insertions(+), 6 deletions(-)
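To make the new behavior concrete before the hunks below: after this patch, a field_value_factor function may reference a field with no mapping as long as a missing value is supplied, instead of failing with "Unable to find a field mapper". A sketch of the call this enables, adapted from the integration test included in this patch (Java client API of this era; "test" and "notmapped" are the test's own names):

    SearchResponse response = client().prepareSearch("test")
            .setQuery(functionScoreQuery(matchAllQuery(),
                    fieldValueFactorFunction("notmapped")
                            .modifier(FieldValueFactorFunction.Modifier.RECIPROCAL)
                            .missing(100)))
            .get();
    // every document falls back to the missing value, so each scores reciprocal(100) = 1/100
    // and all hits receive the same score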
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java index 488c5fec603..e6d1362f9e7 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java @@ -54,13 +54,24 @@ public class FieldValueFactorFunction extends ScoreFunction { @Override public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) { - final SortedNumericDoubleValues values = this.indexFieldData.load(ctx).getDoubleValues(); + final SortedNumericDoubleValues values; + if(indexFieldData == null) { + values = null; + } else { + values = this.indexFieldData.load(ctx).getDoubleValues(); + } +
return new LeafScoreFunction() { @Override public double score(int docId, float subQueryScore) { - values.setDocument(docId); - final int numValues = values.count(); + final int numValues; + if(values == null){ + numValues = 0; + } else { + values.setDocument(docId); + numValues = values.count(); + } double value; if (numValues > 0) { value = values.valueAt(0); diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java index e6a8f2dabcc..6f68db58aca 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java @@ -19,11 +19,13 @@ package org.elasticsearch.index.query.functionscore.fieldvaluefactor; +import org.apache.lucene.document.FieldType; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lucene.search.function.FieldValueFactorFunction; import org.elasticsearch.common.lucene.search.function.ScoreFunction; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexNumericFieldData; +import org.elasticsearch.index.fielddata.plain.DoubleArrayIndexFieldData; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryParseContext; @@ -86,11 +88,15 @@ public class FieldValueFactorFunctionParser implements ScoreFunctionParser { SearchContext searchContext = SearchContext.current(); MappedFieldType fieldType = searchContext.mapperService().smartNameFieldType(field); + IndexNumericFieldData fieldData = null; if (fieldType == null) { - throw new ElasticsearchException("Unable to find a field mapper for field [" + field + "]"); + if(missing == null) { + throw new ElasticsearchException("Unable to find a field mapper for field [" + field + "]. No 'missing' value defined."); + } + } else { + fieldData = searchContext.fieldData().getForField(fieldType); } - return new FieldValueFactorFunction(field, boostFactor, modifier, missing, - (IndexNumericFieldData)searchContext.fieldData().getForField(fieldType)); + return new FieldValueFactorFunction(field, boostFactor, modifier, missing, fieldData); } @Override diff --git a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java index 577b3eaea31..24d06701b40 100644 --- a/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java +++ b/core/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueIT.java @@ -104,6 +104,15 @@ public class FunctionScoreFieldValueIT extends ESIntegTestCase { .get(); assertOrderedSearchHits(response, "1", "2", "3"); + // field is not mapped but we're defaulting it to 100 so all documents should have the same score + response = client().prepareSearch("test") + .setExplain(randomBoolean()) + .setQuery(functionScoreQuery(matchAllQuery(), + fieldValueFactorFunction("notmapped").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).missing(100))) + .get(); + assertEquals(response.getHits().getAt(0).score(), response.getHits().getAt(2).score(), 0); + + // n divided by 0 is infinity, which should provoke an exception. 
try { response = client().prepareSearch("test") From 60ab72e32246cd301b194faaebff91f7c25ef6a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Carvalho?= Date: Mon, 24 Aug 2015 09:19:45 -0300 Subject: [PATCH 06/51] using emptySortedNumericDoubles instead of null in score function --- .../search/function/FieldValueFactorFunction.java | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java index e6d1362f9e7..38c2cc98a2e 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.lucene.search.function; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Explanation; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; @@ -56,7 +57,7 @@ public class FieldValueFactorFunction extends ScoreFunction { public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) { final SortedNumericDoubleValues values; if(indexFieldData == null) { - values = null; + values = FieldData.emptySortedNumericDoubles(0); } else { values = this.indexFieldData.load(ctx).getDoubleValues(); } @@ -65,13 +66,8 @@ public class FieldValueFactorFunction extends ScoreFunction { @Override public double score(int docId, float subQueryScore) { - final int numValues; - if(values == null){ - numValues = 0; - } else { - values.setDocument(docId); - numValues = values.count(); - } + values.setDocument(docId); + final int numValues = values.count(); double value; if (numValues > 0) { value = values.valueAt(0); From c2c4742f6543991dc06f106b45d96f3a03eb4e7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Carvalho?= Date: Mon, 24 Aug 2015 10:02:18 -0300 Subject: [PATCH 07/51] proper creation of values for unmapped field on score function --- .../common/lucene/search/function/FieldValueFactorFunction.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java index 38c2cc98a2e..cb2babb574f 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java @@ -57,7 +57,7 @@ public class FieldValueFactorFunction extends ScoreFunction { public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) { final SortedNumericDoubleValues values; if(indexFieldData == null) { - values = FieldData.emptySortedNumericDoubles(0); + values = FieldData.emptySortedNumericDoubles(ctx.reader().maxDoc()); } else { values = this.indexFieldData.load(ctx).getDoubleValues(); } From 01cfa95d1c33ac8aa222cceef77f4fbdd3086225 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Mon, 24 Aug 2015 21:22:21 -0700 Subject: [PATCH 08/51] do real copies where copyOf was used before --- .../cluster/routing/IndexShardRoutingTable.java | 2 +- 
.../org/elasticsearch/gateway/DanglingIndicesState.java | 2 +- .../java/org/elasticsearch/index/shard/CommitPoint.java | 5 +++-- .../java/org/elasticsearch/index/shard/CommitPoints.java | 3 ++- .../snapshots/blobstore/BlobStoreIndexShardSnapshot.java | 3 ++- .../snapshots/blobstore/BlobStoreIndexShardSnapshots.java | 6 +++--- .../java/org/elasticsearch/snapshots/SnapshotUtils.java | 2 +- 7 files changed, 13 insertions(+), 10 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index e1aa7a085d3..902628f3d7e 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -611,7 +611,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> { } public IndexShardRoutingTable build() { - return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(shards)); + return new IndexShardRoutingTable(shardId, Collections.unmodifiableList(new ArrayList<>(shards))); } public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index 983fcf73b3f..1ccdd55dad7 100644 --- a/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/core/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -141,7 +141,7 @@ public class DanglingIndicesState extends AbstractComponent { return; } try { - allocateDangledIndices.allocateDangled(Collections.unmodifiableCollection(danglingIndices.values()), new LocalAllocateDangledIndices.Listener() { + allocateDangledIndices.allocateDangled(Collections.unmodifiableCollection(new ArrayList<>(danglingIndices.values())), new LocalAllocateDangledIndices.Listener() { @Override public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) { logger.trace("allocated dangled"); diff --git a/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java b/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java index 6afa0c96e5f..916cf563fb8 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java +++ b/core/src/main/java/org/elasticsearch/index/shard/CommitPoint.java @@ -22,6 +22,7 @@ package org.elasticsearch.index.shard; import org.elasticsearch.common.Nullable; import org.elasticsearch.index.store.StoreFileMetaData; +import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -89,8 +90,8 @@ public class CommitPoint { this.version = version; this.name = name; this.type = type; - this.indexFiles = Collections.unmodifiableList(indexFiles); - this.translogFiles = Collections.unmodifiableList(translogFiles); + this.indexFiles = Collections.unmodifiableList(new ArrayList<>(indexFiles)); + this.translogFiles = Collections.unmodifiableList(new ArrayList<>(translogFiles)); } public long version() {
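The rationale for these copies, spelled out: Collections.unmodifiableList and unmodifiableCollection only wrap their argument, and Arrays.asList is itself a fixed-size view over the caller's array, so without a real copy the "immutable" result still aliases storage the caller can mutate. A small self-contained sketch of the aliasing this patch guards against (illustrative only):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class DefensiveCopyDemo {
        public static void main(String[] args) {
            String[] indices = {"idx-1", "idx-2"};
            List<String> view = Collections.unmodifiableList(Arrays.asList(indices));
            List<String> copy = Collections.unmodifiableList(new ArrayList<>(Arrays.asList(indices)));
            indices[0] = "mutated";           // the caller still owns the array...
            System.out.println(view.get(0));  // mutated (the view aliases the array)
            System.out.println(copy.get(0));  // idx-1 (the real copy does not)
        }
    }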
diff --git a/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java b/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java index 9f4a2aa96ce..403bd4d6212 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java +++ b/core/src/main/java/org/elasticsearch/index/shard/CommitPoints.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.Iterator; @@ -46,7 +47,7 @@ public class CommitPoints implements Iterable<CommitPoint> { return (o2.version() < o1.version() ? -1 : (o2.version() == o1.version() ? 0 : 1)); } }); - this.commitPoints = Collections.unmodifiableList(commitPoints); + this.commitPoints = Collections.unmodifiableList(new ArrayList<>(commitPoints)); } public List<CommitPoint> commits() { diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java index 282e869ef01..afc4db597c8 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshot.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.*; import org.elasticsearch.index.store.StoreFileMetaData; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -342,7 +343,7 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil assert indexVersion >= 0; this.snapshot = snapshot; this.indexVersion = indexVersion; - this.indexFiles = Collections.unmodifiableList(indexFiles); + this.indexFiles = Collections.unmodifiableList(new ArrayList<>(indexFiles)); this.startTime = startTime; this.time = time; this.numberOfFiles = numberOfFiles; diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java index fa06dff558d..098dd5208b0 100644 --- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java +++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardSnapshots.java @@ -51,7 +51,7 @@ public class BlobStoreIndexShardSnapshots implements Iterable<SnapshotFiles>, To private final ImmutableMap<String, List<FileInfo>> physicalFiles; public BlobStoreIndexShardSnapshots(List<SnapshotFiles> shardSnapshots) { - this.shardSnapshots = Collections.unmodifiableList(shardSnapshots); + this.shardSnapshots = Collections.unmodifiableList(new ArrayList<>(shardSnapshots)); // Map between blob names and file info Map<String, FileInfo> newFiles = newHashMap(); // Map between original physical names and file info @@ -77,7 +77,7 @@ public class BlobStoreIndexShardSnapshots implements Iterable<SnapshotFiles>, To } ImmutableMap.Builder<String, List<FileInfo>> mapBuilder = ImmutableMap.builder(); for (Map.Entry<String, List<FileInfo>> entry : physicalFiles.entrySet()) { - mapBuilder.put(entry.getKey(), Collections.unmodifiableList(entry.getValue())); + mapBuilder.put(entry.getKey(), Collections.unmodifiableList(new ArrayList<>(entry.getValue()))); } this.physicalFiles = mapBuilder.build(); this.files = ImmutableMap.copyOf(newFiles); @@ -99,7 +99,7 @@ public class BlobStoreIndexShardSnapshots implements Iterable<SnapshotFiles>, To } ImmutableMap.Builder<String, List<FileInfo>> mapBuilder = ImmutableMap.builder(); for (Map.Entry<String, List<FileInfo>> entry : physicalFiles.entrySet()) { - mapBuilder.put(entry.getKey(), Collections.unmodifiableList(entry.getValue())); + mapBuilder.put(entry.getKey(), Collections.unmodifiableList(new ArrayList<>(entry.getValue()))); } this.physicalFiles = mapBuilder.build(); } diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java
b/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java index feda0276bd9..0f76fbdfc7f 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java +++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotUtils.java @@ -116,7 +116,7 @@ public class SnapshotUtils { } } if (result == null) { - return Arrays.asList(selectedIndices); + return Collections.unmodifiableList(new ArrayList<>(Arrays.asList(selectedIndices))); } return Collections.unmodifiableList(new ArrayList<>(result)); } From 05aa1d90b84697f9a489b03f50fd66192fdadba7 Mon Sep 17 00:00:00 2001 From: Jim Hooker Date: Sat, 29 Aug 2015 07:43:17 +0100 Subject: [PATCH 09/51] Extend AbstractComponent and remove logger --- .../action/support/DestructiveOperations.java | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java index 5452a4a7519..b73ee8a75fd 100644 --- a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java +++ b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java @@ -19,27 +19,25 @@ package org.elasticsearch.action.support; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.settings.NodeSettingsService; /** * Helper for dealing with destructive operations and wildcard usage. */ -public final class DestructiveOperations implements NodeSettingsService.Listener { +public final class DestructiveOperations extends AbstractComponent implements NodeSettingsService.Listener { /** * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed. */ public static final String REQUIRES_NAME = "action.destructive_requires_name"; - - private final ESLogger logger = Loggers.getLogger(DestructiveOperations.class); private volatile boolean destructiveRequiresName; @Inject public DestructiveOperations(Settings settings, NodeSettingsService nodeSettingsService) { + super(settings); destructiveRequiresName = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, false); nodeSettingsService.addListener(this); } From 1c85b686745bc6db37292f10e5f18d76d4578f5a Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Sat, 29 Aug 2015 17:21:46 -0400 Subject: [PATCH 10/51] Don't document expert segment merge settings --- docs/reference/index-modules.asciidoc | 6 - docs/reference/index-modules/merge.asciidoc | 113 ------------------ docs/reference/search/request/scroll.asciidoc | 2 +- .../suggesters/completion-suggest.asciidoc | 4 +- 4 files changed, 3 insertions(+), 122 deletions(-) delete mode 100644 docs/reference/index-modules/merge.asciidoc diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index 075c460321f..7d2a5ab03db 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -152,10 +152,6 @@ Other index settings are available in index modules: Enable or disable dynamic mapping for an index. -<<index-modules-merge>>:: - - Control over how shards are merged by the background merge process. - <<index-modules-similarity>>:: Configure custom similarity settings to customize how search results are @@ -181,8 +177,6 @@ include::index-modules/allocation.asciidoc[] include::index-modules/mapper.asciidoc[] -include::index-modules/merge.asciidoc[] - include::index-modules/similarity.asciidoc[] include::index-modules/slowlog.asciidoc[] diff --git a/docs/reference/index-modules/merge.asciidoc b/docs/reference/index-modules/merge.asciidoc deleted file mode 100644 index ac6de1d3a9e..00000000000 --- a/docs/reference/index-modules/merge.asciidoc +++ /dev/null @@ -1,113 +0,0 @@ -[[index-modules-merge]] -== Merge - -experimental[All of the settings exposed in the `merge` module are expert only and may be removed in the future] - -A shard in elasticsearch is a Lucene index, and a Lucene index is broken -down into segments. Segments are internal storage elements in the index -where the index data is stored, and are immutable up to delete markers. -Segments are, periodically, merged into larger segments to keep the -index size at bay and expunge deletes. - -Merges segments of approximately equal size, subject to an allowed -number of segments per tier. The merge policy is able to merge -non-adjacent segments, and separates how many segments are merged at once from how many -segments are allowed per tier. It also does not over-merge (i.e., cascade merges). - -[float] -[[merge-settings]] -=== Merge policy settings - -All merge policy settings are _dynamic_ and can be updated on a live index. -The merge policy has the following settings: - -`index.merge.policy.expunge_deletes_allowed`:: - - When expungeDeletes is called, we only merge away a segment if its delete - percentage is over this threshold. Default is `10`. - -`index.merge.policy.floor_segment`:: - - Segments smaller than this are "rounded up" to this size, i.e. treated as - equal (floor) size for merge selection. This is to prevent frequent - flushing of tiny segments, thus preventing a long tail in the index. Default - is `2mb`. - -`index.merge.policy.max_merge_at_once`:: - - Maximum number of segments to be merged at a time during "normal" merging. - Default is `10`. - -`index.merge.policy.max_merge_at_once_explicit`:: - - Maximum number of segments to be merged at a time, during optimize or - expungeDeletes. Default is `30`. - -`index.merge.policy.max_merged_segment`:: - - Maximum sized segment to produce during normal merging (not explicit - optimize). This setting is approximate: the estimate of the merged segment - size is made by summing sizes of to-be-merged segments (compensating for - percent deleted docs). Default is `5gb`. - -`index.merge.policy.segments_per_tier`:: - - Sets the allowed number of segments per tier. Smaller values mean more - merging but fewer segments. Default is `10`. Note, this value needs to be - >= than the `max_merge_at_once` otherwise you'll force too many merges to - occur. - -`index.merge.policy.reclaim_deletes_weight`:: - - Controls how aggressively merges that reclaim more deletions are favored. - Higher values favor selecting merges that reclaim deletions. A value of - `0.0` means deletions don't impact merge selection. Defaults to `2.0`. - -For normal merging, the policy first computes a "budget" of how many -segments are allowed to be in the index. If the index is over-budget, -then the policy sorts segments by decreasing size (proportionally considering percent -deletes), and then finds the least-cost merge.
Merge cost is measured by -a combination of the "skew" of the merge (size of largest seg divided by -smallest seg), total merge size and pct deletes reclaimed, so that -merges with lower skew, smaller size and those reclaiming more deletes, -are favored. - -If a merge will produce a segment that's larger than -`max_merged_segment` then the policy will merge fewer segments (down to -1 at once, if that one has deletions) to keep the segment size under -budget. - -Note, this can mean that for large shards that holds many gigabytes of -data, the default of `max_merged_segment` (`5gb`) can cause for many -segments to be in an index, and causing searches to be slower. Use the -indices segments API to see the segments that an index has, and -possibly either increase the `max_merged_segment` or issue an optimize -call for the index (try and aim to issue it on a low traffic time). - -[float] -[[merge-scheduling]] -=== Merge scheduling - -The merge scheduler (ConcurrentMergeScheduler) controls the execution of -merge operations once they are needed (according to the merge policy). Merges -run in separate threads, and when the maximum number of threads is reached, -further merges will wait until a merge thread becomes available. - -The merge scheduler supports the following _dynamic_ settings: - -`index.merge.scheduler.max_thread_count`:: - - The maximum number of threads that may be merging at once. Defaults to - `Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2))` - which works well for a good solid-state-disk (SSD). If your index is on - spinning platter drives instead, decrease this to 1. - -`index.merge.scheduler.auto_throttle`:: - - If this is true (the default), then the merge scheduler will rate-limit IO - (writes) for merges to an adaptive value depending on how many merges are - requested over time. An application with a low indexing rate that - unluckily suddenly requires a large merge will see that merge aggressively - throttled, while an application doing heavy indexing will see the throttle - move higher to allow merges to keep up with ongoing indexing. - diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index 2ad1f57388e..29214415bbf 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -115,7 +115,7 @@ process all data -- it just needs to be long enough to process the previous batch of results. Each `scroll` request (with the `scroll` parameter) sets a new expiry time. -Normally, the <> optimizes the +Normally, the background merge process optimizes the index by merging together smaller segments to create new bigger segments, at which time the smaller segments are deleted. This process continues during scrolling, but an open search context prevents the old segments from being diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 3b4f70f3e89..ee8969b95e5 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -136,8 +136,8 @@ payloads or weights. This form does still work inside of multi fields. NOTE: The suggest data structure might not reflect deletes on documents immediately. You may need to do an <> for that. -You can call optimize with the `only_expunge_deletes=true` to only cater for deletes -or alternatively call a <> operation. 
+You can call optimize with the `only_expunge_deletes=true` to only target +deletions for merging. [[querying]] ==== Querying From 7ad2222ccc933cc68997e47e1f34b668fe2a8aed Mon Sep 17 00:00:00 2001 From: Michael McCandless Date: Sun, 30 Aug 2015 18:14:47 -0400 Subject: [PATCH 11/51] copy over merge docs as javadocs --- .../index/shard/MergePolicyConfig.java | 88 ++++++++++++++++++- .../index/shard/MergeSchedulerConfig.java | 25 +++++- 2 files changed, 111 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/shard/MergePolicyConfig.java b/core/src/main/java/org/elasticsearch/index/shard/MergePolicyConfig.java index cf4a4c85d10..3895bbed2c4 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/MergePolicyConfig.java +++ b/core/src/main/java/org/elasticsearch/index/shard/MergePolicyConfig.java @@ -28,6 +28,92 @@ import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.settings.IndexSettingsService; +/** + * A shard in elasticsearch is a Lucene index, and a Lucene index is broken + * down into segments. Segments are internal storage elements in the index + * where the index data is stored, and are immutable up to delete markers. + * Segments are, periodically, merged into larger segments to keep the + * index size at bay and expunge deletes. + * + *
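The settings documented below are wired into Lucene's TieredMergePolicy (the mergePolicy field visible further down in this hunk). As a rough orientation, here is a minimal standalone sketch of the corresponding Lucene-side knobs; the setter names are Lucene's TieredMergePolicy API, the values are arbitrary examples, and none of this is part of the patch itself:

    import org.apache.lucene.index.TieredMergePolicy;

    public class TieredMergePolicySketch {
        public static void main(String[] args) {
            TieredMergePolicy policy = new TieredMergePolicy();
            // Rough counterparts of the index.merge.policy.* settings described below.
            policy.setFloorSegmentMB(2);             // floor_segment
            policy.setMaxMergeAtOnce(10);            // max_merge_at_once
            policy.setMaxMergedSegmentMB(5 * 1024);  // max_merged_segment
            policy.setSegmentsPerTier(10);           // segments_per_tier
            System.out.println(policy);
        }
    }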
+ * <p>
+ * Merges select segments of approximately equal size, subject to an allowed
+ * number of segments per tier. The merge policy is able to merge
+ * non-adjacent segments, and separates how many segments are merged at once from how many
+ * segments are allowed per tier. It also does not over-merge (i.e., cascade merges).
+ *
+ * <p>
+ * All merge policy settings are dynamic and can be updated on a live index.
+ * The merge policy has the following settings:
+ *
+ * <ul>
+ * <li><code>index.merge.policy.expunge_deletes_allowed</code>:
+ *
+ *     When expungeDeletes is called, we only merge away a segment if its delete
+ *     percentage is over this threshold. Default is <code>10</code>.
+ *
+ * <li><code>index.merge.policy.floor_segment</code>:
+ *
+ *     Segments smaller than this are "rounded up" to this size, i.e. treated as
+ *     equal (floor) size for merge selection. This is to prevent frequent
+ *     flushing of tiny segments, thus preventing a long tail in the index. Default
+ *     is <code>2mb</code>.
+ *
+ * <li><code>index.merge.policy.max_merge_at_once</code>:
+ *
+ *     Maximum number of segments to be merged at a time during "normal" merging.
+ *     Default is <code>10</code>.
+ *
+ * <li><code>index.merge.policy.max_merge_at_once_explicit</code>:
+ *
+ *     Maximum number of segments to be merged at a time, during optimize or
+ *     expungeDeletes. Default is <code>30</code>.
+ *
+ * <li><code>index.merge.policy.max_merged_segment</code>:
+ *
+ *     Maximum sized segment to produce during normal merging (not explicit
+ *     optimize). This setting is approximate: the estimate of the merged segment
+ *     size is made by summing sizes of to-be-merged segments (compensating for
+ *     percent deleted docs). Default is <code>5gb</code>.
+ *
+ * <li><code>index.merge.policy.segments_per_tier</code>:
+ *
+ *     Sets the allowed number of segments per tier. Smaller values mean more
+ *     merging but fewer segments. Default is <code>10</code>. Note, this value needs to
+ *     be &gt;= the <code>max_merge_at_once</code>, otherwise you'll force too many merges
+ *     to occur.
+ *
+ * <li><code>index.merge.policy.reclaim_deletes_weight</code>:
+ *
+ *     Controls how aggressively merges that reclaim more deletions are favored.
+ *     Higher values favor selecting merges that reclaim deletions. A value of
+ *     <code>0.0</code> means deletions don't impact merge selection. Defaults to <code>2.0</code>.
+ * </ul>
+ *
+ * <p>
+ * For normal merging, the policy first computes a "budget" of how many
+ * segments are allowed to be in the index. If the index is over-budget,
+ * then the policy sorts segments by decreasing size (proportionally considering percent
+ * deletes), and then finds the least-cost merge. Merge cost is measured by
+ * a combination of the "skew" of the merge (size of largest seg divided by
+ * smallest seg), total merge size and pct deletes reclaimed, so that
+ * merges with lower skew, smaller size and those reclaiming more deletes,
+ * are favored.
+ *
+ * <p>
+ * If a merge will produce a segment that's larger than
+ * <code>max_merged_segment</code> then the policy will merge fewer segments (down to
+ * 1 at once, if that one has deletions) to keep the segment size under
+ * budget.
+ *
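To make the settings above concrete, here is a minimal sketch of assembling them with the Settings builder that this class consumes; it is illustrative only and not part of the patch — the keys come from the list above, and the values are arbitrary examples, not recommendations:

    import org.elasticsearch.common.settings.Settings;

    public class MergePolicySettingsSketch {
        public static void main(String[] args) {
            Settings settings = Settings.builder()
                    .put("index.merge.policy.floor_segment", "2mb")
                    .put("index.merge.policy.max_merge_at_once", 10)
                    .put("index.merge.policy.max_merged_segment", "5gb")
                    .put("index.merge.policy.segments_per_tier", 10)
                    .build();
            // Settings#get returns the raw string value stored for a key.
            System.out.println(settings.get("index.merge.policy.max_merged_segment"));
        }
    }

Because every merge policy setting is dynamic, the same keys can also be applied to a live index through the update-index-settings API.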
+ * <p>
+ * Note, this can mean that for large shards that hold many gigabytes of
+ * data, the default of <code>max_merged_segment</code> (<code>5gb</code>) can leave many
+ * segments in an index, causing searches to be slower. Use the
+ * indices segments API to see the segments that an index has, and
+ * possibly either increase the <code>max_merged_segment</code> or issue an optimize
+ * call for the index (try to issue it at a low-traffic time).
+ */
+
 public final class MergePolicyConfig implements IndexSettingsService.Listener{
     private final TieredMergePolicy mergePolicy = new TieredMergePolicy();
     private final ESLogger logger;
@@ -187,4 +273,4 @@ public final class MergePolicyConfig implements IndexSettingsService.Listener{
             return Double.toString(ratio);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/core/src/main/java/org/elasticsearch/index/shard/MergeSchedulerConfig.java b/core/src/main/java/org/elasticsearch/index/shard/MergeSchedulerConfig.java
index 9c8aba25ee3..f061a95f2af 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/MergeSchedulerConfig.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/MergeSchedulerConfig.java
@@ -24,7 +24,30 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 
 /**
- *
+ * The merge scheduler (ConcurrentMergeScheduler) controls the execution of
+ * merge operations once they are needed (according to the merge policy). Merges
+ * run in separate threads, and when the maximum number of threads is reached,
+ * further merges will wait until a merge thread becomes available.
+ *
+ * <p>
+ * The merge scheduler supports the following dynamic settings:
+ *
+ * <ul>
+ * <li><code>index.merge.scheduler.max_thread_count</code>:
+ *
+ *     The maximum number of threads that may be merging at once. Defaults to
+ *     <code>Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2))</code>,
+ *     which works well for a good solid-state disk (SSD). If your index is on
+ *     spinning-platter drives instead, decrease this to 1.
+ *
+ * <li><code>index.merge.scheduler.auto_throttle</code>:
+ *
+ *     If this is true (the default), then the merge scheduler will rate-limit IO
+ *     (writes) for merges to an adaptive value depending on how many merges are
+ *     requested over time. An application with a low indexing rate that
+ *     unluckily suddenly requires a large merge will see that merge aggressively
+ *     throttled, while an application doing heavy indexing will see the throttle
+ *     move higher to allow merges to keep up with ongoing indexing.
+ * </ul>
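The max_thread_count default quoted above is plain JDK arithmetic, so it is easy to check what it resolves to on a given machine; this tiny standalone snippet (not part of the patch) prints it:

    public class DefaultMaxThreadCount {
        public static void main(String[] args) {
            // Half the available processors, clamped to the range [1, 4].
            int maxThreadCount = Math.max(1, Math.min(4, Runtime.getRuntime().availableProcessors() / 2));
            System.out.println("index.merge.scheduler.max_thread_count default: " + maxThreadCount);
        }
    }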
*/ public final class MergeSchedulerConfig { From c01b377ea844e66efae63d1cc017dbcb5ad79408 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 26 Aug 2015 14:32:52 -0700 Subject: [PATCH 12/51] Mappings: Fix numerous checks for equality and compatibility The field type tests for mappings had a huge hole: check compatibility was not tested directly at all! I had meant for this to happen in a follow up after #8871, and was relying on existing mapping tests. However, there were a number of issues. This change reworks the fieldtype tests to be able to check all settable properties on a field type work with checkCompatibility. It fixes a handful of small bugs in various field types. In particular, analyzer comparison was just wrong: it was comparing reference equality for search analyzer instead of the analyzer name. There was also no check for search quote analyzer. closes #13112 --- .../index/analysis/NamedAnalyzer.java | 15 + .../index/mapper/MappedFieldType.java | 52 ++-- .../index/mapper/core/BinaryFieldMapper.java | 9 + .../mapper/core/CompletionFieldMapper.java | 30 +- .../index/mapper/geo/GeoPointFieldMapper.java | 16 +- .../index/mapper/geo/GeoShapeFieldMapper.java | 17 +- .../internal/FieldNamesFieldMapper.java | 1 + .../index/mapper/FieldTypeLookupTests.java | 4 +- .../index/mapper/FieldTypeTestCase.java | 281 +++++++++++++++--- .../mapper/core/BinaryFieldTypeTests.java | 22 +- .../mapper/core/BooleanFieldTypeTests.java | 7 +- .../index/mapper/core/ByteFieldTypeTests.java | 7 +- .../mapper/core/CompletionFieldTypeTests.java | 45 ++- .../index/mapper/core/DateFieldTypeTests.java | 40 +-- .../mapper/core/DoubleFieldTypeTests.java | 7 +- .../mapper/core/FloatFieldTypeTests.java | 7 +- .../mapper/core/IntegerFieldTypeTests.java | 7 +- .../index/mapper/core/LongFieldTypeTests.java | 7 +- .../mapper/core/ShortFieldTypeTests.java | 7 +- .../mapper/geo/GeoPointFieldMapperTests.java | 5 +- .../mapper/geo/GeoPointFieldTypeTests.java | 44 ++- .../mapper/geo/GeoShapeFieldMapperTests.java | 8 +- .../mapper/geo/GeoShapeFieldTypeTests.java | 60 ++-- .../internal/FieldNamesFieldTypeTests.java | 22 +- .../merge/JavaMultiFieldMergeTests.java | 8 +- .../string/SimpleStringMappingTests.java | 2 +- .../timestamp/TimestampMappingTests.java | 13 +- .../update/UpdateMappingOnClusterIT.java | 16 +- 28 files changed, 552 insertions(+), 207 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java b/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java index f3a50390667..25ff8f96834 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/NamedAnalyzer.java @@ -22,6 +22,8 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; +import java.util.Objects; + /** * Named analyzer is an analyzer wrapper around an actual analyzer ({@link #analyzer} that is associated * with a name ({@link #name()}. 
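The hunk below gives NamedAnalyzer an equals/hashCode based purely on the analyzer's name, which is what lets mappings compare analyzers by name instead of by reference. A minimal sketch of that contract, illustrative only, reusing the same constructor the reworked FieldTypeTestCase uses:

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.elasticsearch.index.analysis.NamedAnalyzer;

    public class NamedAnalyzerEqualitySketch {
        public static void main(String[] args) {
            // Two distinct wrapped analyzer instances, same logical name.
            NamedAnalyzer a = new NamedAnalyzer("std", new StandardAnalyzer());
            NamedAnalyzer b = new NamedAnalyzer("std", new StandardAnalyzer());
            System.out.println(a.equals(b));                  // true: equality is by name
            System.out.println(a.hashCode() == b.hashCode()); // true: hashCode agrees with equals
        }
    }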
@@ -104,4 +106,17 @@ public class NamedAnalyzer extends DelegatingAnalyzerWrapper { throw new IllegalStateException("NamedAnalyzer cannot be wrapped with a wrapper, only a delegator"); } }; + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof NamedAnalyzer)) return false; + NamedAnalyzer that = (NamedAnalyzer) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index 65113e61033..112a8f4b480 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -192,13 +192,24 @@ public abstract class MappedFieldType extends FieldType { public boolean equals(Object o) { if (!super.equals(o)) return false; MappedFieldType fieldType = (MappedFieldType) o; + // check similarity first because we need to check the name, and it might be null + // TODO: SimilarityProvider should have equals? + if (similarity == null || fieldType.similarity == null) { + if (similarity != fieldType.similarity) { + return false; + } + } else { + if (Objects.equals(similarity.name(), fieldType.similarity.name()) == false) { + return false; + } + } + return boost == fieldType.boost && docValues == fieldType.docValues && Objects.equals(names, fieldType.names) && Objects.equals(indexAnalyzer, fieldType.indexAnalyzer) && Objects.equals(searchAnalyzer, fieldType.searchAnalyzer) && Objects.equals(searchQuoteAnalyzer(), fieldType.searchQuoteAnalyzer()) && - Objects.equals(similarity, fieldType.similarity) && Objects.equals(normsLoading, fieldType.normsLoading) && Objects.equals(fieldDataType, fieldType.fieldDataType) && Objects.equals(nullValue, fieldType.nullValue) && @@ -207,10 +218,11 @@ public abstract class MappedFieldType extends FieldType { @Override public int hashCode() { - return Objects.hash(super.hashCode(), names, boost, docValues, indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer, similarity, normsLoading, fieldDataType, nullValue, nullValueAsString); + return Objects.hash(super.hashCode(), names, boost, docValues, indexAnalyzer, searchAnalyzer, searchQuoteAnalyzer, + similarity == null ? 
null : similarity.name(), normsLoading, fieldDataType, nullValue, nullValueAsString); } -// norelease: we need to override freeze() and add safety checks that all settings are actually set + // norelease: we need to override freeze() and add safety checks that all settings are actually set /** Returns the name of this type, as would be specified in mapping properties */ public abstract String typeName(); @@ -234,51 +246,48 @@ public abstract class MappedFieldType extends FieldType { boolean mergeWithIndexed = other.indexOptions() != IndexOptions.NONE; // TODO: should be validating if index options go "up" (but "down" is ok) if (indexed != mergeWithIndexed || tokenized() != other.tokenized()) { - conflicts.add("mapper [" + names().fullName() + "] has different index values"); + conflicts.add("mapper [" + names().fullName() + "] has different [index] values"); } if (stored() != other.stored()) { - conflicts.add("mapper [" + names().fullName() + "] has different store values"); + conflicts.add("mapper [" + names().fullName() + "] has different [store] values"); } if (hasDocValues() == false && other.hasDocValues()) { // don't add conflict if this mapper has doc values while the mapper to merge doesn't since doc values are implicitly set // when the doc_values field data format is configured - conflicts.add("mapper [" + names().fullName() + "] has different doc_values values"); + conflicts.add("mapper [" + names().fullName() + "] has different [doc_values] values, cannot change from disabled to enabled"); } if (omitNorms() && !other.omitNorms()) { - conflicts.add("mapper [" + names().fullName() + "] cannot enable norms (`norms.enabled`)"); - } - if (tokenized() != other.tokenized()) { - conflicts.add("mapper [" + names().fullName() + "] has different tokenize values"); + conflicts.add("mapper [" + names().fullName() + "] has different [omit_norms] values, cannot change from disable to enabled"); } if (storeTermVectors() != other.storeTermVectors()) { - conflicts.add("mapper [" + names().fullName() + "] has different store_term_vector values"); + conflicts.add("mapper [" + names().fullName() + "] has different [store_term_vector] values"); } if (storeTermVectorOffsets() != other.storeTermVectorOffsets()) { - conflicts.add("mapper [" + names().fullName() + "] has different store_term_vector_offsets values"); + conflicts.add("mapper [" + names().fullName() + "] has different [store_term_vector_offsets] values"); } if (storeTermVectorPositions() != other.storeTermVectorPositions()) { - conflicts.add("mapper [" + names().fullName() + "] has different store_term_vector_positions values"); + conflicts.add("mapper [" + names().fullName() + "] has different [store_term_vector_positions] values"); } if (storeTermVectorPayloads() != other.storeTermVectorPayloads()) { - conflicts.add("mapper [" + names().fullName() + "] has different store_term_vector_payloads values"); + conflicts.add("mapper [" + names().fullName() + "] has different [store_term_vector_payloads] values"); } // null and "default"-named index analyzers both mean the default is used if (indexAnalyzer() == null || "default".equals(indexAnalyzer().name())) { if (other.indexAnalyzer() != null && "default".equals(other.indexAnalyzer().name()) == false) { - conflicts.add("mapper [" + names().fullName() + "] has different analyzer"); + conflicts.add("mapper [" + names().fullName() + "] has different [analyzer]"); } } else if (other.indexAnalyzer() == null || "default".equals(other.indexAnalyzer().name())) { - conflicts.add("mapper [" + 
names().fullName() + "] has different analyzer"); + conflicts.add("mapper [" + names().fullName() + "] has different [analyzer]"); } else if (indexAnalyzer().name().equals(other.indexAnalyzer().name()) == false) { - conflicts.add("mapper [" + names().fullName() + "] has different analyzer"); + conflicts.add("mapper [" + names().fullName() + "] has different [analyzer]"); } if (!names().indexName().equals(other.names().indexName())) { - conflicts.add("mapper [" + names().fullName() + "] has different index_name"); + conflicts.add("mapper [" + names().fullName() + "] has different [index_name]"); } if (Objects.equals(similarity(), other.similarity()) == false) { - conflicts.add("mapper [" + names().fullName() + "] has different similarity"); + conflicts.add("mapper [" + names().fullName() + "] has different [similarity]"); } if (strict) { @@ -289,11 +298,14 @@ public abstract class MappedFieldType extends FieldType { conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [boost] across all types."); } if (normsLoading() != other.normsLoading()) { - conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [norms].loading across all types."); + conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [norms.loading] across all types."); } if (Objects.equals(searchAnalyzer(), other.searchAnalyzer()) == false) { conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [search_analyzer] across all types."); } + if (Objects.equals(searchQuoteAnalyzer(), other.searchQuoteAnalyzer()) == false) { + conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [search_quote_analyzer] across all types."); + } if (Objects.equals(fieldDataType(), other.fieldDataType()) == false) { conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. 
Set update_all_types to true to update [fielddata] across all types."); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java index d225b3f6ae6..78d038526b3 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java @@ -134,6 +134,15 @@ public class BinaryFieldMapper extends FieldMapper { return CONTENT_TYPE; } + @Override + public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { + super.checkCompatibility(fieldType, conflicts, strict); + BinaryFieldType other = (BinaryFieldType)fieldType; + if (tryUncompressing() != other.tryUncompressing()) { + conflicts.add("mapper [" + names().fullName() + "] has different [try_uncompressing] (IMPOSSIBLE)"); + } + } + public boolean tryUncompressing() { return tryUncompressing; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index dab41b4a78a..7eb01fd352e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -57,6 +57,7 @@ import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.SortedMap; @@ -237,6 +238,27 @@ public class CompletionFieldMapper extends FieldMapper { this.contextMapping = ref.contextMapping; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof CompletionFieldType)) return false; + if (!super.equals(o)) return false; + CompletionFieldType fieldType = (CompletionFieldType) o; + return analyzingSuggestLookupProvider.getPreserveSep() == fieldType.analyzingSuggestLookupProvider.getPreserveSep() && + analyzingSuggestLookupProvider.getPreservePositionsIncrements() == fieldType.analyzingSuggestLookupProvider.getPreservePositionsIncrements() && + analyzingSuggestLookupProvider.hasPayloads() == fieldType.analyzingSuggestLookupProvider.hasPayloads() && + Objects.equals(getContextMapping(), fieldType.getContextMapping()); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), + analyzingSuggestLookupProvider.getPreserveSep(), + analyzingSuggestLookupProvider.getPreservePositionsIncrements(), + analyzingSuggestLookupProvider.hasPayloads(), + getContextMapping()); + } + @Override public CompletionFieldType clone() { return new CompletionFieldType(this); @@ -252,16 +274,16 @@ public class CompletionFieldMapper extends FieldMapper { super.checkCompatibility(fieldType, conflicts, strict); CompletionFieldType other = (CompletionFieldType)fieldType; if (analyzingSuggestLookupProvider.hasPayloads() != other.analyzingSuggestLookupProvider.hasPayloads()) { - conflicts.add("mapper [" + names().fullName() + "] has different payload values"); + conflicts.add("mapper [" + names().fullName() + "] has different [payload] values"); } if (analyzingSuggestLookupProvider.getPreservePositionsIncrements() != other.analyzingSuggestLookupProvider.getPreservePositionsIncrements()) { - conflicts.add("mapper [" + names().fullName() + "] has different 'preserve_position_increments' values"); + conflicts.add("mapper [" + names().fullName() + "] has different 
[preserve_position_increments] values"); } if (analyzingSuggestLookupProvider.getPreserveSep() != other.analyzingSuggestLookupProvider.getPreserveSep()) { - conflicts.add("mapper [" + names().fullName() + "] has different 'preserve_separators' values"); + conflicts.add("mapper [" + names().fullName() + "] has different [preserve_separators] values"); } if(!ContextMapping.mappingsAreEqual(getContextMapping(), other.getContextMapping())) { - conflicts.add("mapper [" + names().fullName() + "] has different 'context_mapping' values"); + conflicts.add("mapper [" + names().fullName() + "] has different [context_mapping] values"); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java index 8addceff7e0..c958998fcf6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java @@ -350,20 +350,26 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper super.checkCompatibility(fieldType, conflicts, strict); GeoPointFieldType other = (GeoPointFieldType)fieldType; if (isLatLonEnabled() != other.isLatLonEnabled()) { - conflicts.add("mapper [" + names().fullName() + "] has different lat_lon"); + conflicts.add("mapper [" + names().fullName() + "] has different [lat_lon]"); } if (isGeohashEnabled() != other.isGeohashEnabled()) { - conflicts.add("mapper [" + names().fullName() + "] has different geohash"); + conflicts.add("mapper [" + names().fullName() + "] has different [geohash]"); } if (geohashPrecision() != other.geohashPrecision()) { - conflicts.add("mapper [" + names().fullName() + "] has different geohash_precision"); + conflicts.add("mapper [" + names().fullName() + "] has different [geohash_precision]"); } if (isGeohashPrefixEnabled() != other.isGeohashPrefixEnabled()) { - conflicts.add("mapper [" + names().fullName() + "] has different geohash_prefix"); + conflicts.add("mapper [" + names().fullName() + "] has different [geohash_prefix]"); } if (isLatLonEnabled() && other.isLatLonEnabled() && latFieldType().numericPrecisionStep() != other.latFieldType().numericPrecisionStep()) { - conflicts.add("mapper [" + names().fullName() + "] has different precision_step"); + conflicts.add("mapper [" + names().fullName() + "] has different [precision_step]"); + } + if (ignoreMalformed() != other.ignoreMalformed()) { + conflicts.add("mapper [" + names().fullName() + "] has different [ignore_malformed]"); + } + if (coerce() != other.coerce()) { + conflicts.add("mapper [" + names().fullName() + "] has different [coerce]"); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index 3e7aa39e42e..c28285f95a5 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -280,21 +280,30 @@ public class GeoShapeFieldMapper extends FieldMapper { GeoShapeFieldType other = (GeoShapeFieldType)fieldType; // prevent user from changing strategies if (strategyName().equals(other.strategyName()) == false) { - conflicts.add("mapper [" + names().fullName() + "] has different strategy"); + conflicts.add("mapper [" + names().fullName() + "] has different [strategy]"); } // prevent user from changing trees (changes encoding) if 
(tree().equals(other.tree()) == false) { - conflicts.add("mapper [" + names().fullName() + "] has different tree"); + conflicts.add("mapper [" + names().fullName() + "] has different [tree]"); } // TODO we should allow this, but at the moment levels is used to build bookkeeping variables // in lucene's SpatialPrefixTree implementations, need a patch to correct that first if (treeLevels() != other.treeLevels()) { - conflicts.add("mapper [" + names().fullName() + "] has different tree_levels"); + conflicts.add("mapper [" + names().fullName() + "] has different [tree_levels]"); } if (precisionInMeters() != other.precisionInMeters()) { - conflicts.add("mapper [" + names().fullName() + "] has different precision"); + conflicts.add("mapper [" + names().fullName() + "] has different [precision]"); + } + + if (strict) { + if (orientation() != other.orientation()) { + conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [orientation] across all types."); + } + if (distanceErrorPct() != other.distanceErrorPct()) { + conflicts.add("mapper [" + names().fullName() + "] is used by multiple types. Set update_all_types to true to update [distance_error_pct] across all types."); + } } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java index 53c07c41309..ac2ef99c20b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java @@ -167,6 +167,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { @Override public void checkCompatibility(MappedFieldType fieldType, List conflicts, boolean strict) { + super.checkCompatibility(fieldType, conflicts, strict); if (strict) { FieldNamesFieldType other = (FieldNamesFieldType)fieldType; if (isEnabled() != other.isEnabled()) { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java index 5ea01ee244e..41a44d4a88c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeLookupTests.java @@ -182,14 +182,14 @@ public class FieldTypeLookupTests extends ESTestCase { lookup.checkCompatibility(newList(f3), false); fail("expected conflict"); } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("has different store values")); + assertTrue(e.getMessage().contains("has different [store] values")); } // even with updateAllTypes == true, incompatible try { lookup.checkCompatibility(newList(f3), true); fail("expected conflict"); } catch (IllegalArgumentException e) { - assertTrue(e.getMessage().contains("has different store values")); + assertTrue(e.getMessage().contains("has different [store] values")); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java index a27a4344c56..a45348d530c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/FieldTypeTestCase.java @@ -18,57 +18,197 @@ */ package org.elasticsearch.index.mapper; -import org.elasticsearch.common.lucene.Lucene; +import 
org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.similarity.BM25SimilarityProvider; import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; /** Base test case for subclasses of MappedFieldType */ public abstract class FieldTypeTestCase extends ESTestCase { + /** Abstraction for mutating a property of a MappedFieldType */ + public static abstract class Modifier { + /** The name of the property that is being modified. Used in test failure messages. */ + public final String property; + /** true if this modifier only makes types incompatible in strict mode, false otherwise */ + public final boolean strictOnly; + /** true if reversing the order of checkCompatibility arguments should result in the same conflicts, false otherwise **/ + public final boolean symmetric; + + public Modifier(String property, boolean strictOnly, boolean symmetric) { + this.property = property; + this.strictOnly = strictOnly; + this.symmetric = symmetric; + } + + /** Modifies the property */ + public abstract void modify(MappedFieldType ft); + /** + * Optional method to implement that allows the field type that will be compared to be modified, + * so that it does not have the default value for the property being modified. + */ + public void normalizeOther(MappedFieldType other) {} + } + + private final List modifiers = new ArrayList<>(Arrays.asList( + new Modifier("boost", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setBoost(1.1f); + } + }, + new Modifier("doc_values", false, false) { + @Override + public void modify(MappedFieldType ft) { + ft.setHasDocValues(ft.hasDocValues() == false); + } + }, + new Modifier("analyzer", false, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setIndexAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + } + }, + new Modifier("analyzer", false, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setIndexAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + } + @Override + public void normalizeOther(MappedFieldType other) { + other.setIndexAnalyzer(new NamedAnalyzer("foo", new StandardAnalyzer())); + } + }, + new Modifier("search_analyzer", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setSearchAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + } + }, + new Modifier("search_analyzer", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setSearchAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + } + @Override + public void normalizeOther(MappedFieldType other) { + other.setSearchAnalyzer(new NamedAnalyzer("foo", new StandardAnalyzer())); + } + }, + new Modifier("search_quote_analyzer", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setSearchQuoteAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + } + }, + new Modifier("search_quote_analyzer", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setSearchQuoteAnalyzer(new NamedAnalyzer("bar", new StandardAnalyzer())); + } + @Override + public void normalizeOther(MappedFieldType other) { + other.setSearchQuoteAnalyzer(new NamedAnalyzer("foo", new StandardAnalyzer())); + } + }, + new Modifier("similarity", false, true) { + @Override + public void modify(MappedFieldType ft) { + 
ft.setSimilarity(new BM25SimilarityProvider("foo", Settings.EMPTY)); + } + }, + new Modifier("similarity", false, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setSimilarity(new BM25SimilarityProvider("foo", Settings.EMPTY)); + } + @Override + public void normalizeOther(MappedFieldType other) { + other.setSimilarity(new BM25SimilarityProvider("bar", Settings.EMPTY)); + } + }, + new Modifier("norms.loading", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setNormsLoading(MappedFieldType.Loading.LAZY); + } + }, + new Modifier("fielddata", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setFieldDataType(new FieldDataType("foo", Settings.builder().put("loading", "eager").build())); + } + }, + new Modifier("null_value", true, true) { + @Override + public void modify(MappedFieldType ft) { + ft.setNullValue(dummyNullValue); + } + } + )); + + /** + * Add a mutation that will be tested for all expected semantics of equality and compatibility. + * These should be added in an @Before method. + */ + protected void addModifier(Modifier modifier) { + modifiers.add(modifier); + } + + private Object dummyNullValue = "dummyvalue"; + + /** Sets the null value used by the modifier for null value testing. This should be set in an @Before method. */ + protected void setDummyNullValue(Object value) { + dummyNullValue = value; + } + /** Create a default constructed fieldtype */ protected abstract MappedFieldType createDefaultFieldType(); - MappedFieldType createNamedDefaultFieldType(String name) { + MappedFieldType createNamedDefaultFieldType() { MappedFieldType fieldType = createDefaultFieldType(); - fieldType.setNames(new MappedFieldType.Names(name)); + fieldType.setNames(new MappedFieldType.Names("foo")); return fieldType; } - /** A dummy null value to use when modifying null value */ - protected Object dummyNullValue() { - return "dummyvalue"; - } - - /** Returns the number of properties that can be modified for the fieldtype */ - protected int numProperties() { - return 10; - } - - /** Modifies a property, identified by propNum, on the given fieldtype */ - protected void modifyProperty(MappedFieldType ft, int propNum) { - switch (propNum) { - case 0: ft.setNames(new MappedFieldType.Names("dummy")); break; - case 1: ft.setBoost(1.1f); break; - case 2: ft.setHasDocValues(!ft.hasDocValues()); break; - case 3: ft.setIndexAnalyzer(Lucene.STANDARD_ANALYZER); break; - case 4: ft.setSearchAnalyzer(Lucene.STANDARD_ANALYZER); break; - case 5: ft.setSearchQuoteAnalyzer(Lucene.STANDARD_ANALYZER); break; - case 6: ft.setSimilarity(new BM25SimilarityProvider("foo", Settings.EMPTY)); break; - case 7: ft.setNormsLoading(MappedFieldType.Loading.LAZY); break; - case 8: ft.setFieldDataType(new FieldDataType("foo", Settings.builder().put("loading", "eager").build())); break; - case 9: ft.setNullValue(dummyNullValue()); break; - default: fail("unknown fieldtype property number " + propNum); + // TODO: remove this once toString is no longer final on FieldType... + protected void assertFieldTypeEquals(String property, MappedFieldType ft1, MappedFieldType ft2) { + if (ft1.equals(ft2) == false) { + fail("Expected equality, testing property " + property + "\nexpected: " + toString(ft1) + "; \nactual: " + toString(ft2) + "\n"); } } - // TODO: remove this once toString is no longer final on FieldType... 
- protected void assertEquals(int i, MappedFieldType ft1, MappedFieldType ft2) { - assertEquals("prop " + i + "\nexpected: " + toString(ft1) + "; \nactual: " + toString(ft2), ft1, ft2); + protected void assertFieldTypeNotEquals(String property, MappedFieldType ft1, MappedFieldType ft2) { + if (ft1.equals(ft2)) { + fail("Expected inequality, testing property " + property + "\nfirst: " + toString(ft1) + "; \nsecond: " + toString(ft2) + "\n"); + } + } + + protected void assertCompatible(String msg, MappedFieldType ft1, MappedFieldType ft2, boolean strict) { + List conflicts = new ArrayList<>(); + ft1.checkCompatibility(ft2, conflicts, strict); + assertTrue("Found conflicts for " + msg + ": " + conflicts, conflicts.isEmpty()); + } + + protected void assertNotCompatible(String msg, MappedFieldType ft1, MappedFieldType ft2, boolean strict, String... messages) { + assert messages.length != 0; + List conflicts = new ArrayList<>(); + ft1.checkCompatibility(ft2, conflicts, strict); + for (String message : messages) { + boolean found = false; + for (String conflict : conflicts) { + if (conflict.contains(message)) { + found = true; + } + } + assertTrue("Missing conflict for " + msg + ": [" + message + "] in conflicts " + conflicts, found); + } } protected String toString(MappedFieldType ft) { @@ -88,45 +228,50 @@ public abstract class FieldTypeTestCase extends ESTestCase { } public void testClone() { - MappedFieldType fieldType = createNamedDefaultFieldType("foo"); + MappedFieldType fieldType = createNamedDefaultFieldType(); MappedFieldType clone = fieldType.clone(); assertNotSame(clone, fieldType); assertEquals(clone.getClass(), fieldType.getClass()); assertEquals(clone, fieldType); assertEquals(clone, clone.clone()); // transitivity - for (int i = 0; i < numProperties(); ++i) { - fieldType = createNamedDefaultFieldType("foo"); - modifyProperty(fieldType, i); + for (Modifier modifier : modifiers) { + fieldType = createNamedDefaultFieldType(); + modifier.modify(fieldType); clone = fieldType.clone(); assertNotSame(clone, fieldType); - assertEquals(i, clone, fieldType); + assertFieldTypeEquals(modifier.property, clone, fieldType); } } public void testEquals() { - MappedFieldType ft1 = createNamedDefaultFieldType("foo"); - MappedFieldType ft2 = createNamedDefaultFieldType("foo"); + MappedFieldType ft1 = createNamedDefaultFieldType(); + MappedFieldType ft2 = createNamedDefaultFieldType(); assertEquals(ft1, ft1); // reflexive assertEquals(ft1, ft2); // symmetric assertEquals(ft2, ft1); assertEquals(ft1.hashCode(), ft2.hashCode()); - for (int i = 0; i < numProperties(); ++i) { - ft2 = createNamedDefaultFieldType("foo"); - modifyProperty(ft2, i); - assertNotEquals(ft1, ft2); - assertNotEquals(ft1.hashCode(), ft2.hashCode()); + for (Modifier modifier : modifiers) { + ft1 = createNamedDefaultFieldType(); + ft2 = createNamedDefaultFieldType(); + modifier.modify(ft2); + assertFieldTypeNotEquals(modifier.property, ft1, ft2); + assertNotEquals("hash code for modified property " + modifier.property, ft1.hashCode(), ft2.hashCode()); + // modify the same property and they are equal again + modifier.modify(ft1); + assertFieldTypeEquals(modifier.property, ft1, ft2); + assertEquals("hash code for modified property " + modifier.property, ft1.hashCode(), ft2.hashCode()); } } public void testFreeze() { - for (int i = 0; i < numProperties(); ++i) { - MappedFieldType fieldType = createNamedDefaultFieldType("foo"); + for (Modifier modifier : modifiers) { + MappedFieldType fieldType = createNamedDefaultFieldType(); 
fieldType.freeze(); try { - modifyProperty(fieldType, i); - fail("expected already frozen exception for property " + i); + modifier.modify(fieldType); + fail("expected already frozen exception for property " + modifier.property); } catch (IllegalStateException e) { assertTrue(e.getMessage().contains("already frozen")); } @@ -134,7 +279,7 @@ public abstract class FieldTypeTestCase extends ESTestCase { } public void testCheckTypeName() { - final MappedFieldType fieldType = createNamedDefaultFieldType("foo"); + final MappedFieldType fieldType = createNamedDefaultFieldType(); List conflicts = new ArrayList<>(); fieldType.checkTypeName(fieldType, conflicts); assertTrue(conflicts.toString(), conflicts.isEmpty()); @@ -164,4 +309,46 @@ public abstract class FieldTypeTestCase extends ESTestCase { assertTrue(conflicts.get(0).contains("cannot be changed from type")); assertEquals(1, conflicts.size()); } + + public void testCheckCompatibility() { + MappedFieldType ft1 = createNamedDefaultFieldType(); + MappedFieldType ft2 = createNamedDefaultFieldType(); + assertCompatible("default", ft1, ft2, true); + assertCompatible("default", ft1, ft2, false); + assertCompatible("default", ft2, ft1, true); + assertCompatible("default", ft2, ft1, false); + + for (Modifier modifier : modifiers) { + ft1 = createNamedDefaultFieldType(); + ft2 = createNamedDefaultFieldType(); + modifier.normalizeOther(ft1); + modifier.modify(ft2); + if (modifier.strictOnly) { + String[] conflicts = { + "mapper [foo] is used by multiple types", + "update [" + modifier.property + "]" + }; + assertCompatible(modifier.property, ft1, ft2, false); + assertNotCompatible(modifier.property, ft1, ft2, true, conflicts); + assertCompatible(modifier.property, ft2, ft1, false); // always symmetric when not strict + if (modifier.symmetric) { + assertNotCompatible(modifier.property, ft2, ft1, true, conflicts); + } else { + assertCompatible(modifier.property, ft2, ft1, true); + } + } else { + // not compatible whether strict or not + String conflict = "different [" + modifier.property + "]"; + assertNotCompatible(modifier.property, ft1, ft2, true, conflict); + assertNotCompatible(modifier.property, ft1, ft2, false, conflict); + if (modifier.symmetric) { + assertNotCompatible(modifier.property, ft2, ft1, true, conflict); + assertNotCompatible(modifier.property, ft2, ft1, false, conflict); + } else { + assertCompatible(modifier.property, ft2, ft1, true); + assertCompatible(modifier.property, ft2, ft1, false); + } + } + } + } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/BinaryFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/BinaryFieldTypeTests.java index 47403c8b1df..f241d555e12 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/BinaryFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/BinaryFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class BinaryFieldTypeTests extends FieldTypeTestCase { @@ -28,17 +29,14 @@ public class BinaryFieldTypeTests extends FieldTypeTestCase { return new BinaryFieldMapper.BinaryFieldType(); } - @Override - protected int numProperties() { - return 1 + super.numProperties(); - } - - @Override - protected void modifyProperty(MappedFieldType ft, int propNum) { - BinaryFieldMapper.BinaryFieldType bft = (BinaryFieldMapper.BinaryFieldType)ft; - switch 
(propNum) { - case 0: bft.setTryUncompressing(!bft.tryUncompressing()); break; - default: super.modifyProperty(ft, propNum - 1); - } + @Before + public void setupProperties() { + addModifier(new Modifier("try_uncompressing", false, true) { + @Override + public void modify(MappedFieldType ft) { + BinaryFieldMapper.BinaryFieldType bft = (BinaryFieldMapper.BinaryFieldType)ft; + bft.setTryUncompressing(!bft.tryUncompressing()); + } + }); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldTypeTests.java index 3485a383cd6..0800e4ae3d9 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/BooleanFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class BooleanFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,8 +28,8 @@ public class BooleanFieldTypeTests extends FieldTypeTestCase { return new BooleanFieldMapper.BooleanFieldType(); } - @Override - protected Object dummyNullValue() { - return true; + @Before + public void setupProperties() { + setDummyNullValue(true); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/ByteFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/ByteFieldTypeTests.java index 33bf21e2710..08697ccd364 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/ByteFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/ByteFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class ByteFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,8 +28,8 @@ public class ByteFieldTypeTests extends FieldTypeTestCase { return new ByteFieldMapper.ByteFieldType(); } - @Override - protected Object dummyNullValue() { - return (byte)10; + @Before + public void setupProperties() { + setDummyNullValue((byte)10); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/CompletionFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/CompletionFieldTypeTests.java index b5afc1ae672..55dd7f8f7c9 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/CompletionFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/CompletionFieldTypeTests.java @@ -20,10 +20,53 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.search.suggest.completion.AnalyzingCompletionLookupProvider; +import org.elasticsearch.search.suggest.context.ContextBuilder; +import org.elasticsearch.search.suggest.context.ContextMapping; +import org.junit.Before; + +import java.util.SortedMap; +import java.util.TreeMap; public class CompletionFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { - return new CompletionFieldMapper.CompletionFieldType(); + CompletionFieldMapper.CompletionFieldType ft = new CompletionFieldMapper.CompletionFieldType(); + ft.setProvider(new AnalyzingCompletionLookupProvider(true, false, true, 
false)); + return ft; + } + + @Before + public void setupProperties() { + addModifier(new Modifier("preserve_separators", false, true) { + @Override + public void modify(MappedFieldType ft) { + CompletionFieldMapper.CompletionFieldType cft = (CompletionFieldMapper.CompletionFieldType)ft; + cft.setProvider(new AnalyzingCompletionLookupProvider(false, false, true, false)); + } + }); + addModifier(new Modifier("preserve_position_increments", false, true) { + @Override + public void modify(MappedFieldType ft) { + CompletionFieldMapper.CompletionFieldType cft = (CompletionFieldMapper.CompletionFieldType)ft; + cft.setProvider(new AnalyzingCompletionLookupProvider(true, false, false, false)); + } + }); + addModifier(new Modifier("payload", false, true) { + @Override + public void modify(MappedFieldType ft) { + CompletionFieldMapper.CompletionFieldType cft = (CompletionFieldMapper.CompletionFieldType)ft; + cft.setProvider(new AnalyzingCompletionLookupProvider(true, false, true, true)); + } + }); + addModifier(new Modifier("context_mapping", false, true) { + @Override + public void modify(MappedFieldType ft) { + CompletionFieldMapper.CompletionFieldType cft = (CompletionFieldMapper.CompletionFieldType)ft; + SortedMap contextMapping = new TreeMap<>(); + contextMapping.put("foo", ContextBuilder.location("foo").build()); + cft.setContextMapping(contextMapping); + } + }); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldTypeTests.java index b6e162c8c38..3c37af6f49a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/DateFieldTypeTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; import java.util.Locale; import java.util.concurrent.TimeUnit; @@ -31,23 +32,26 @@ public class DateFieldTypeTests extends FieldTypeTestCase { return new DateFieldMapper.DateFieldType(); } - @Override - protected Object dummyNullValue() { - return 10; - } - - @Override - protected int numProperties() { - return 2 + super.numProperties(); - } - - @Override - protected void modifyProperty(MappedFieldType ft, int propNum) { - DateFieldMapper.DateFieldType dft = (DateFieldMapper.DateFieldType)ft; - switch (propNum) { - case 0: dft.setDateTimeFormatter(Joda.forPattern("basic_week_date", Locale.ROOT)); break; - case 1: dft.setTimeUnit(TimeUnit.HOURS); break; - default: super.modifyProperty(ft, propNum - 2); - } + @Before + public void setupProperties() { + setDummyNullValue(10); + addModifier(new Modifier("format", true, true) { + @Override + public void modify(MappedFieldType ft) { + ((DateFieldMapper.DateFieldType) ft).setDateTimeFormatter(Joda.forPattern("basic_week_date", Locale.ROOT)); + } + }); + addModifier(new Modifier("locale", true, true) { + @Override + public void modify(MappedFieldType ft) { + ((DateFieldMapper.DateFieldType) ft).setDateTimeFormatter(Joda.forPattern("date_optional_time", Locale.CANADA)); + } + }); + addModifier(new Modifier("numeric_resolution", true, true) { + @Override + public void modify(MappedFieldType ft) { + ((DateFieldMapper.DateFieldType)ft).setTimeUnit(TimeUnit.HOURS); + } + }); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/DoubleFieldTypeTests.java 
b/core/src/test/java/org/elasticsearch/index/mapper/core/DoubleFieldTypeTests.java index 247f5e2364d..5f34e746ece 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/DoubleFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/DoubleFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class DoubleFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,8 +28,8 @@ public class DoubleFieldTypeTests extends FieldTypeTestCase { return new DoubleFieldMapper.DoubleFieldType(); } - @Override - protected Object dummyNullValue() { - return 10.0D; + @Before + public void setupProperties() { + setDummyNullValue(10.0D); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/FloatFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/FloatFieldTypeTests.java index 934f656ae76..73d593ac2f6 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/FloatFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/FloatFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class FloatFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,8 +28,8 @@ public class FloatFieldTypeTests extends FieldTypeTestCase { return new DoubleFieldMapper.DoubleFieldType(); } - @Override - protected Object dummyNullValue() { - return 10.0; + @Before + public void setupProperties() { + setDummyNullValue(10.0); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/IntegerFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/IntegerFieldTypeTests.java index 6e4a49e758b..b8c40af72c5 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/IntegerFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/IntegerFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class IntegerFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,8 +28,8 @@ public class IntegerFieldTypeTests extends FieldTypeTestCase { return new IntegerFieldMapper.IntegerFieldType(); } - @Override - protected Object dummyNullValue() { - return 10; + @Before + public void setupProperties() { + setDummyNullValue(10); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/LongFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/LongFieldTypeTests.java index 671483e8978..e7b41bf21d1 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/LongFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/LongFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class LongFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,8 +28,8 @@ public class LongFieldTypeTests extends FieldTypeTestCase { return new LongFieldMapper.LongFieldType(); } - @Override - protected Object dummyNullValue() { - return (long)10; + 
@Before + public void setupProperties() { + setDummyNullValue((long)10); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/ShortFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/ShortFieldTypeTests.java index 8a10da5cac2..12ddf827f43 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/ShortFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/ShortFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.core; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class ShortFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,8 +28,8 @@ public class ShortFieldTypeTests extends FieldTypeTestCase { return new ShortFieldMapper.ShortFieldType(); } - @Override - protected Object dummyNullValue() { - return (short)10; + @Before + public void setupProperties() { + setDummyNullValue((short)10); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java index 1ca1c3a2837..4d61ecf397e 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java @@ -626,9 +626,10 @@ public class GeoPointFieldMapperTests extends ESSingleNodeTestCase { MergeResult mergeResult = stage1.merge(stage2.mapping(), false, false); assertThat(mergeResult.hasConflicts(), equalTo(true)); - assertThat(mergeResult.buildConflicts().length, equalTo(1)); + assertThat(mergeResult.buildConflicts().length, equalTo(2)); // todo better way of checking conflict? 
- assertThat("mapper [point] has different lat_lon", isIn(new ArrayList<>(Arrays.asList(mergeResult.buildConflicts())))); + assertThat("mapper [point] has different [lat_lon]", isIn(new ArrayList<>(Arrays.asList(mergeResult.buildConflicts())))); + assertThat("mapper [point] has different [ignore_malformed]", isIn(new ArrayList<>(Arrays.asList(mergeResult.buildConflicts())))); // correct mapping and ensure no failures stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type") diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldTypeTests.java index 07a769faa61..b6ebeb90acf 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldTypeTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.core.DoubleFieldMapper; import org.elasticsearch.index.mapper.core.StringFieldMapper; +import org.junit.Before; public class GeoPointFieldTypeTests extends FieldTypeTestCase { @Override @@ -29,20 +30,33 @@ public class GeoPointFieldTypeTests extends FieldTypeTestCase { return new GeoPointFieldMapper.GeoPointFieldType(); } - @Override - protected int numProperties() { - return 4 + super.numProperties(); - } - - @Override - protected void modifyProperty(MappedFieldType ft, int propNum) { - GeoPointFieldMapper.GeoPointFieldType gft = (GeoPointFieldMapper.GeoPointFieldType)ft; - switch (propNum) { - case 0: gft.setGeohashEnabled(new StringFieldMapper.StringFieldType(), 1, true); break; - case 1: gft.setLatLonEnabled(new DoubleFieldMapper.DoubleFieldType(), new DoubleFieldMapper.DoubleFieldType()); break; - case 2: gft.setIgnoreMalformed(!gft.ignoreMalformed()); break; - case 3: gft.setCoerce(!gft.coerce()); break; - default: super.modifyProperty(ft, propNum - 4); - } + @Before + public void setupProperties() { + addModifier(new Modifier("geohash", false, true) { + @Override + public void modify(MappedFieldType ft) { + ((GeoPointFieldMapper.GeoPointFieldType)ft).setGeohashEnabled(new StringFieldMapper.StringFieldType(), 1, true); + } + }); + addModifier(new Modifier("lat_lon", false, true) { + @Override + public void modify(MappedFieldType ft) { + ((GeoPointFieldMapper.GeoPointFieldType)ft).setLatLonEnabled(new DoubleFieldMapper.DoubleFieldType(), new DoubleFieldMapper.DoubleFieldType()); + } + }); + addModifier(new Modifier("ignore_malformed", false, true) { + @Override + public void modify(MappedFieldType ft) { + GeoPointFieldMapper.GeoPointFieldType gft = (GeoPointFieldMapper.GeoPointFieldType)ft; + gft.setIgnoreMalformed(!gft.ignoreMalformed()); + } + }); + addModifier(new Modifier("coerce", false, true) { + @Override + public void modify(MappedFieldType ft) { + GeoPointFieldMapper.GeoPointFieldType gft = (GeoPointFieldMapper.GeoPointFieldType)ft; + gft.setCoerce(!gft.coerce()); + } + }); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java index b30589b8c93..b7161c3463b 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java @@ -377,10 +377,10 @@ public class GeoShapeFieldMapperTests 
extends ESSingleNodeTestCase { assertThat(mergeResult.hasConflicts(), equalTo(true)); assertThat(mergeResult.buildConflicts().length, equalTo(4)); ArrayList conflicts = new ArrayList<>(Arrays.asList(mergeResult.buildConflicts())); - assertThat("mapper [shape] has different strategy", isIn(conflicts)); - assertThat("mapper [shape] has different tree", isIn(conflicts)); - assertThat("mapper [shape] has different tree_levels", isIn(conflicts)); - assertThat("mapper [shape] has different precision", isIn(conflicts)); + assertThat("mapper [shape] has different [strategy]", isIn(conflicts)); + assertThat("mapper [shape] has different [tree]", isIn(conflicts)); + assertThat("mapper [shape] has different [tree_levels]", isIn(conflicts)); + assertThat("mapper [shape] has different [precision]", isIn(conflicts)); // verify nothing changed FieldMapper fieldMapper = stage1.mappers().getMapper("shape"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldTypeTests.java index 527d79c0ee9..7ce99aa737a 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldTypeTests.java @@ -21,31 +21,51 @@ package org.elasticsearch.index.mapper.geo; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class GeoShapeFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { - GeoShapeFieldMapper.GeoShapeFieldType gft = new GeoShapeFieldMapper.GeoShapeFieldType(); - gft.setNames(new MappedFieldType.Names("testgeoshape")); - return gft; + return new GeoShapeFieldMapper.GeoShapeFieldType(); } - @Override - protected int numProperties() { - return 6 + super.numProperties(); - } - - @Override - protected void modifyProperty(MappedFieldType ft, int propNum) { - GeoShapeFieldMapper.GeoShapeFieldType gft = (GeoShapeFieldMapper.GeoShapeFieldType)ft; - switch (propNum) { - case 0: gft.setTree("quadtree"); break; - case 1: gft.setStrategyName("term"); break; - case 2: gft.setTreeLevels(10); break; - case 3: gft.setPrecisionInMeters(20); break; - case 4: gft.setDefaultDistanceErrorPct(0.5); break; - case 5: gft.setOrientation(ShapeBuilder.Orientation.LEFT); break; - default: super.modifyProperty(ft, propNum - 6); - } + @Before + public void setupProperties() { + addModifier(new Modifier("tree", false, true) { + @Override + public void modify(MappedFieldType ft) { + ((GeoShapeFieldMapper.GeoShapeFieldType)ft).setTree("quadtree"); + } + }); + addModifier(new Modifier("strategy", false, true) { + @Override + public void modify(MappedFieldType ft) { + ((GeoShapeFieldMapper.GeoShapeFieldType)ft).setStrategyName("term"); + } + }); + addModifier(new Modifier("tree_levels", false, true) { + @Override + public void modify(MappedFieldType ft) { + ((GeoShapeFieldMapper.GeoShapeFieldType)ft).setTreeLevels(10); + } + }); + addModifier(new Modifier("precision", false, true) { + @Override + public void modify(MappedFieldType ft) { + ((GeoShapeFieldMapper.GeoShapeFieldType)ft).setPrecisionInMeters(20); + } + }); + addModifier(new Modifier("distance_error_pct", true, true) { + @Override + public void modify(MappedFieldType ft) { + ((GeoShapeFieldMapper.GeoShapeFieldType)ft).setDefaultDistanceErrorPct(0.5); + } + }); + addModifier(new 
Modifier("orientation", true, true) { + @Override + public void modify(MappedFieldType ft) { + ((GeoShapeFieldMapper.GeoShapeFieldType)ft).setOrientation(ShapeBuilder.Orientation.LEFT); + } + }); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldTypeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldTypeTests.java index 8f0ea33d37e..83aa779a61d 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldTypeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldTypeTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper.internal; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; +import org.junit.Before; public class FieldNamesFieldTypeTests extends FieldTypeTestCase { @Override @@ -27,17 +28,14 @@ public class FieldNamesFieldTypeTests extends FieldTypeTestCase { return new FieldNamesFieldMapper.FieldNamesFieldType(); } - @Override - protected int numProperties() { - return 1 + super.numProperties(); - } - - @Override - protected void modifyProperty(MappedFieldType ft, int propNum) { - FieldNamesFieldMapper.FieldNamesFieldType fnft = (FieldNamesFieldMapper.FieldNamesFieldType)ft; - switch (propNum) { - case 0: fnft.setEnabled(!fnft.isEnabled()); break; - default: super.modifyProperty(ft, propNum - 1); - } + @Before + public void setupProperties() { + addModifier(new Modifier("enabled", true, true) { + @Override + public void modify(MappedFieldType ft) { + FieldNamesFieldMapper.FieldNamesFieldType fnft = (FieldNamesFieldMapper.FieldNamesFieldType)ft; + fnft.setEnabled(!fnft.isEnabled()); + } + }); } } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java index 655069ed9e2..eec0002a6ef 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java @@ -173,15 +173,15 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase { DocumentMapper docMapper4 = parser.parse(mapping); mergeResult = docMapper.merge(docMapper4.mapping(), true, false); assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(true)); - assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different index values")); - assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different store values")); + assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different [index] values")); + assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different [store] values")); mergeResult = docMapper.merge(docMapper4.mapping(), false, false); assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(true)); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); - assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different index values")); - assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different store values")); + assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different [index] values")); + assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has 
different [store] values")); // There are conflicts, but the `name.not_indexed3` has been added, b/c that field has no conflicts assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java index f122de90202..e6b08fb4978 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java @@ -515,7 +515,7 @@ public class SimpleStringMappingTests extends ESSingleNodeTestCase { mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), true, false); assertTrue(mergeResult.hasConflicts()); assertEquals(1, mergeResult.buildConflicts().length); - assertTrue(mergeResult.buildConflicts()[0].contains("cannot enable norms")); + assertTrue(mergeResult.buildConflicts()[0].contains("different [omit_norms]")); } /** diff --git a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java index e33f898427d..057dc41f0f9 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java @@ -579,11 +579,10 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true, false); List expectedConflicts = new ArrayList<>(Arrays.asList( - "mapper [_timestamp] has different index values", - "mapper [_timestamp] has different store values", + "mapper [_timestamp] has different [index] values", + "mapper [_timestamp] has different [store] values", "Cannot update default in _timestamp value. Value is 1970-01-01 now encountering 1970-01-02", - "Cannot update path in _timestamp value. Value is foo path in merged mapping is bar", - "mapper [_timestamp] has different tokenize values")); + "Cannot update path in _timestamp value. 
Value is foo path in merged mapping is bar")); for (String conflict : mergeResult.buildConflicts()) { assertTrue("found unexpected conflict [" + conflict + "]", expectedConflicts.remove(conflict)); @@ -618,12 +617,12 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true, false); List expectedConflicts = new ArrayList<>(); - expectedConflicts.add("mapper [_timestamp] has different index values"); - expectedConflicts.add("mapper [_timestamp] has different tokenize values"); + expectedConflicts.add("mapper [_timestamp] has different [index] values"); + expectedConflicts.add("mapper [_timestamp] has different [tokenize] values"); if (indexValues.get(0).equals("not_analyzed") == false) { // if the only index value left is not_analyzed, then the doc values setting will be the same, but in the // other two cases, it will change - expectedConflicts.add("mapper [_timestamp] has different doc_values values"); + expectedConflicts.add("mapper [_timestamp] has different [doc_values] values"); } for (String conflict : mergeResult.buildConflicts()) { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java index 4d4d06bd292..4ae039a3610 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingOnClusterIT.java @@ -55,14 +55,14 @@ public class UpdateMappingOnClusterIT extends ESIntegTestCase { String mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/all_mapping_create_index.json"); String mappingUpdate = copyToStringFromClasspath("/org/elasticsearch/index/mapper/update/all_mapping_update_with_conflicts.json"); String[] errorMessage = {"[_all] enabled is true now encountering false", - "[_all] cannot enable norms (`norms.enabled`)", - "[_all] has different store values", - "[_all] has different store_term_vector values", - "[_all] has different store_term_vector_offsets values", - "[_all] has different store_term_vector_positions values", - "[_all] has different store_term_vector_payloads values", - "[_all] has different analyzer", - "[_all] has different similarity"}; + "[_all] has different [omit_norms] values", + "[_all] has different [store] values", + "[_all] has different [store_term_vector] values", + "[_all] has different [store_term_vector_offsets] values", + "[_all] has different [store_term_vector_positions] values", + "[_all] has different [store_term_vector_payloads] values", + "[_all] has different [analyzer]", + "[_all] has different [similarity]"}; // fielddata and search_analyzer should not report conflict testConflict(mapping, mappingUpdate, errorMessage); } From a80317c4b39df375d869093ea5a793a74913b66e Mon Sep 17 00:00:00 2001 From: Martijn Laarman Date: Sat, 15 Aug 2015 20:49:40 +0200 Subject: [PATCH 13/51] cmd /C needs to be quoted as a whole To support spaces in both the command and its arguments, cmd needs to be called like this: cmd /C ""c:\a b\c.bat" "argument 1" "argument2"" ant was running cmd /C "c:\a b\c.bat" "argument 1" "argument2" which Windows preprocesses to cmd /C c:\a b\c.bat" "argument 1" "argument2 This made it appear as though ant was not quoting properly (when in fact it was, more or less).
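Editor's aside: a minimal, self-contained Java sketch of the quoting rule the message above describes (not part of the patch; the class and helper names are made up for illustration):

import java.util.Arrays;
import java.util.List;

public class CmdQuoting {
    // Quote each token, then wrap the entire command line in one more pair of
    // quotes so that cmd /C treats it as a single command.
    static String cmdLine(String exe, List<String> args) {
        StringBuilder sb = new StringBuilder("cmd /C \"\"").append(exe).append('"');
        for (String arg : args) {
            sb.append(" \"").append(arg).append('"');
        }
        return sb.append('"').toString();
    }

    public static void main(String[] args) {
        // Prints: cmd /C ""c:\a b\c.bat" "argument 1" "argument2""
        System.out.println(cmdLine("c:\\a b\\c.bat", Arrays.asList("argument 1", "argument2")));
    }
}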
--- dev-tools/src/main/resources/ant/integration-tests.xml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/dev-tools/src/main/resources/ant/integration-tests.xml b/dev-tools/src/main/resources/ant/integration-tests.xml index 7b97f3e089d..68d531cd6af 100644 --- a/dev-tools/src/main/resources/ant/integration-tests.xml +++ b/dev-tools/src/main/resources/ant/integration-tests.xml @@ -32,8 +32,10 @@ + + From aa52c4f712ee932f06d99fe791b68f0eb03ef704 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Mon, 31 Aug 2015 13:47:40 +0200 Subject: [PATCH 14/51] Docs: Fixed variations of spelling of buckets_path Closes #13201 --- docs/reference/aggregations/pipeline.asciidoc | 10 +++++----- .../pipeline/avg-bucket-aggregation.asciidoc | 2 +- .../pipeline/bucket-script-aggregation.asciidoc | 4 ++-- .../pipeline/bucket-selector-aggregation.asciidoc | 4 ++-- .../pipeline/cumulative-sum-aggregation.asciidoc | 6 +++--- .../pipeline/derivative-aggregation.asciidoc | 2 +- .../pipeline/max-bucket-aggregation.asciidoc | 6 +++--- .../pipeline/min-bucket-aggregation.asciidoc | 6 +++--- .../aggregations/pipeline/movavg-aggregation.asciidoc | 4 ++-- .../pipeline/serial-diff-aggregation.asciidoc | 4 ++-- .../pipeline/sum-bucket-aggregation.asciidoc | 6 +++--- 11 files changed, 27 insertions(+), 27 deletions(-) diff --git a/docs/reference/aggregations/pipeline.asciidoc b/docs/reference/aggregations/pipeline.asciidoc index b31fda65ca3..670ed6266b0 100644 --- a/docs/reference/aggregations/pipeline.asciidoc +++ b/docs/reference/aggregations/pipeline.asciidoc @@ -18,9 +18,9 @@ _Sibling_:: Pipeline aggregations that are provided with the output of a sibling aggregation and are able to compute a new aggregation which will be at the same level as the sibling aggregation. -Pipeline aggregations can reference the aggregations they need to perform their computation by using the `buckets_paths` +Pipeline aggregations can reference the aggregations they need to perform their computation by using the `buckets_path` parameter to indicate the paths to the required metrics. The syntax for defining these paths can be found in the -<> section below. +<> section below. Pipeline aggregations cannot have sub-aggregations but depending on the type it can reference another pipeline in the `buckets_path` allowing pipeline aggregations to be chained. For example, you can chain together two derivatives to calculate the second derivative @@ -29,7 +29,7 @@ allowing pipeline aggregations to be chained. For example, you can chain togeth NOTE: Because pipeline aggregations only add to the output, when chaining pipeline aggregations the output of each pipeline aggregation will be included in the final output. -[[bucket-path-syntax]] +[[buckets-path-syntax]] [float] === `buckets_path` Syntax @@ -96,13 +96,13 @@ a metric embedded inside a sibling aggregation: }, "max_monthly_sales": { "max_bucket": { - "buckets_paths": "sales_per_month>sales" <1> + "buckets_path": "sales_per_month>sales" <1> } } } } -------------------------------------------------- -<1> `bucket_paths` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the +<1> `buckets_path` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the `sales_per_month` date histogram. 
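Editor's aside: the `buckets_path` syntax is easiest to see decomposed. A small illustrative sketch, not Elasticsearch source, assuming the documented separators (`>` steps into nested aggregations, `.` selects a metric of the final aggregation); the second path shown in the comments is a hypothetical example:

import java.util.Arrays;

public class BucketsPathExample {
    public static void main(String[] args) {
        // "sales_per_month>sales": step into sales_per_month, read sales.
        // "my_bucket>my_stats.avg": step into my_bucket, read the avg metric
        // of the my_stats aggregation.
        String bucketsPath = "my_bucket>my_stats.avg";
        String[] steps = bucketsPath.split(">");
        String last = steps[steps.length - 1];
        int dot = last.indexOf('.');
        String metric = dot >= 0 ? last.substring(dot + 1) : last;
        if (dot >= 0) {
            steps[steps.length - 1] = last.substring(0, dot);
        }
        System.out.println("aggregations: " + Arrays.toString(steps)); // [my_bucket, my_stats]
        System.out.println("metric: " + metric);                       // avg
    }
}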
[float] diff --git a/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc index bbd540dc01f..b2b9d93f767 100644 --- a/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/avg-bucket-aggregation.asciidoc @@ -24,7 +24,7 @@ An `avg_bucket` aggregation looks like this in isolation: .`avg_bucket` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the average for (see <> for more +|`buckets_path` |The path to the buckets we wish to find the average for (see <> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <> for more details)|Optional, defaults to `skip` || diff --git a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc index 6c790403af0..72addadaefa 100644 --- a/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-script-aggregation.asciidoc @@ -34,7 +34,7 @@ the metrics to use for that variable. |`script` |The script to run for this aggregation. The script can be inline, file or indexed. (see <> for more details) |Required | |`buckets_path` |A map of script variables and their associated path to the buckets we wish to use for the variable -(see <> for more details) |Required | +(see <> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <> for more details)|Optional, defaults to `skip` | |`format` |format to apply to the output value of this aggregation |Optional, defaults to `null` | @@ -73,7 +73,7 @@ The following snippet calculates the ratio percentage of t-shirt sales compared }, "t-shirt-percentage": { "bucket_script": { - "buckets_paths": { + "buckets_path": { "tShirtSales": "t-shirts>sales", "totalSales": "total_sales" }, diff --git a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc index 7ac4f66dba4..2b838ba45fb 100644 --- a/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/bucket-selector-aggregation.asciidoc @@ -39,7 +39,7 @@ the metrics to use for that variable. |`script` |The script to run for this aggregation. The script can be inline, file or indexed. 
(see <> for more details) |Required | |`buckets_path` |A map of script variables and their associated path to the buckets we wish to use for the variable -(see <> for more details) |Required | +(see <> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <> for more details)|Optional, defaults to `skip` | |=== @@ -63,7 +63,7 @@ The following snippet only retains buckets where the total sales for the month i } "sales_bucket_filter": { "bucket_selector": { - "buckets_paths": { + "buckets_path": { "totalSales": "total_sales" }, "script": "totalSales <= 50" diff --git a/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc b/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc index 88fcd83831e..823c5c80d6d 100644 --- a/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/cumulative-sum-aggregation.asciidoc @@ -25,7 +25,7 @@ A `cumulative_sum` aggregation looks like this in isolation: .`cumulative_sum` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the cumulative sum for (see <> for more +|`buckets_path` |The path to the buckets we wish to find the cumulative sum for (see <> for more details) |Required | |`format` |format to apply to the output value of this aggregation |Optional, defaults to `null` | |=== @@ -49,7 +49,7 @@ The following snippet calculates the cumulative sum of the total monthly `sales` }, "cumulative_sales": { "cumulative_sum": { - "buckets_paths": "sales" <1> + "buckets_path": "sales" <1> } } } @@ -58,7 +58,7 @@ The following snippet calculates the cumulative sum of the total monthly `sales` } -------------------------------------------------- -<1> `bucket_paths` instructs this cumulative sum aggregation to use the output of the `sales` aggregation for the cumulative sum +<1> `buckets_path` instructs this cumulative sum aggregation to use the output of the `sales` aggregation for the cumulative sum And the following may be the response: diff --git a/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc b/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc index ec63600a321..48296caf608 100644 --- a/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/derivative-aggregation.asciidoc @@ -25,7 +25,7 @@ A `derivative` aggregation looks like this in isolation: .`derivative` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the derivative for (see <> for more +|`buckets_path` |The path to the buckets we wish to find the derivative for (see <> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <> for more details)|Optional, defaults to `skip` | diff --git a/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc index 0d15cc02e2a..310a643a66c 100644 --- a/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/max-bucket-aggregation.asciidoc @@ -25,7 +25,7 @@ A `max_bucket` aggregation looks like this in isolation: .`max_bucket` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the maximum for (see <> for more 
+|`buckets_path` |The path to the buckets we wish to find the maximum for (see <> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <> for more details)|Optional, defaults to `skip` | @@ -53,13 +53,13 @@ The following snippet calculates the maximum of the total monthly `sales`: }, "max_monthly_sales": { "max_bucket": { - "buckets_paths": "sales_per_month>sales" <1> + "buckets_path": "sales_per_month>sales" <1> } } } } -------------------------------------------------- -<1> `bucket_paths` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the +<1> `buckets_path` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the `sales_per_month` date histogram. And the following may be the response: diff --git a/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc index ed02f7b2051..11d3d559512 100644 --- a/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/min-bucket-aggregation.asciidoc @@ -25,7 +25,7 @@ A `max_bucket` aggregation looks like this in isolation: .`min_bucket` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the minimum for (see <> for more +|`buckets_path` |The path to the buckets we wish to find the minimum for (see <> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <> for more details)|Optional, defaults to `skip` | @@ -54,14 +54,14 @@ The following snippet calculates the minimum of the total monthly `sales`: }, "min_monthly_sales": { "min_bucket": { - "buckets_paths": "sales_per_month>sales" <1> + "buckets_path": "sales_per_month>sales" <1> } } } } -------------------------------------------------- -<1> `bucket_paths` instructs this max_bucket aggregation that we want the minimum value of the `sales` aggregation in the +<1> `buckets_path` instructs this max_bucket aggregation that we want the minimum value of the `sales` aggregation in the `sales_per_month` date histogram. And the following may be the response: diff --git a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc index b7c86d5826c..6fe91cb45c6 100644 --- a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc @@ -40,7 +40,7 @@ A `moving_avg` aggregation looks like this in isolation: .`moving_avg` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |Path to the metric of interest (see <> for more details |Required | +|`buckets_path` |Path to the metric of interest (see <> for more details |Required | |`model` |The moving average weighting model that we wish to use |Optional |`simple` |`gap_policy` |Determines what should happen when a gap in the data is encountered. |Optional |`insert_zero` |`window` |The size of window to "slide" across the histogram. |Optional |`5` @@ -78,7 +78,7 @@ embedded like any other metric aggregation: Moving averages are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally add normal metrics, such as a `sum`, inside of that histogram. Finally, the `moving_avg` is embedded inside the histogram. 
The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram (see -<> for a description of the syntax for `buckets_path`. +<> for a description of the syntax for `buckets_path`. ==== Models diff --git a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc index 84283bd9f3f..7193510bf1a 100644 --- a/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/serial-diff-aggregation.asciidoc @@ -52,7 +52,7 @@ A `serial_diff` aggregation looks like this in isolation: .`moving_avg` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |Path to the metric of interest (see <> for more details |Required | +|`buckets_path` |Path to the metric of interest (see <> for more details |Required | |`lag` |The historical bucket to subtract from the current value. E.g. a lag of 7 will subtract the current value from the value 7 buckets ago. Must be a positive, non-zero integer |Optional |`1` |`gap_policy` |Determines what should happen when a gap in the data is encountered. |Optional |`insert_zero` @@ -94,7 +94,7 @@ A `serial_diff` aggregation looks like this in isolation: Serial differences are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally add normal metrics, such as a `sum`, inside of that histogram. Finally, the `serial_diff` is embedded inside the histogram. The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram (see -<> for a description of the syntax for `buckets_path`. +<> for a description of the syntax for `buckets_path`. diff --git a/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc b/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc index 3729056d783..56d786f59f0 100644 --- a/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/sum-bucket-aggregation.asciidoc @@ -24,7 +24,7 @@ A `sum_bucket` aggregation looks like this in isolation: .`sum_bucket` Parameters |=== |Parameter Name |Description |Required |Default Value -|`buckets_path` |The path to the buckets we wish to find the sum for (see <> for more +|`buckets_path` |The path to the buckets we wish to find the sum for (see <> for more details) |Required | |`gap_policy` |The policy to apply when gaps are found in the data (see <> for more details)|Optional, defaults to `skip` || @@ -52,13 +52,13 @@ The following snippet calculates the sum of all the total monthly `sales` bucket }, "sum_monthly_sales": { "sum_bucket": { - "buckets_paths": "sales_per_month>sales" <1> + "buckets_path": "sales_per_month>sales" <1> } } } } -------------------------------------------------- -<1> `bucket_paths` instructs this sum_bucket aggregation that we want the sum of the `sales` aggregation in the +<1> `buckets_path` instructs this sum_bucket aggregation that we want the sum of the `sales` aggregation in the `sales_per_month` date histogram. 
And the following may be the response: From dbbecce8f20823f793ba712794e60699001863bb Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Wed, 26 Aug 2015 17:24:26 +0200 Subject: [PATCH 15/51] Sort thread pools by name in Nodes Stats --- .../threadpool/ThreadPoolStats.java | 21 +++- .../threadpool/ThreadPoolStatsTests.java | 118 ++++++++++++++++++ 2 files changed, 138 insertions(+), 1 deletion(-) create mode 100644 core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java index 8b180dab5fb..af53482c629 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; @@ -35,7 +36,7 @@ import java.util.List; */ public class ThreadPoolStats implements Streamable, ToXContent, Iterable { - public static class Stats implements Streamable, ToXContent { + public static class Stats implements Streamable, ToXContent, Comparable { private String name; private int threads; @@ -133,6 +134,23 @@ public class ThreadPoolStats implements Streamable, ToXContent, Iterable stats; @@ -142,6 +160,7 @@ public class ThreadPoolStats implements Streamable, ToXContent, Iterable stats) { + Collections.sort(stats); this.stats = stats; } diff --git a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java new file mode 100644 index 00000000000..0fc4f4c7a7f --- /dev/null +++ b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolStatsTests.java @@ -0,0 +1,118 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.threadpool; + +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; + +public class ThreadPoolStatsTests extends ESTestCase { + + @Test + public void testThreadPoolStatsSort() throws IOException { + List stats = new ArrayList<>(); + stats.add(new ThreadPoolStats.Stats("z", -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats("m", 3, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats("m", 1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats("d", -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats("m", 2, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats("t", -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats("a", -1, 0, 0, 0, 0, 0L)); + + List copy = new ArrayList<>(stats); + Collections.sort(copy); + + List names = new ArrayList<>(copy.size()); + for (ThreadPoolStats.Stats stat : copy) { + names.add(stat.getName()); + } + assertThat(names, contains("a", "d", "m", "m", "m", "t", "z")); + + List threads = new ArrayList<>(copy.size()); + for (ThreadPoolStats.Stats stat : copy) { + threads.add(stat.getThreads()); + } + assertThat(threads, contains(-1, -1, 1, 2, 3,-1,-1)); + } + + @Test + public void testThreadPoolStatsToXContent() throws IOException { + try (BytesStreamOutput os = new BytesStreamOutput()) { + + List stats = new ArrayList<>(); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SUGGEST, -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SEARCH, -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.WARMER, -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.GENERIC, -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.OPTIMIZE, -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.PERCOLATE, -1, 0, 0, 0, 0, 0L)); + stats.add(new ThreadPoolStats.Stats(ThreadPool.Names.SAME, -1, 0, 0, 0, 0, 0L)); + + + try (XContentBuilder builder = new XContentBuilder(XContentType.JSON.xContent(), os)) { + new ThreadPoolStats(stats).toXContent(builder, ToXContent.EMPTY_PARAMS); + } + + try (XContentParser parser = XContentType.JSON.xContent().createParser(os.bytes())) { + XContentParser.Token token = parser.currentToken(); + assertNull(token); + + token = parser.nextToken(); + assertThat(token, equalTo(XContentParser.Token.VALUE_STRING)); + + token = parser.nextToken(); + assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); + + token = parser.nextToken(); + assertThat(token, equalTo(XContentParser.Token.FIELD_NAME)); + + List names = new ArrayList<>(); + while (token == XContentParser.Token.FIELD_NAME) { + names.add(parser.currentName()); + + token = parser.nextToken(); + assertThat(token, equalTo(XContentParser.Token.START_OBJECT)); + + parser.skipChildren(); + token = parser.nextToken(); + } + assertThat(names, contains(ThreadPool.Names.GENERIC, + ThreadPool.Names.OPTIMIZE, + ThreadPool.Names.PERCOLATE, + ThreadPool.Names.SAME, + ThreadPool.Names.SEARCH, + ThreadPool.Names.SUGGEST, + 
ThreadPool.Names.WARMER)); + } + } + } +} From 73785e075e666bcf0b2db548db449c4210d453bc Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Mon, 31 Aug 2015 15:48:32 +0200 Subject: [PATCH 16/51] add default impl for resolveIndex() --- .../elasticsearch/action/delete/TransportDeleteAction.java | 5 ----- .../org/elasticsearch/action/index/TransportIndexAction.java | 5 ----- .../support/replication/TransportReplicationAction.java | 4 +++- 3 files changed, 3 insertions(+), 11 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 8e2df0388e4..5e16916bac1 100644 --- a/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/core/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -94,11 +94,6 @@ public class TransportDeleteAction extends TransportReplicationAction listener) { request.request().routing(state.metaData().resolveIndexRouting(request.request().routing(), request.request().index())); diff --git a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 97620829f3a..83e70c2f504 100644 --- a/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -120,11 +120,6 @@ public class TransportIndexAction extends TransportReplicationAction indexResponseActionListener) { MetaData metaData = clusterService.state().metaData(); diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index 155c30756ca..f6925009189 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -142,7 +142,9 @@ public abstract class TransportReplicationAction Date: Mon, 31 Aug 2015 15:55:00 +0200 Subject: [PATCH 17/51] Add note about multi data path and disk threshold deciders Prior to 2.0 we summed up the available space on all disks on a node due to the RAID-0-like behavior. Now we don't do this anymore and use the min & max disk space to make decisions. Closes #13106 --- docs/reference/modules/cluster/disk_allocator.asciidoc | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/reference/modules/cluster/disk_allocator.asciidoc b/docs/reference/modules/cluster/disk_allocator.asciidoc index 09b504529db..9baf8a379fb 100644 --- a/docs/reference/modules/cluster/disk_allocator.asciidoc +++ b/docs/reference/modules/cluster/disk_allocator.asciidoc @@ -5,7 +5,7 @@ Elasticsearch factors in the available disk space on a node before deciding whether to allocate new shards to that node or to actively relocate shards away from that node.
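Editor's aside: the arithmetic behind the NOTE added in the hunk below, as a small self-contained sketch (not part of the patch); the byte counts mirror the note's two-data-path example:

public class MultiPathDiskUsage {
    public static void main(String[] args) {
        long[] totalBytes = {100, 50}; // two data paths
        long[] freeBytes = {50, 10};   // the second path is nearly full
        long usedSum = 0, totalSum = 0;
        for (int i = 0; i < totalBytes.length; i++) {
            long used = totalBytes[i] - freeBytes[i];
            usedSum += used;
            totalSum += totalBytes[i];
            // Per-path view (2.0.0+): prints 50% used, then 80% used
            System.out.printf("path %d: %d%% used%n", i, 100 * used / totalBytes[i]);
        }
        // Summed view (pre-2.0.0): 90b used out of 150b, i.e. 60% used,
        // which hides the nearly-full second path.
        System.out.printf("summed: %d%% used%n", 100 * usedSum / totalSum);
    }
}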
-Below are the settings that can be configred in the `elasticsearch.yml` config +Below are the settings that can be configured in the `elasticsearch.yml` config file or updated dynamically on a live cluster with the <> API: @@ -67,3 +67,10 @@ PUT /_cluster/settings -------------------------------------------------- // AUTOSENSE +NOTE: Prior to 2.0.0, when using multiple data paths, the disk threshold +decider only factored in the usage across all data paths (if you had two +data paths, one with 50b out of 100b free (50% used) and another with +10b out of 50b free (80% used) it would see the node's disk usage as 90b +out of 150b). In 2.0.0, the minimum and maximum disk usages are tracked +separately. + From 23c1766cdc12b1d6df0aa21a2f3ab4c551aae3ac Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 31 Aug 2015 10:45:46 -0400 Subject: [PATCH 18/51] [packaging] Lock vagrant to virtualbox Virtualbox is the default virtualization provider for vagrant but folks override that from time to time. If they do, the build will fail because the boxes used by the build don't usually support non-virtualbox providers. Closes #13217 --- qa/vagrant/src/dev/ant/vagrant-integration-tests.xml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/qa/vagrant/src/dev/ant/vagrant-integration-tests.xml b/qa/vagrant/src/dev/ant/vagrant-integration-tests.xml index efed4960b5e..67b208803b9 100644 --- a/qa/vagrant/src/dev/ant/vagrant-integration-tests.xml +++ b/qa/vagrant/src/dev/ant/vagrant-integration-tests.xml @@ -68,6 +68,13 @@ That isn't to say that the updates will always be compatible. Its ok to just destroy the boxes if they get busted. --> + + + From fb561ce2286950cdd510de1977c9eb1cb6aaf738 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 31 Aug 2015 10:50:50 -0400 Subject: [PATCH 19/51] [docs] Lock vagrant to virtualbox --- TESTING.asciidoc | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 13ceef4bdd7..4329c88b59e 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -338,8 +338,8 @@ time to setup all the VMs one at a time. Run this to download and setup the VMs we use for testing by default: -------------------------------------------------------- -vagrant up --provision trusty && vagrant halt trusty -vagrant up --provision centos-7 && vagrant halt centos-7 +vagrant up --provision trusty --provider virtualbox && vagrant halt trusty +vagrant up --provision centos-7 --provider virtualbox && vagrant halt centos-7 -------------------------------------------------------- or run this to download and setup all the VMs: ------------------------------------------------------------------------------- vagrant halt for box in $(vagrant status | grep 'poweroff\|not created' | cut -f1 -d' '); do - vagrant up --provision $box + vagrant up --provision $box --provider virtualbox vagrant halt $box done ------------------------------------------------------------------------------- @@ -420,13 +420,13 @@ This is just regular vagrant so you can run normal multi box vagrant commands to test things manually.
Just run: --------------------------------------- -vagrant up trusty && vagrant ssh trusty +vagrant up trusty --provider virtualbox && vagrant ssh trusty --------------------------------------- to get an Ubuntu or ------------------------------------------- -vagrant up centos-7 && vagrant ssh centos-7 +vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7 ------------------------------------------- to get a CentOS. Once you are done with them you should halt them: @@ -469,7 +469,7 @@ vagrant ssh precise -c 'sudo rm -rf /bin'; echo oops All you've got to do to get another one is ---------------------------------------------- -vagrant destroy -f trusty && vagrant up trusty +vagrant destroy -f trusty && vagrant up trusty --provider virtualbox ---------------------------------------------- The whole process takes a minute and a half on a modern laptop, two and a half @@ -508,7 +508,7 @@ mvn -pl distribution/rpm package and in another window: ---------------------------------------------------- -vagrant up centos-7 && vagrant ssh centos-7 +vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7 cd $RPM sudo bats $BATS/*rpm*.bats ---------------------------------------------------- If you wanted to retest all the release artifacts on a single VM you could: mvn -amd -pl distribution install -DskipTests # Copy them all the testroot mvn -Dtests.vagrant -pl qa/vagrant pre-integration-test -vagrant up trusty && vagrant ssh trusty +vagrant up trusty --provider virtualbox && vagrant ssh trusty cd $TESTROOT sudo bats $BATS/*.bats ------------------------------------------------- From 1b84cadb7b56f6a94dcd41c1f6f827bba7ea4064 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Mon, 31 Aug 2015 16:58:03 +0200 Subject: [PATCH 20/51] test: The transport client that interacts with the external cluster should be provided a list of transport client plugins.
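Editor's aside: a hedged sketch of the pattern this patch wires into ExternalTestCluster, using only the builder calls visible in the diff below; the factory class itself is made up for illustration:

import java.util.Collection;

import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.plugins.Plugin;

public class ExternalClusterClientFactory {
    // Register the plugins the external cluster runs with before building the
    // transport client, so e.g. custom transport implementations are also
    // available on the client side.
    static TransportClient build(Settings settings,
                                 Collection<Class<? extends Plugin>> pluginClasses,
                                 TransportAddress... addresses) {
        TransportClient.Builder builder = TransportClient.builder().settings(settings);
        for (Class<? extends Plugin> pluginClass : pluginClasses) {
            builder.addPlugin(pluginClass); // the same call the patch adds below
        }
        return builder.build().addTransportAddresses(addresses);
    }
}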
--- .../java/org/elasticsearch/test/ESIntegTestCase.java | 2 +- .../org/elasticsearch/test/ExternalTestCluster.java | 10 ++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java b/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java index 5327f4e6325..9bb1cfd0cfb 100644 --- a/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java @@ -1739,7 +1739,7 @@ public abstract class ESIntegTestCase extends ESTestCase { throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]"); } } - return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportAddresses); + return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses); } protected Settings externalClusterClientSettings() { diff --git a/core/src/test/java/org/elasticsearch/test/ExternalTestCluster.java b/core/src/test/java/org/elasticsearch/test/ExternalTestCluster.java index 9a919a391cb..90ca7818b9f 100644 --- a/core/src/test/java/org/elasticsearch/test/ExternalTestCluster.java +++ b/core/src/test/java/org/elasticsearch/test/ExternalTestCluster.java @@ -33,10 +33,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.node.internal.InternalSettingsPreparer; +import org.elasticsearch.plugins.Plugin; import java.io.IOException; import java.net.InetSocketAddress; import java.nio.file.Path; +import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.concurrent.atomic.AtomicInteger; @@ -65,7 +67,7 @@ public final class ExternalTestCluster extends TestCluster { private final int numDataNodes; private final int numMasterAndDataNodes; - public ExternalTestCluster(Path tempDir, Settings additionalSettings, TransportAddress... transportAddresses) { + public ExternalTestCluster(Path tempDir, Settings additionalSettings, Collection> pluginClasses, TransportAddress... transportAddresses) { super(0); Settings clientSettings = Settings.settingsBuilder() .put(additionalSettings) @@ -75,7 +77,11 @@ public final class ExternalTestCluster extends TestCluster { .put("path.home", tempDir) .put("node.mode", "network").build(); // we require network here! 
- this.client = TransportClient.builder().settings(clientSettings).build().addTransportAddresses(transportAddresses); + TransportClient.Builder transportClientBuilder = TransportClient.builder().settings(clientSettings); + for (Class pluginClass : pluginClasses) { + transportClientBuilder.addPlugin(pluginClass); + } + this.client = transportClientBuilder.build().addTransportAddresses(transportAddresses); NodesInfoResponse nodeInfos = this.client.admin().cluster().prepareNodesInfo().clear().setSettings(true).setHttp(true).get(); httpAddresses = new InetSocketAddress[nodeInfos.getNodes().length]; From db7aecab4dd8566d9c3d45db3d43d05d6680dfa4 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 31 Aug 2015 17:03:45 +0200 Subject: [PATCH 21/51] update list of available os stats os cpu information is no longer exposed through the nodes stats api --- docs/reference/cluster/nodes-stats.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/cluster/nodes-stats.asciidoc b/docs/reference/cluster/nodes-stats.asciidoc index 1425bfee1ee..b22312e2130 100644 --- a/docs/reference/cluster/nodes-stats.asciidoc +++ b/docs/reference/cluster/nodes-stats.asciidoc @@ -39,7 +39,7 @@ of `indices`, `os`, `process`, `jvm`, `transport`, `http`, pools, number of loaded/unloaded classes `os`:: - Operating system stats, load average, cpu, mem, swap + Operating system stats, load average, mem, swap (see <>) `process`:: From d81f426b682c541ce5226eb2f621a99289426c0f Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Mon, 24 Aug 2015 10:59:51 +0200 Subject: [PATCH 22/51] Make refresh a replicated action prerequisite to #9421 see also #12600 --- .../action/ActionWriteResponse.java | 2 +- .../admin/indices/flush/FlushResponse.java | 12 - .../indices/flush/ShardFlushRequest.java | 26 +- .../indices/flush/ShardFlushResponse.java | 37 -- .../indices/flush/TransportFlushAction.java | 82 +---- .../flush/TransportShardFlushAction.java | 102 ++++++ .../admin/indices/refresh/RefreshAction.java | 2 - .../admin/indices/refresh/RefreshRequest.java | 2 - .../indices/refresh/RefreshResponse.java | 16 - .../indices/refresh/ShardRefreshRequest.java | 37 -- .../indices/refresh/ShardRefreshResponse.java | 36 -- .../refresh/TransportRefreshAction.java | 84 +---- .../refresh/TransportShardRefreshAction.java | 103 ++++++ .../action/bulk/BulkShardRequest.java | 11 +- .../action/bulk/TransportShardBulkAction.java | 2 +- .../support/broadcast/BroadcastRequest.java | 6 +- .../support/broadcast/BroadcastResponse.java | 6 +- .../replication/ReplicationRequest.java | 23 +- .../TransportBroadcastReplicationAction.java | 162 +++++++++ .../TransportReplicationAction.java | 27 +- .../action/IndicesRequestIT.java | 42 +-- .../admin/indices/flush/FlushBlocksIT.java | 5 +- .../indices/optimize/OptimizeBlocksIT.java | 2 +- .../indices/refresh/RefreshBlocksIT.java | 4 +- .../BroadcastReplicationTests.java | 315 ++++++++++++++++++ .../ClusterStateCreationUtils.java | 230 +++++++++++++ .../replication/ShardReplicationTests.java | 106 +----- .../store/IndicesStoreIntegrationIT.java | 4 +- .../test/cluster/TestClusterService.java | 4 +- .../hamcrest/ElasticsearchAssertions.java | 17 + 30 files changed, 1049 insertions(+), 458 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java create mode 100644 core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java delete mode 100644 
core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java delete mode 100644 core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java create mode 100644 core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java create mode 100644 core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java create mode 100644 core/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java create mode 100644 core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java diff --git a/core/src/main/java/org/elasticsearch/action/ActionWriteResponse.java b/core/src/main/java/org/elasticsearch/action/ActionWriteResponse.java index a63f6dcd9fa..f4152ac85e4 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionWriteResponse.java +++ b/core/src/main/java/org/elasticsearch/action/ActionWriteResponse.java @@ -39,7 +39,7 @@ import java.util.Collections; /** * Base class for write action responses. */ -public abstract class ActionWriteResponse extends ActionResponse { +public class ActionWriteResponse extends ActionResponse { public final static ActionWriteResponse.ShardInfo.Failure[] EMPTY = new ActionWriteResponse.ShardInfo.Failure[0]; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java index a158b02611b..c2ac7002645 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/FlushResponse.java @@ -21,10 +21,7 @@ package org.elasticsearch.action.admin.indices.flush; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import java.io.IOException; import java.util.List; /** @@ -42,13 +39,4 @@ public class FlushResponse extends BroadcastResponse { super(totalShards, successfulShards, failedShards, shardFailures); } - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java index 0e38181fa61..10db46c1da0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushRequest.java @@ -19,27 +19,27 @@ package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; +import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.index.shard.ShardId; import java.io.IOException; -/** - * - */ -class ShardFlushRequest extends BroadcastShardRequest { +public class ShardFlushRequest extends ReplicationRequest { + private FlushRequest request = new FlushRequest(); - ShardFlushRequest() { - } - - ShardFlushRequest(ShardId shardId, FlushRequest 
request) { - super(shardId, request); + public ShardFlushRequest(FlushRequest request) { + super(request); this.request = request; } + public ShardFlushRequest() { + } + + FlushRequest getRequest() { + return request; + } @Override public void readFrom(StreamInput in) throws IOException { @@ -53,7 +53,5 @@ class ShardFlushRequest extends BroadcastShardRequest { request.writeTo(out); } - FlushRequest getRequest() { - return request; - } + } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java deleted file mode 100644 index 6f2cc6a5522..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/ShardFlushResponse.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.flush; - -import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; -import org.elasticsearch.index.shard.ShardId; - -/** - * - */ -class ShardFlushResponse extends BroadcastShardResponse { - - ShardFlushResponse() { - - } - - ShardFlushResponse(ShardId shardId) { - super(shardId); - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 323a6cc2382..d43cbb25ab5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -19,99 +19,45 @@ package org.elasticsearch.action.admin.indices.flush; +import org.elasticsearch.action.ActionWriteResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; +import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import 
org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.ArrayList; import java.util.List; -import java.util.concurrent.atomic.AtomicReferenceArray; /** * Flush Action. */ -public class TransportFlushAction extends TransportBroadcastAction { - - private final IndicesService indicesService; +public class TransportFlushAction extends TransportBroadcastReplicationAction { @Inject public TransportFlushAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, IndicesService indicesService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, FlushAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, - FlushRequest.class, ShardFlushRequest.class, ThreadPool.Names.FLUSH); - this.indicesService = indicesService; + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + TransportShardFlushAction replicatedFlushAction) { + super(FlushAction.NAME, FlushRequest.class, settings, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, replicatedFlushAction); } @Override - protected FlushResponse newResponse(FlushRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { - int successfulShards = 0; - int failedShards = 0; - List shardFailures = null; - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // a non active shard, ignore - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); - } else { - successfulShards++; - } - } - return new FlushResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures); + protected ActionWriteResponse newShardResponse() { + return new ActionWriteResponse(); } @Override - protected ShardFlushRequest newShardRequest(int numShards, ShardRouting shard, FlushRequest request) { - return new ShardFlushRequest(shard.shardId(), request); + protected ShardFlushRequest newShardRequest(FlushRequest request, ShardId shardId) { + return new ShardFlushRequest(request).setShardId(shardId).timeout("0ms"); } @Override - protected ShardFlushResponse newShardResponse() { - return new ShardFlushResponse(); - } - - @Override - protected ShardFlushResponse shardOperation(ShardFlushRequest request) { - IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id()); - indexShard.flush(request.getRequest()); - return new ShardFlushResponse(request.shardId()); - } - - /** - * The refresh request works against *all* shards. 
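For orientation, the flush broadcast above no longer runs a per-shard callback itself; it hands one replicated sub-request per shard copy-set to TransportShardFlushAction. A minimal sketch of that construction, mirroring newShardRequest above — flushRequest and shardId are hypothetical stand-ins, and the "0ms" timeout is what makes an unavailable copy-set fail immediately instead of stalling the whole broadcast:

    import org.elasticsearch.action.admin.indices.flush.FlushRequest;
    import org.elasticsearch.action.admin.indices.flush.ShardFlushRequest;
    import org.elasticsearch.index.shard.ShardId;

    class ShardFlushRequestSketch {
        // One replicated sub-request per shard copy-set: the wrapped FlushRequest
        // is replayed on the primary and then on each of its replicas.
        static ShardFlushRequest forShard(FlushRequest flushRequest, ShardId shardId) {
            return new ShardFlushRequest(flushRequest).setShardId(shardId).timeout("0ms");
        }
    }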
- */ - @Override - protected GroupShardsIterator shards(ClusterState clusterState, FlushRequest request, String[] concreteIndices) { - return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true, true); - } - - @Override - protected ClusterBlockException checkGlobalBlock(ClusterState state, FlushRequest request) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); - } - - @Override - protected ClusterBlockException checkRequestBlock(ClusterState state, FlushRequest countRequest, String[] concreteIndices) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices); + protected FlushResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List shardFailures) { + return new FlushResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java new file mode 100644 index 00000000000..9b7c0643be2 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.ActionWriteResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +/** + * + */ +public class TransportShardFlushAction extends TransportReplicationAction { + + public static final String NAME = "indices:data/write/flush"; + + @Inject + public TransportShardFlushAction(Settings settings, TransportService transportService, ClusterService clusterService, + IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, + MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction, + actionFilters, indexNameExpressionResolver, ShardFlushRequest.class, ShardFlushRequest.class, ThreadPool.Names.FLUSH); + } + + @Override + protected ActionWriteResponse newResponseInstance() { + return new ActionWriteResponse(); + } + + @Override + protected Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable { + IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id()); + indexShard.flush(shardRequest.request.getRequest()); + logger.trace("{} flush request executed on primary", indexShard.shardId()); + return new Tuple<>(new ActionWriteResponse(), shardRequest.request); + } + + @Override + protected void shardOperationOnReplica(ShardId shardId, ShardFlushRequest request) { + IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id()); + indexShard.flush(request.getRequest()); + logger.trace("{} flush request executed on replica", indexShard.shardId()); + } + + @Override + protected boolean checkWriteConsistency() { + return false; + } + + @Override + protected ShardIterator shards(ClusterState clusterState, InternalRequest request) { + return clusterState.getRoutingTable().indicesRouting().get(request.concreteIndex()).getShards().get(request.request().shardId().getId()).shardsIt(); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, InternalRequest request) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, new 
String[]{request.concreteIndex()}); + } + + @Override + protected boolean shouldExecuteReplication(Settings settings) { + return true; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java index 79db06ec3f0..cfc2fb3a09f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshAction.java @@ -22,8 +22,6 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -/** - */ public class RefreshAction extends Action { public static final RefreshAction INSTANCE = new RefreshAction(); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java index 8f871307135..b0cb49c8874 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshRequest.java @@ -33,7 +33,6 @@ import org.elasticsearch.action.support.broadcast.BroadcastRequest; */ public class RefreshRequest extends BroadcastRequest { - RefreshRequest() { } @@ -48,5 +47,4 @@ public class RefreshRequest extends BroadcastRequest { public RefreshRequest(String... indices) { super(indices); } - } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java index 28295fdd0a0..ba3ec31c6a5 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/RefreshResponse.java @@ -21,34 +21,18 @@ package org.elasticsearch.action.admin.indices.refresh; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.broadcast.BroadcastResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import java.io.IOException; import java.util.List; /** * The response of a refresh action. - * - * */ public class RefreshResponse extends BroadcastResponse { RefreshResponse() { - } RefreshResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { super(totalShards, successfulShards, failedShards, shardFailures); } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java deleted file mode 100644 index 37ea2cc46de..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshRequest.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.indices.refresh; - -import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; -import org.elasticsearch.index.shard.ShardId; - -/** - * - */ -class ShardRefreshRequest extends BroadcastShardRequest { - - ShardRefreshRequest() { - } - - ShardRefreshRequest(ShardId shardId, RefreshRequest request) { - super(shardId, request); - } - -} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java deleted file mode 100644 index 4de0f5877dd..00000000000 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/ShardRefreshResponse.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.admin.indices.refresh; - -import org.elasticsearch.action.support.broadcast.BroadcastShardResponse; -import org.elasticsearch.index.shard.ShardId; - -/** - * - */ -class ShardRefreshResponse extends BroadcastShardResponse { - - ShardRefreshResponse() { - } - - ShardRefreshResponse(ShardId shardId) { - super(shardId); - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index 2eead86e202..6e7e3459b11 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -19,100 +19,46 @@ package org.elasticsearch.action.admin.indices.refresh; +import org.elasticsearch.action.ActionWriteResponse; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DefaultShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; -import org.elasticsearch.action.support.broadcast.TransportBroadcastAction; +import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction; import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.ArrayList; import java.util.List; -import java.util.concurrent.atomic.AtomicReferenceArray; /** * Refresh action. 
*/ -public class TransportRefreshAction extends TransportBroadcastAction { - - private final IndicesService indicesService; +public class TransportRefreshAction extends TransportBroadcastReplicationAction { @Inject public TransportRefreshAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, - TransportService transportService, IndicesService indicesService, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, RefreshAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, - RefreshRequest.class, ShardRefreshRequest.class, ThreadPool.Names.REFRESH); - this.indicesService = indicesService; + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + TransportShardRefreshAction shardRefreshAction) { + super(RefreshAction.NAME, RefreshRequest.class, settings, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, shardRefreshAction); } @Override - protected RefreshResponse newResponse(RefreshRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { - int successfulShards = 0; - int failedShards = 0; - List shardFailures = null; - for (int i = 0; i < shardsResponses.length(); i++) { - Object shardResponse = shardsResponses.get(i); - if (shardResponse == null) { - // non active shard, ignore - } else if (shardResponse instanceof BroadcastShardOperationFailedException) { - failedShards++; - if (shardFailures == null) { - shardFailures = new ArrayList<>(); - } - shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse)); - } else { - successfulShards++; - } - } - return new RefreshResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures); + protected ActionWriteResponse newShardResponse() { + return new ActionWriteResponse(); } @Override - protected ShardRefreshRequest newShardRequest(int numShards, ShardRouting shard, RefreshRequest request) { - return new ShardRefreshRequest(shard.shardId(), request); + protected ReplicationRequest newShardRequest(RefreshRequest request, ShardId shardId) { + return new ReplicationRequest(request).setShardId(shardId).timeout("0ms"); } @Override - protected ShardRefreshResponse newShardResponse() { - return new ShardRefreshResponse(); - } - - @Override - protected ShardRefreshResponse shardOperation(ShardRefreshRequest request) { - IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id()); - indexShard.refresh("api"); - logger.trace("{} refresh request executed", indexShard.shardId()); - return new ShardRefreshResponse(request.shardId()); - } - - /** - * The refresh request works against *all* shards. 
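A side effect worth calling out here: the rewritten refresh (and flush) responses count shard copies rather than the shard instances the old broadcast action actually reached, so unassigned replicas now show up in the totals. Illustrative arithmetic only, with hypothetical numbers:

    class CopyCountingSketch {
        public static void main(String[] args) {
            int primaries = 3;                         // hypothetical index layout
            int copiesPerShard = 1 /* primary */ + 1;  // one configured replica
            // The new response reports primaries * copiesPerShard as the shard
            // total, even when some of those copies were never assigned to a node.
            System.out.println(primaries * copiesPerShard); // 6
        }
    }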
- */ - @Override - protected GroupShardsIterator shards(ClusterState clusterState, RefreshRequest request, String[] concreteIndices) { - return clusterState.routingTable().allAssignedShardsGrouped(concreteIndices, true, true); - } - - @Override - protected ClusterBlockException checkGlobalBlock(ClusterState state, RefreshRequest request) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); - } - - @Override - protected ClusterBlockException checkRequestBlock(ClusterState state, RefreshRequest countRequest, String[] concreteIndices) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices); + protected RefreshResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List shardFailures) { + return new RefreshResponse(totalNumCopies, successfulShards, failedShards, shardFailures); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java new file mode 100644 index 00000000000..2604d67b1b0 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.refresh; + +import org.elasticsearch.action.ActionWriteResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.action.support.replication.TransportReplicationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +/** + * + */ +public class TransportShardRefreshAction extends TransportReplicationAction { + + public static final String NAME = "indices:data/write/refresh"; + + @Inject + public TransportShardRefreshAction(Settings settings, TransportService transportService, ClusterService clusterService, + IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, + MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction, + actionFilters, indexNameExpressionResolver, ReplicationRequest.class, ReplicationRequest.class, ThreadPool.Names.REFRESH); + } + + @Override + protected ActionWriteResponse newResponseInstance() { + return new ActionWriteResponse(); + } + + @Override + protected Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable { + IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id()); + indexShard.refresh("api"); + logger.trace("{} refresh request executed on primary", indexShard.shardId()); + return new Tuple<>(new ActionWriteResponse(), shardRequest.request); + } + + @Override + protected void shardOperationOnReplica(ShardId shardId, ReplicationRequest request) { + IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).shardSafe(shardId.id()); + indexShard.refresh("api"); + logger.trace("{} refresh request executed on replica", indexShard.shardId()); + } + + @Override + protected boolean checkWriteConsistency() { + return false; + } + + @Override + protected ShardIterator shards(ClusterState clusterState, InternalRequest request) { + return clusterState.getRoutingTable().indicesRouting().get(request.concreteIndex()).getShards().get(request.request().shardId().getId()).shardsIt(); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, InternalRequest request) { + return 
state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, new String[]{request.concreteIndex()}); + } + + @Override + protected boolean shouldExecuteReplication(Settings settings) { + return true; + } +} diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java index a1eb616b1ee..6bda7b259ee 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkShardRequest.java @@ -22,6 +22,7 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.action.support.replication.ReplicationRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.ArrayList; @@ -32,8 +33,6 @@ import java.util.List; */ public class BulkShardRequest extends ReplicationRequest { - private int shardId; - private BulkItemRequest[] items; private boolean refresh; @@ -44,7 +43,7 @@ public class BulkShardRequest extends ReplicationRequest { BulkShardRequest(BulkRequest bulkRequest, String index, int shardId, boolean refresh, BulkItemRequest[] items) { super(bulkRequest); this.index = index; - this.shardId = shardId; + this.setShardId(new ShardId(index, shardId)); this.items = items; this.refresh = refresh; } @@ -53,10 +52,6 @@ public class BulkShardRequest extends ReplicationRequest { return this.refresh; } - int shardId() { - return shardId; - } - BulkItemRequest[] items() { return items; } @@ -75,7 +70,6 @@ public class BulkShardRequest extends ReplicationRequest { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeVInt(shardId); out.writeVInt(items.length); for (BulkItemRequest item : items) { if (item != null) { @@ -91,7 +85,6 @@ public class BulkShardRequest extends ReplicationRequest { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shardId = in.readVInt(); items = new BulkItemRequest[in.readVInt()]; for (int i = 0; i < items.length; i++) { if (in.readBoolean()) { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 2ca2dfe142a..a9aa3dcb31d 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -109,7 +109,7 @@ public class TransportShardBulkAction extends TransportReplicationAction extends ActionRequest implements IndicesRequest.Replaceable { +public class BroadcastRequest extends ActionRequest implements IndicesRequest.Replaceable { protected String[] indices; private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); - protected BroadcastRequest() { + public BroadcastRequest() { } diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java index 560c7ec9869..54d6220fd34 100644 --- a/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java +++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/BroadcastResponse.java @@ -32,17 +32,17 @@ import static org.elasticsearch.action.support.DefaultShardOperationFailedExcept /** * Base class for 
all broadcast operation based responses. */ -public abstract class BroadcastResponse extends ActionResponse { +public class BroadcastResponse extends ActionResponse { private static final ShardOperationFailedException[] EMPTY = new ShardOperationFailedException[0]; private int totalShards; private int successfulShards; private int failedShards; private ShardOperationFailedException[] shardFailures = EMPTY; - protected BroadcastResponse() { + public BroadcastResponse() { } - protected BroadcastResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { + public BroadcastResponse(int totalShards, int successfulShards, int failedShards, List shardFailures) { this.totalShards = totalShards; this.successfulShards = successfulShards; this.failedShards = failedShards; diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java index 93907cf0aa6..37244c71efe 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/ReplicationRequest.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -37,7 +38,7 @@ import static org.elasticsearch.action.ValidateActions.addValidationError; /** * */ -public abstract class ReplicationRequest extends ActionRequest implements IndicesRequest { +public class ReplicationRequest extends ActionRequest implements IndicesRequest { public static final TimeValue DEFAULT_TIMEOUT = new TimeValue(1, TimeUnit.MINUTES); @@ -49,14 +50,14 @@ public abstract class ReplicationRequest extends A private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT; private volatile boolean canHaveDuplicates = false; - protected ReplicationRequest() { + public ReplicationRequest() { } /** * Creates a new request that inherits headers and context from the request provided as argument. */ - protected ReplicationRequest(ActionRequest request) { + public ReplicationRequest(ActionRequest request) { super(request); } @@ -133,6 +134,16 @@ public abstract class ReplicationRequest extends A return this.consistencyLevel; } + /** + * @return the shardId of the shard on which this operation should be executed. + * Can be null when the shardId is determined by a single document (index, type, id), for example for an index or delete request. + */ + public + @Nullable + ShardId shardId() { + return internalShardId; + } + + /** + * Sets the consistency level of write. 
Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT} */ @@ -173,4 +184,10 @@ public abstract class ReplicationRequest extends A out.writeString(index); out.writeBoolean(canHaveDuplicates); } + + public T setShardId(ShardId shardId) { + this.internalShardId = shardId; + this.index = shardId.getIndex(); + return (T) this; + } } diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java new file mode 100644 index 00000000000..42a83630a53 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportBroadcastReplicationAction.java @@ -0,0 +1,162 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.support.replication; + +import com.carrotsearch.hppc.cursors.IntObjectCursor; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionWriteResponse; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; + +/** + * Base class for requests that should be executed on all shards of an index or several indices. 
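To make the aggregation below easier to follow: each shard copy-set answers with an ActionWriteResponse whose ShardInfo carries total/successful/failed copy counts, and finishAndNotifyListener simply sums those counts across copy-sets. A stripped-down sketch of that roll-up, where the int[] rows are an illustrative stand-in for ShardInfo:

    class ShardInfoRollUpSketch {
        // Each row stands for one shard copy-set: {total, successful, failed}.
        static int[] rollUp(int[][] shardInfos) {
            int total = 0, successful = 0, failed = 0;
            for (int[] info : shardInfos) {
                total += info[0];
                successful += info[1];
                failed += info[2];
            }
            return new int[]{total, successful, failed};
        }
    }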
+ * This action sends shard requests to all primary shards of the indices and they are then replicated like write requests + */ +public abstract class TransportBroadcastReplicationAction extends HandledTransportAction { + + private final TransportReplicationAction replicatedBroadcastShardAction; + private final ClusterService clusterService; + + public TransportBroadcastReplicationAction(String name, Class request, Settings settings, ThreadPool threadPool, ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportReplicationAction replicatedBroadcastShardAction) { + super(settings, name, threadPool, transportService, actionFilters, indexNameExpressionResolver, request); + this.replicatedBroadcastShardAction = replicatedBroadcastShardAction; + this.clusterService = clusterService; + } + + @Override + protected void doExecute(final Request request, final ActionListener listener) { + final ClusterState clusterState = clusterService.state(); + List shards = shards(request, clusterState); + final CopyOnWriteArrayList shardsResponses = new CopyOnWriteArrayList(); + if (shards.size() == 0) { + finishAndNotifyListener(listener, shardsResponses); + } + final CountDown responsesCountDown = new CountDown(shards.size()); + for (final ShardId shardId : shards) { + ActionListener shardActionListener = new ActionListener() { + @Override + public void onResponse(ShardResponse shardResponse) { + shardsResponses.add(shardResponse); + logger.trace("{}: got response from {}", actionName, shardId); + if (responsesCountDown.countDown()) { + finishAndNotifyListener(listener, shardsResponses); + } + } + + @Override + public void onFailure(Throwable e) { + logger.trace("{}: got failure from {}", actionName, shardId); + int totalNumCopies = clusterState.getMetaData().index(shardId.index().getName()).getNumberOfReplicas() + 1; + ShardResponse shardResponse = newShardResponse(); + ActionWriteResponse.ShardInfo.Failure[] failures; + if (ExceptionsHelper.unwrap(e, UnavailableShardsException.class) != null) { + failures = new ActionWriteResponse.ShardInfo.Failure[0]; + } else { + ActionWriteResponse.ShardInfo.Failure failure = new ActionWriteResponse.ShardInfo.Failure(shardId.index().name(), shardId.id(), null, e, ExceptionsHelper.status(e), true); + failures = new ActionWriteResponse.ShardInfo.Failure[totalNumCopies]; + Arrays.fill(failures, failure); + } + shardResponse.setShardInfo(new ActionWriteResponse.ShardInfo(totalNumCopies, 0, failures)); + shardsResponses.add(shardResponse); + if (responsesCountDown.countDown()) { + finishAndNotifyListener(listener, shardsResponses); + } + } + }; + shardExecute(request, shardId, shardActionListener); + } + } + + protected void shardExecute(Request request, ShardId shardId, ActionListener shardActionListener) { + replicatedBroadcastShardAction.execute(newShardRequest(request, shardId), shardActionListener); + } + + /** + * @return all shard ids the request should run on + */ + protected List shards(Request request, ClusterState clusterState) { + List shardIds = new ArrayList<>(); + String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request); + for (String index : concreteIndices) { + IndexMetaData indexMetaData = clusterState.metaData().getIndices().get(index); + if (indexMetaData != null) { + for (IntObjectCursor shardRouting : clusterState.getRoutingTable().indicesRouting().get(index).getShards()) { + 
shardIds.add(shardRouting.value.shardId()); + } + } + } + return shardIds; + } + + protected abstract ShardResponse newShardResponse(); + + protected abstract ShardRequest newShardRequest(Request request, ShardId shardId); + + private void finishAndNotifyListener(ActionListener listener, CopyOnWriteArrayList shardsResponses) { + logger.trace("{}: got all shard responses", actionName); + int successfulShards = 0; + int failedShards = 0; + int totalNumCopies = 0; + List shardFailures = null; + for (int i = 0; i < shardsResponses.size(); i++) { + ActionWriteResponse shardResponse = shardsResponses.get(i); + if (shardResponse == null) { + // non active shard, ignore + } else { + failedShards += shardResponse.getShardInfo().getFailed(); + successfulShards += shardResponse.getShardInfo().getSuccessful(); + totalNumCopies += shardResponse.getShardInfo().getTotal(); + if (shardFailures == null) { + shardFailures = new ArrayList<>(); + } + for (ActionWriteResponse.ShardInfo.Failure failure : shardResponse.getShardInfo().getFailures()) { + shardFailures.add(new DefaultShardOperationFailedException(new BroadcastShardOperationFailedException(new ShardId(failure.index(), failure.shardId()), failure.getCause()))); + } + } + } + listener.onResponse(newResponse(successfulShards, failedShards, totalNumCopies, shardFailures)); + } + + protected abstract BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List shardFailures); +} diff --git a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index f6925009189..608575007f4 100644 --- a/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -362,6 +362,7 @@ public abstract class TransportReplicationAction()), new IndexNameExpressionResolver(Settings.EMPTY), null); + } + + @AfterClass + public static void afterClass() { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; + } + + @Test + public void testNotStartedPrimary() throws InterruptedException, ExecutionException, IOException { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + clusterService.setState(state(index, randomBoolean(), + randomBoolean() ? 
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + Future response = (broadcastReplicationAction.execute(new BroadcastRequest().indices(index))); + for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { + shardRequests.v2().onFailure(new UnavailableShardsException(shardId, "test exception expected")); + } + response.get(); + logger.info("total shards: {}, ", response.get().getTotalShards()); + // we expect no failures here because UnavailableShardsException does not count as failed + assertBroadcastResponse(2, 0, 0, response.get(), null); + } + + @Test + public void testStartedPrimary() throws InterruptedException, ExecutionException, IOException { + final String index = "test"; + clusterService.setState(state(index, randomBoolean(), + ShardRoutingState.STARTED)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + Future response = (broadcastReplicationAction.execute(new BroadcastRequest().indices(index))); + for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { + ActionWriteResponse actionWriteResponse = new ActionWriteResponse(); + actionWriteResponse.setShardInfo(new ActionWriteResponse.ShardInfo(1, 1, new ActionWriteResponse.ShardInfo.Failure[0])); + shardRequests.v2().onResponse(actionWriteResponse); + } + logger.info("total shards: {}, ", response.get().getTotalShards()); + assertBroadcastResponse(1, 1, 0, response.get(), null); + } + + @Test + public void testResultCombine() throws InterruptedException, ExecutionException, IOException { + final String index = "test"; + int numShards = randomInt(3); + clusterService.setState(stateWithAssignedPrimariesAndOneReplica(index, numShards)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + Future response = (broadcastReplicationAction.execute(new BroadcastRequest().indices(index))); + int succeeded = 0; + int failed = 0; + for (Tuple> shardRequests : broadcastReplicationAction.capturedShardRequests) { + if (randomBoolean()) { + ActionWriteResponse.ShardInfo.Failure[] failures = new ActionWriteResponse.ShardInfo.Failure[0]; + int shardsSucceeded = randomInt(1) + 1; + succeeded += shardsSucceeded; + ActionWriteResponse actionWriteResponse = new ActionWriteResponse(); + if (shardsSucceeded == 1 && randomBoolean()) { + //sometimes add failure (no failure means shard unavailable) + failures = new ActionWriteResponse.ShardInfo.Failure[1]; + failures[0] = new ActionWriteResponse.ShardInfo.Failure(index, shardRequests.v1().id(), null, new Exception("pretend shard failed"), RestStatus.GATEWAY_TIMEOUT, false); + failed++; + } + actionWriteResponse.setShardInfo(new ActionWriteResponse.ShardInfo(2, shardsSucceeded, failures)); + shardRequests.v2().onResponse(actionWriteResponse); + } else { + // sometimes fail + failed += 2; + // just add a general exception and see if failed shards will be incremented by 2 + shardRequests.v2().onFailure(new Exception("pretend shard failed")); + } + } + assertBroadcastResponse(2 * numShards, succeeded, failed, response.get(), Exception.class); + } + + @Test + public void testNoShards() throws InterruptedException, ExecutionException, IOException { + clusterService.setState(stateWithNoShard()); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + BroadcastResponse response = 
executeAndAssertImmediateResponse(broadcastReplicationAction, new BroadcastRequest()); + assertBroadcastResponse(0, 0, 0, response, null); + } + + @Test + public void testShardsList() throws InterruptedException, ExecutionException { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + ClusterState clusterState = state(index, randomBoolean(), + randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + List shards = broadcastReplicationAction.shards(new BroadcastRequest().indices(shardId.index().name()), clusterState); + assertThat(shards.size(), equalTo(1)); + assertThat(shards.get(0), equalTo(shardId)); + } + + private class TestBroadcastReplicationAction extends TransportBroadcastReplicationAction { + protected final Set>> capturedShardRequests = ConcurrentCollections.newConcurrentSet(); + + public TestBroadcastReplicationAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, TransportReplicationAction replicatedBroadcastShardAction) { + super("test-broadcast-replication-action", BroadcastRequest.class, settings, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, replicatedBroadcastShardAction); + } + + @Override + protected ActionWriteResponse newShardResponse() { + return new ActionWriteResponse(); + } + + @Override + protected ReplicationRequest newShardRequest(BroadcastRequest request, ShardId shardId) { + return new ReplicationRequest().setShardId(shardId); + } + + @Override + protected BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List shardFailures) { + return new BroadcastResponse(totalNumCopies, successfulShards, failedShards, shardFailures); + } + + @Override + protected void shardExecute(BroadcastRequest request, ShardId shardId, ActionListener shardActionListener) { + capturedShardRequests.add(new Tuple<>(shardId, shardActionListener)); + } + + protected void clearCapturedRequests() { + capturedShardRequests.clear(); + } + } + + public FlushResponse assertImmediateResponse(String index, TransportFlushAction flushAction) throws InterruptedException, ExecutionException { + Date beginDate = new Date(); + FlushResponse flushResponse = flushAction.execute(new FlushRequest(index)).get(); + Date endDate = new Date(); + long maxTime = 500; + assertThat("this should not take longer than " + maxTime + " ms. The request hangs somewhere", endDate.getTime() - beginDate.getTime(), lessThanOrEqualTo(maxTime)); + return flushResponse; + } + + @Test + public void testTimeoutFlush() throws ExecutionException, InterruptedException { + + final String index = "test"; + clusterService.setState(state(index, randomBoolean(), + randomBoolean() ? 
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + TransportShardFlushAction shardFlushAction = new TransportShardFlushAction(Settings.EMPTY, transportService, clusterService, + null, threadPool, null, + null, new ActionFilters(new HashSet()), new IndexNameExpressionResolver(Settings.EMPTY)); + TransportFlushAction flushAction = new TransportFlushAction(Settings.EMPTY, threadPool, clusterService, + transportService, new ActionFilters(new HashSet()), new IndexNameExpressionResolver(Settings.EMPTY), + shardFlushAction); + FlushResponse flushResponse = (FlushResponse) executeAndAssertImmediateResponse(flushAction, new FlushRequest(index)); + logger.info("total shards: {}, ", flushResponse.getTotalShards()); + assertBroadcastResponse(2, 0, 0, flushResponse, UnavailableShardsException.class); + + ClusterBlocks.Builder block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertFailure("all shards should fail with cluster block", executeAndAssertImmediateResponse(flushAction, new FlushRequest(index)), ClusterBlockException.class); + + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertFailure("all shards should fail with cluster block", executeAndAssertImmediateResponse(flushAction, new FlushRequest(index)), ClusterBlockException.class); + + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertFailure("all shards should fail with cluster block", executeAndAssertImmediateResponse(flushAction, new FlushRequest(index)), ClusterBlockException.class); + } + + void assertFailure(String msg, BroadcastResponse broadcastResponse, Class klass) throws InterruptedException { + assertThat(broadcastResponse.getSuccessfulShards(), equalTo(0)); + assertThat(broadcastResponse.getTotalShards(), equalTo(broadcastResponse.getFailedShards())); + for (int i = 0; i < broadcastResponse.getFailedShards(); i++) { + assertThat(msg, broadcastResponse.getShardFailures()[i].getCause().getCause(), instanceOf(klass)); + } + } + + @Test + public void testTimeoutRefresh() throws ExecutionException, InterruptedException { + + final String index = "test"; + clusterService.setState(state(index, randomBoolean(), + randomBoolean() ? 
ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED, ShardRoutingState.UNASSIGNED)); + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + TransportShardRefreshAction shardrefreshAction = new TransportShardRefreshAction(Settings.EMPTY, transportService, clusterService, + null, threadPool, null, + null, new ActionFilters(new HashSet()), new IndexNameExpressionResolver(Settings.EMPTY)); + TransportRefreshAction refreshAction = new TransportRefreshAction(Settings.EMPTY, threadPool, clusterService, + transportService, new ActionFilters(new HashSet()), new IndexNameExpressionResolver(Settings.EMPTY), + shardrefreshAction); + RefreshResponse refreshResponse = (RefreshResponse) executeAndAssertImmediateResponse(refreshAction, new RefreshRequest(index)); + assertBroadcastResponse(2, 0, 0, refreshResponse, UnavailableShardsException.class); + + ClusterBlocks.Builder block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertFailure("all shards should fail with cluster block", executeAndAssertImmediateResponse(refreshAction, new RefreshRequest(index)), ClusterBlockException.class); + + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertFailure("all shards should fail with cluster block", executeAndAssertImmediateResponse(refreshAction, new RefreshRequest(index)), ClusterBlockException.class); + + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertFailure("all shards should fail with cluster block", executeAndAssertImmediateResponse(refreshAction, new RefreshRequest(index)), ClusterBlockException.class); + } + + public BroadcastResponse executeAndAssertImmediateResponse(TransportBroadcastReplicationAction broadcastAction, BroadcastRequest request) throws InterruptedException, ExecutionException { + return (BroadcastResponse) broadcastAction.execute(request).actionGet("5s"); + } + + private void assertBroadcastResponse(int total, int successful, int failed, BroadcastResponse response, Class exceptionClass) { + assertThat(response.getSuccessfulShards(), equalTo(successful)); + assertThat(response.getTotalShards(), equalTo(total)); + assertThat(response.getFailedShards(), equalTo(failed)); + for (int i = 0; i < failed; i++) { + assertThat(response.getShardFailures()[0].getCause().getCause(), instanceOf(exceptionClass)); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java new file mode 100644 index 00000000000..e5143a3ef09 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java @@ -0,0 +1,230 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + + +package org.elasticsearch.action.support.replication; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.index.shard.ShardId; + +import java.util.HashSet; +import java.util.Set; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.ESTestCase.randomFrom; +import static org.elasticsearch.test.ESTestCase.randomIntBetween; + +/** + * Helper methods for generating cluster states + */ +public class ClusterStateCreationUtils { + + + /** + * Creates cluster state with an index that has one shard and #(replicaStates) replicas + * + * @param index name of the index + * @param primaryLocal if primary should coincide with the local node in the cluster state + * @param primaryState state of the primary + * @param replicaStates states of the replicas. The length of this array also determines the number of replicas + */ + public static ClusterState state(String index, boolean primaryLocal, ShardRoutingState primaryState, ShardRoutingState... 
replicaStates) { + final int numberOfReplicas = replicaStates.length; + + int numberOfNodes = numberOfReplicas + 1; + if (primaryState == ShardRoutingState.RELOCATING) { + numberOfNodes++; + } + for (ShardRoutingState state : replicaStates) { + if (state == ShardRoutingState.RELOCATING) { + numberOfNodes++; + } + } + numberOfNodes = Math.max(2, numberOfNodes); // we need a non-local master to test shard failures + final ShardId shardId = new ShardId(index, 0); + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + Set unassignedNodes = new HashSet<>(); + for (int i = 0; i < numberOfNodes + 1; i++) { + final DiscoveryNode node = newNode(i); + discoBuilder = discoBuilder.put(node); + unassignedNodes.add(node.id()); + } + discoBuilder.localNodeId(newNode(0).id()); + discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures + IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build(); + + RoutingTable.Builder routing = new RoutingTable.Builder(); + routing.addAsNew(indexMetaData); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId); + + String primaryNode = null; + String relocatingNode = null; + UnassignedInfo unassignedInfo = null; + if (primaryState != ShardRoutingState.UNASSIGNED) { + if (primaryLocal) { + primaryNode = newNode(0).id(); + unassignedNodes.remove(primaryNode); + } else { + primaryNode = selectAndRemove(unassignedNodes); + } + if (primaryState == ShardRoutingState.RELOCATING) { + relocatingNode = selectAndRemove(unassignedNodes); + } + } else { + unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null); + } + indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, 0, primaryNode, relocatingNode, null, true, primaryState, 0, unassignedInfo)); + + for (ShardRoutingState replicaState : replicaStates) { + String replicaNode = null; + relocatingNode = null; + unassignedInfo = null; + if (replicaState != ShardRoutingState.UNASSIGNED) { + assert primaryNode != null : "a replica is assigned but the primary isn't"; + replicaNode = selectAndRemove(unassignedNodes); + if (replicaState == ShardRoutingState.RELOCATING) { + relocatingNode = selectAndRemove(unassignedNodes); + } + } else { + unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null); + } + indexShardRoutingBuilder.addShard( + TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, null, false, replicaState, 0, unassignedInfo)); + } + + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + state.nodes(discoBuilder); + state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded()); + state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index).addIndexShard(indexShardRoutingBuilder.build()))); + return state.build(); + } + + /** + * Creates cluster state with several shards and one replica and all shards STARTED. 
+     */
+    public static ClusterState stateWithAssignedPrimariesAndOneReplica(String index, int numberOfShards) {
+
+        int numberOfNodes = 2; // we need a non-local master to test shard failures
+        DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
+        for (int i = 0; i < numberOfNodes + 1; i++) {
+            final DiscoveryNode node = newNode(i);
+            discoBuilder = discoBuilder.put(node);
+        }
+        discoBuilder.localNodeId(newNode(0).id());
+        discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures
+        IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
+                .put(SETTING_VERSION_CREATED, Version.CURRENT)
+                .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 1)
+                .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
+        ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
+        state.nodes(discoBuilder);
+        state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded());
+        IndexRoutingTable.Builder indexRoutingTableBuilder = IndexRoutingTable.builder(index);
+        for (int i = 0; i < numberOfShards; i++) {
+            RoutingTable.Builder routing = new RoutingTable.Builder();
+            routing.addAsNew(indexMetaData);
+            final ShardId shardId = new ShardId(index, i);
+            IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
+            indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(0).id(), null, null, true, ShardRoutingState.STARTED, 0, null));
+            indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, i, newNode(1).id(), null, null, false, ShardRoutingState.STARTED, 0, null));
+            indexRoutingTableBuilder.addIndexShard(indexShardRoutingBuilder.build());
+        }
+        state.routingTable(RoutingTable.builder().add(indexRoutingTableBuilder));
+        return state.build();
+    }
+
+    /**
+     * Creates cluster state with an index that has one shard and as many replicas as numberOfReplicas.
+     * Primary will be STARTED in cluster state but replicas will be one of UNASSIGNED, INITIALIZING, STARTED or RELOCATING.
+     *
+     * @param index            name of the index
+     * @param primaryLocal     if primary should coincide with the local node in the cluster state
+     * @param numberOfReplicas number of replicas
+     */
+    public static ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int numberOfReplicas) {
+        int assignedReplicas = randomIntBetween(0, numberOfReplicas);
+        return stateWithStartedPrimary(index, primaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas);
+    }
+
+    /**
+     * Creates cluster state with an index that has one shard and as many replicas as numberOfReplicas.
+     * Primary will be STARTED in cluster state. Some (unassignedReplicas) will be UNASSIGNED and
+     * some (assignedReplicas) will be one of INITIALIZING, STARTED or RELOCATING.
+     *
+     * @param index              name of the index
+     * @param primaryLocal       if primary should coincide with the local node in the cluster state
+     * @param assignedReplicas   number of replicas that should have INITIALIZING, STARTED or RELOCATING state
+     * @param unassignedReplicas number of replicas that should be unassigned
+     */
+    public static ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int assignedReplicas, int unassignedReplicas) {
+        ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas];
+        // no point in randomizing - node assignment later on does it too.
+        for (int i = 0; i < assignedReplicas; i++) {
+            replicaStates[i] = randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING);
+        }
+        for (int i = assignedReplicas; i < replicaStates.length; i++) {
+            replicaStates[i] = ShardRoutingState.UNASSIGNED;
+        }
+        return state(index, primaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates);
+    }
+
+    /**
+     * Creates a cluster state with no index
+     */
+    public static ClusterState stateWithNoShard() {
+        int numberOfNodes = 2;
+        DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
+        Set<String> unassignedNodes = new HashSet<>();
+        for (int i = 0; i < numberOfNodes + 1; i++) {
+            final DiscoveryNode node = newNode(i);
+            discoBuilder = discoBuilder.put(node);
+            unassignedNodes.add(node.id());
+        }
+        discoBuilder.localNodeId(newNode(0).id());
+        discoBuilder.masterNodeId(newNode(1).id());
+        ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
+        state.nodes(discoBuilder);
+        state.metaData(MetaData.builder().generateClusterUuidIfNeeded());
+        state.routingTable(RoutingTable.builder());
+        return state.build();
+    }
+
+    private static DiscoveryNode newNode(int nodeId) {
+        return new DiscoveryNode("node_" + nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT);
+    }
+
+    private static String selectAndRemove(Set<String> strings) {
+        String selection = randomFrom(strings.toArray(new String[strings.size()]));
+        strings.remove(selection);
+        return selection;
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java b/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java
index d7fb2ddd54d..0b9e254b0ad 100644
--- a/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java
+++ b/core/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationTests.java
@@ -77,6 +77,8 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.state;
+import static org.elasticsearch.action.support.replication.ClusterStateCreationUtils.stateWithStartedPrimary;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
 import static org.hamcrest.Matchers.*;
@@ -98,6 +100,7 @@ public class ShardReplicationTests extends ESTestCase {
         threadPool = new ThreadPool("ShardReplicationTests");
     }
 
+    @Override
     @Before
     public void setUp() throws Exception {
         super.setUp();
@@ -161,103 +164,6 @@ public class ShardReplicationTests extends ESTestCase {
         assertEquals(1, count.get());
     }
 
-    ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int numberOfReplicas) {
-        int assignedReplicas = randomIntBetween(0, numberOfReplicas);
-        return stateWithStartedPrimary(index, primaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas);
-    }
-
-    ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int assignedReplicas, int unassignedReplicas) {
-        ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas];
-        // no point in randomizing - node assignment later on does it too.
-        for (int i = 0; i < assignedReplicas; i++) {
-            replicaStates[i] = randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING);
-        }
-        for (int i = assignedReplicas; i < replicaStates.length; i++) {
-            replicaStates[i] = ShardRoutingState.UNASSIGNED;
-        }
-        return state(index, primaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates);
-    }
-
-    ClusterState state(String index, boolean primaryLocal, ShardRoutingState primaryState, ShardRoutingState... replicaStates) {
-        final int numberOfReplicas = replicaStates.length;
-
-        int numberOfNodes = numberOfReplicas + 1;
-        if (primaryState == ShardRoutingState.RELOCATING) {
-            numberOfNodes++;
-        }
-        for (ShardRoutingState state : replicaStates) {
-            if (state == ShardRoutingState.RELOCATING) {
-                numberOfNodes++;
-            }
-        }
-        numberOfNodes = Math.max(2, numberOfNodes); // we need a non-local master to test shard failures
-        final ShardId shardId = new ShardId(index, 0);
-        DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder();
-        Set<String> unassignedNodes = new HashSet<>();
-        for (int i = 0; i < numberOfNodes + 1; i++) {
-            final DiscoveryNode node = newNode(i);
-            discoBuilder = discoBuilder.put(node);
-            unassignedNodes.add(node.id());
-        }
-        discoBuilder.localNodeId(newNode(0).id());
-        discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures
-        IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(Settings.builder()
-                .put(SETTING_VERSION_CREATED, Version.CURRENT)
-                .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas)
-                .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build();
-
-        RoutingTable.Builder routing = new RoutingTable.Builder();
-        routing.addAsNew(indexMetaData);
-        IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
-
-        String primaryNode = null;
-        String relocatingNode = null;
-        UnassignedInfo unassignedInfo = null;
-        if (primaryState != ShardRoutingState.UNASSIGNED) {
-            if (primaryLocal) {
-                primaryNode = newNode(0).id();
-                unassignedNodes.remove(primaryNode);
-            } else {
-                primaryNode = selectAndRemove(unassignedNodes);
-            }
-            if (primaryState == ShardRoutingState.RELOCATING) {
-                relocatingNode = selectAndRemove(unassignedNodes);
-            }
-        } else {
-            unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null);
-        }
-        indexShardRoutingBuilder.addShard(TestShardRouting.newShardRouting(index, 0, primaryNode, relocatingNode, null, true, primaryState, 0, unassignedInfo));
-
-        for (ShardRoutingState replicaState : replicaStates) {
-            String replicaNode = null;
-            relocatingNode = null;
-            unassignedInfo = null;
-            if (replicaState != ShardRoutingState.UNASSIGNED) {
-                assert primaryNode != null : "a replica is assigned but the primary isn't";
-                replicaNode = selectAndRemove(unassignedNodes);
-                if (replicaState == ShardRoutingState.RELOCATING) {
-                    relocatingNode = selectAndRemove(unassignedNodes);
-                }
-            } else {
-                unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null);
-            }
-            indexShardRoutingBuilder.addShard(
-                    TestShardRouting.newShardRouting(index, shardId.id(), replicaNode, relocatingNode, null, false, replicaState, 0, unassignedInfo));
-        }
-
-        ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
-        state.nodes(discoBuilder);
-        state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded());
-        state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index).addIndexShard(indexShardRoutingBuilder.build())));
-        return state.build();
-    }
-
-    private String selectAndRemove(Set<String> strings) {
-        String selection = randomFrom(strings.toArray(new String[strings.size()]));
-        strings.remove(selection);
-        return selection;
-    }
-
     @Test
     public void testNotStartedPrimary() throws InterruptedException, ExecutionException {
         final String index = "test";
@@ -527,6 +433,7 @@ public class ShardReplicationTests extends ESTestCase {
         action = new ActionWithDelay(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool);
         final TransportReplicationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener);
         Thread t = new Thread() {
+            @Override
             public void run() {
                 primaryPhase.run();
             }
@@ -587,6 +494,7 @@ public class ShardReplicationTests extends ESTestCase {
         action = new ActionWithDelay(Settings.EMPTY, "testActionWithExceptions", transportService, clusterService, threadPool);
         final Action.ReplicaOperationTransportHandler replicaOperationTransportHandler = action.new ReplicaOperationTransportHandler();
         Thread t = new Thread() {
+            @Override
             public void run() {
                 try {
                     replicaOperationTransportHandler.messageReceived(new Request(), createTransportChannel());
@@ -746,10 +654,6 @@ public class ShardReplicationTests extends ESTestCase {
         }
     }
 
-    static DiscoveryNode newNode(int nodeId) {
-        return new DiscoveryNode("node_" + nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT);
-    }
-
     /*
     * Throws exceptions when executed. Used for testing if the counter is correctly decremented in case an operation fails.
    * */
diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
index a287bcb4f54..62f9a84d5f9 100644
--- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java
@@ -432,12 +432,12 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase {
     * state processing when a recovery starts and only unblocking it shortly after the node receives
     * the ShardActiveRequest.
     */
-    static class ReclocationStartEndTracer extends MockTransportService.Tracer {
+    public static class ReclocationStartEndTracer extends MockTransportService.Tracer {
         private final ESLogger logger;
         private final CountDownLatch beginRelocationLatch;
         private final CountDownLatch receivedShardExistsRequestLatch;
 
-        ReclocationStartEndTracer(ESLogger logger, CountDownLatch beginRelocationLatch, CountDownLatch receivedShardExistsRequestLatch) {
+        public ReclocationStartEndTracer(ESLogger logger, CountDownLatch beginRelocationLatch, CountDownLatch receivedShardExistsRequestLatch) {
             this.logger = logger;
             this.beginRelocationLatch = beginRelocationLatch;
             this.receivedShardExistsRequestLatch = receivedShardExistsRequestLatch;
diff --git a/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java b/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java
index 6a55fbd2577..162dff76bf8 100644
--- a/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java
+++ b/core/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java
@@ -25,6 +25,7 @@ import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.OperationRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider;
 import org.elasticsearch.cluster.service.PendingClusterTask;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
@@ -55,6 +56,7 @@ public class TestClusterService implements ClusterService {
     private final Queue onGoingTimeouts = ConcurrentCollections.newQueue();
     private final ThreadPool threadPool;
     private final ESLogger logger = Loggers.getLogger(getClass(), Settings.EMPTY);
+    private final OperationRouting operationRouting = new OperationRouting(Settings.Builder.EMPTY_SETTINGS, new AwarenessAllocationDecider());
 
     public TestClusterService() {
         this(ClusterState.builder(new ClusterName("test")).build());
@@ -129,7 +131,7 @@ public class TestClusterService implements ClusterService {
 
     @Override
     public OperationRouting operationRouting() {
-        return null;
+        return operationRouting;
     }
 
     @Override
diff --git a/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
index 8678f20ac12..497322ac7ea 100644
--- a/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
+++ b/core/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
@@ -67,6 +67,7 @@ import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.suggest.Suggest;
 import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.test.rest.client.http.HttpResponse;
+import org.hamcrest.CoreMatchers;
 import org.hamcrest.Matcher;
 import org.hamcrest.Matchers;
 import org.junit.Assert;
@@ -126,6 +127,22 @@ public class ElasticsearchAssertions {
         assertBlocked(builder, null);
     }
 
+    /**
+     * Checks that all shard requests of a replicated broadcast request failed due to a cluster block
+     *
+     * @param replicatedBroadcastResponse the response that should only contain failed shard responses
+     *
+     * */
+    public static void assertBlocked(BroadcastResponse replicatedBroadcastResponse) {
+        assertThat("all shard requests should have failed", replicatedBroadcastResponse.getFailedShards(), Matchers.equalTo(replicatedBroadcastResponse.getTotalShards()));
+        for (ShardOperationFailedException exception : replicatedBroadcastResponse.getShardFailures()) {
+            ClusterBlockException clusterBlockException = (ClusterBlockException) ExceptionsHelper.unwrap(exception.getCause(), ClusterBlockException.class);
+            assertNotNull("expected the cause of failure to be a ClusterBlockException but got " + exception.getCause().getMessage(), clusterBlockException);
+            assertThat(clusterBlockException.blocks().size(), greaterThan(0));
+            assertThat(clusterBlockException.status(), CoreMatchers.equalTo(RestStatus.FORBIDDEN));
+        }
+    }
+
     /**
      * Executes the request and fails if the request has not been blocked by a specific {@link ClusterBlock}.
      *
From da16dcf52710e282529a0ca198449ecda8bf636c Mon Sep 17 00:00:00 2001
From: Nik Everett
Date: Mon, 31 Aug 2015 13:59:00 -0400
Subject: [PATCH 23/51] [docs] Fix docs for position_increment_gap

Closes #13207
---
 .../params/position-increment-gap.asciidoc | 35 +++++++++++++------
 1 file changed, 24 insertions(+), 11 deletions(-)

diff --git a/docs/reference/mapping/params/position-increment-gap.asciidoc b/docs/reference/mapping/params/position-increment-gap.asciidoc
index 918e3d493a5..16a963c439e 100644
--- a/docs/reference/mapping/params/position-increment-gap.asciidoc
+++ b/docs/reference/mapping/params/position-increment-gap.asciidoc
@@ -4,12 +4,12 @@
 <> string fields take term <> into account, in order
 to be able to support <>.
 
-When indexing an array of strings, each string of the array is indexed
-directly after the previous one, almost as though all the strings in the array
-had been concatenated into one big string.
+When indexing string fields with multiple values, a "fake" gap is added between
+the values to prevent most phrase queries from matching across the values. The
+size of this gap is configured using `position_increment_gap` and defaults to
+`100`.
 
-This can result in matches from phrase queries spanning two array elements.
-For instance:
+For example:
 
 [source,js]
 --------------------------------------------------
@@ -26,11 +26,24 @@ GET /my_index/groups/_search
     }
   }
 }
+
+GET /my_index/groups/_search
+{
+  "query": {
+    "match_phrase": {
+      "names": {
+        "query": "Abraham Lincoln",
+        "slop": 101 <2>
+      }
+    }
+  }
+}
--------------------------------------------------
// AUTOSENSE
-<1> This phrase query matches our document, even though `Abraham` and `Lincoln`
-    are in separate strings.
+<1> This phrase query doesn't match our document, which is totally expected.
+<2> This phrase query matches our document, even though `Abraham` and `Lincoln`
+    are in separate strings, because `slop` > `position_increment_gap`.

-The `position_increment_gap` can introduce a fake gap between each array element. For instance:
+
+The `position_increment_gap` can be specified in the mapping. For instance:

[source,js]
--------------------------------------------------
@@ -41,7 +54,7 @@ PUT my_index
       "properties": {
         "names": {
           "type": "string",
-          "position_increment_gap": 50 <1>
+          "position_increment_gap": 0 <1>
         }
       }
     }
@@ -63,11 +76,11 @@ GET /my_index/groups/_search
   }
 }
--------------------------------------------------
// AUTOSENSE
-<1> The first term in the next array element will be 50 terms apart from the
+<1> The first term in the next array element will be 0 terms apart from the
    last term in the previous array element.
-<2> The phrase query no longer matches our document.
+<2> The phrase query matches our document, which is weird, but it's what we asked
+    for in the mapping.
TIP: The `position_increment_gap` setting is allowed to have different settings for fields of the same name in the same index. Its value can be updated on existing fields using the <>. - From b0af7a14266761d222516bae56c05ec42e3d85b7 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 31 Aug 2015 14:29:00 -0400 Subject: [PATCH 24/51] Fix NettyTransport --- .../transport/netty/NettyTransport.java | 63 ++++++++++++++----- 1 file changed, 48 insertions(+), 15 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index 5fb5a264829..83b6acfde18 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -22,7 +22,6 @@ package org.elasticsearch.transport.netty; import com.google.common.base.Charsets; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; - import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -57,13 +56,31 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.BindTransportException; +import org.elasticsearch.transport.BytesTransportRequest; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.NodeNotConnectedException; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportServiceAdapter; import org.elasticsearch.transport.support.TransportStatus; import org.jboss.netty.bootstrap.ClientBootstrap; import org.jboss.netty.bootstrap.ServerBootstrap; import org.jboss.netty.buffer.ChannelBuffer; import org.jboss.netty.buffer.ChannelBuffers; -import org.jboss.netty.channel.*; +import org.jboss.netty.channel.AdaptiveReceiveBufferSizePredictorFactory; +import org.jboss.netty.channel.Channel; +import org.jboss.netty.channel.ChannelFuture; +import org.jboss.netty.channel.ChannelFutureListener; +import org.jboss.netty.channel.ChannelHandlerContext; +import org.jboss.netty.channel.ChannelPipeline; +import org.jboss.netty.channel.ChannelPipelineFactory; +import org.jboss.netty.channel.Channels; +import org.jboss.netty.channel.ExceptionEvent; +import org.jboss.netty.channel.FixedReceiveBufferSizePredictorFactory; +import org.jboss.netty.channel.ReceiveBufferSizePredictorFactory; import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory; import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory; import org.jboss.netty.channel.socket.nio.NioWorkerPool; @@ -77,8 +94,20 @@ import java.net.InetSocketAddress; import java.net.SocketAddress; import java.net.UnknownHostException; import java.nio.channels.CancelledKeyException; -import java.util.*; -import java.util.concurrent.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentMap; 
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -946,15 +975,19 @@ public class NettyTransport extends AbstractLifecycleComponent implem
             }
         } catch (RuntimeException e) {
             // clean the futures
-            for (ChannelFuture[] futures : Arrays.asList(connectRecovery, connectBulk, connectReg, connectState, connectPing)) {
-                for (ChannelFuture future : futures) {
-                    future.cancel();
-                    if (future.getChannel() != null && future.getChannel().isOpen()) {
-                        try {
-                            future.getChannel().close();
-                        } catch (Exception e1) {
-                            // ignore
-                        }
+            List<ChannelFuture> futures = new ArrayList<>();
+            futures.addAll(Arrays.asList(connectRecovery));
+            futures.addAll(Arrays.asList(connectBulk));
+            futures.addAll(Arrays.asList(connectReg));
+            futures.addAll(Arrays.asList(connectState));
+            futures.addAll(Arrays.asList(connectPing));
+            for (ChannelFuture future : Collections.unmodifiableList(futures)) {
+                future.cancel();
+                if (future.getChannel() != null && future.getChannel().isOpen()) {
+                    try {
+                        future.getChannel().close();
+                    } catch (Exception e1) {
+                        // ignore
                    }
                }
            }
@@ -1158,7 +1191,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem
         newAllChannels.addAll(Arrays.asList(reg));
         newAllChannels.addAll(Arrays.asList(state));
         newAllChannels.addAll(Arrays.asList(ping));
-        this.allChannels = Collections.unmodifiableList(allChannels );
+        this.allChannels = Collections.unmodifiableList(newAllChannels);
     }
 
     public boolean hasChannel(Channel channel) {
From a8bace9f977f3700c294efc34ca9a5f61517b4f4 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Mon, 31 Aug 2015 14:35:23 -0400
Subject: [PATCH 25/51] Remove and forbid final uses of ImmutableList

---
 .../java/org/elasticsearch/cluster/metadata/MetaData.java  | 1 -
 .../org/elasticsearch/transport/netty/NettyTransport.java  | 1 +
 .../string/StringFieldMapperPositionIncrementGapTests.java | 5 ++---
 dev-tools/src/main/resources/forbidden/core-signatures.txt | 1 +
 .../test/java/org/elasticsearch/shaded/test/ShadedIT.java  | 4 ++--
 5 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index 28fb93d95a4..ef4b451de80 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -24,7 +24,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Collections2;
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.UnmodifiableIterator;
 import org.apache.lucene.util.CollectionUtil;
diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java
index 83b6acfde18..3103d3c86f3 100644
--- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java
+++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.transport.netty;
 
 import com.google.common.base.Charsets;
+import com.google.common.collect.ImmutableList;
 import
com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import org.elasticsearch.ExceptionsHelper; diff --git a/core/src/test/java/org/elasticsearch/index/mapper/string/StringFieldMapperPositionIncrementGapTests.java b/core/src/test/java/org/elasticsearch/index/mapper/string/StringFieldMapperPositionIncrementGapTests.java index 61a9c3ac9bd..c0e957e522f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/string/StringFieldMapperPositionIncrementGapTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/string/StringFieldMapperPositionIncrementGapTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.mapper.string; -import com.google.common.collect.ImmutableList; - import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.client.Client; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -29,6 +27,7 @@ import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.test.ESSingleNodeTestCase; import java.io.IOException; +import java.util.Arrays; import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -140,7 +139,7 @@ public class StringFieldMapperPositionIncrementGapTests extends ESSingleNodeTest } private static void testGap(Client client, String indexName, String type, int positionIncrementGap) throws IOException { - client.prepareIndex(indexName, type, "position_gap_test").setSource("string", ImmutableList.of("one", "two three")).setRefresh(true).get(); + client.prepareIndex(indexName, type, "position_gap_test").setSource("string", Arrays.asList("one", "two three")).setRefresh(true).get(); // Baseline - phrase query finds matches in the same field value assertHitCount(client.prepareSearch(indexName).setQuery(matchPhraseQuery("string", "two three")).get(), 1); diff --git a/dev-tools/src/main/resources/forbidden/core-signatures.txt b/dev-tools/src/main/resources/forbidden/core-signatures.txt index d95b793d1ef..7876306bb8f 100644 --- a/dev-tools/src/main/resources/forbidden/core-signatures.txt +++ b/dev-tools/src/main/resources/forbidden/core-signatures.txt @@ -87,3 +87,4 @@ org.elasticsearch.common.io.PathUtils#get(java.net.URI) @defaultMessage avoid adding additional dependencies on Guava com.google.common.collect.Lists +com.google.common.collect.ImmutableList diff --git a/qa/smoke-test-shaded/src/test/java/org/elasticsearch/shaded/test/ShadedIT.java b/qa/smoke-test-shaded/src/test/java/org/elasticsearch/shaded/test/ShadedIT.java index 0befbf440fa..9e8f022e715 100644 --- a/qa/smoke-test-shaded/src/test/java/org/elasticsearch/shaded/test/ShadedIT.java +++ b/qa/smoke-test-shaded/src/test/java/org/elasticsearch/shaded/test/ShadedIT.java @@ -55,14 +55,14 @@ public class ShadedIT extends LuceneTestCase { @Test public void testLoadShadedClasses() throws ClassNotFoundException { - Class.forName("org.elasticsearch.common.collect.ImmutableList"); + Class.forName("org.elasticsearch.common.cache.LoadingCache"); Class.forName("org.elasticsearch.common.joda.time.DateTime"); Class.forName("org.elasticsearch.common.util.concurrent.jsr166e.LongAdder"); } @Test(expected = ClassNotFoundException.class) public void testGuavaIsNotOnTheCP() throws ClassNotFoundException { - Class.forName("com.google.common.collect.ImmutableList"); + Class.forName("com.google.common.cache.LoadingCache"); } @Test(expected = ClassNotFoundException.class) From 30ffa9a61b4b69895806cea36ca9f5a19778a5d3 Mon Sep 17 00:00:00 2001 From: 
Martijn van Groningen
Date: Mon, 31 Aug 2015 15:38:18 +0200
Subject: [PATCH 26/51] test: Allow tests to override whether mock modules are used

---
 .../elasticsearch/test/ESIntegTestCase.java   | 18 ++++++++++++--
 .../test/InternalTestCluster.java             | 24 +++++++------------
 .../junit/listeners/ReproduceInfoPrinter.java |  4 ++--
 .../test/test/InternalTestClusterTests.java   |  8 +++----
 .../java/org/elasticsearch/tribe/TribeIT.java |  2 +-
 5 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java b/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java
index 9bb1cfd0cfb..35dd8cabecb 100644
--- a/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/core/src/test/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -218,7 +218,7 @@ import static org.hamcrest.Matchers.*;
  * This class supports the following system properties (passed with -Dkey=value to the application)
  * <ul>
  * <li>-D{@value #TESTS_CLIENT_RATIO} - a double value in the interval [0..1] which defines the ratio between node and transport clients used</li>
- * <li>-D{@value InternalTestCluster#TESTS_ENABLE_MOCK_MODULES} - a boolean value to enable or disable mock modules. This is
+ * <li>-D{@value #TESTS_ENABLE_MOCK_MODULES} - a boolean value to enable or disable mock modules. This is
  * useful to test the system without asserting modules, to make sure they don't hide any bugs in production.</li>
  * <li> - a random seed used to initialize the index random context.</li>
  * </ul>
@@ -268,6 +268,15 @@ public abstract class ESIntegTestCase extends ESTestCase {
      */
     public static final String SETTING_INDEX_SEED = "index.tests.seed";
 
+    /**
+     * A boolean value to enable or disable mock modules. This is useful to test the
+     * system without asserting modules, to make sure they don't hide any bugs in
+     * production.
+     *
+     * @see ESIntegTestCase
+     */
+    public static final String TESTS_ENABLE_MOCK_MODULES = "tests.enable_mock_modules";
+
     /**
      * Threshold at which indexing switches from frequently async to frequently bulk.
      */
@@ -1806,9 +1815,14 @@ public abstract class ESIntegTestCase extends ESTestCase {
             nodeMode = "local";
         }
 
+        boolean enableMockModules = enableMockModules();
         return new InternalTestCluster(nodeMode, seed, createTempDir(), minNumDataNodes, maxNumDataNodes,
                 InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(),
-                InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix);
+                InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, enableMockModules);
+    }
+
+    protected boolean enableMockModules() {
+        return RandomizedTest.systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true);
     }
 
     /**
diff --git a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java
index c6e81f5f80d..f54197b039a 100644
--- a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java
+++ b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java
@@ -153,15 +153,6 @@ public final class InternalTestCluster extends TestCluster {
 
     static NodeConfigurationSource DEFAULT_SETTINGS_SOURCE = NodeConfigurationSource.EMPTY;
 
-    /**
-     * A boolean value to enable or disable mock modules. This is useful to test the
-     * system without asserting modules, to make sure they don't hide any bugs in
-     * production.
-     *
-     * @see ESIntegTestCase
-     */
-    public static final String TESTS_ENABLE_MOCK_MODULES = "tests.enable_mock_modules";
-
     /**
      * A node level setting that holds a per node random seed that is consistent across node restarts
     */
@@ -192,8 +183,6 @@ public final class InternalTestCluster extends TestCluster {
 
     public final int HTTP_BASE_PORT = GLOBAL_HTTP_BASE_PORT + CLUSTER_BASE_PORT_OFFSET;
 
-    private static final boolean ENABLE_MOCK_MODULES = RandomizedTest.systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true);
-
     static final int DEFAULT_MIN_NUM_DATA_NODES = 1;
     static final int DEFAULT_MAX_NUM_DATA_NODES = TEST_NIGHTLY ? 6 : 3;
@@ -229,6 +218,8 @@ public final class InternalTestCluster extends TestCluster {
 
     private final ExecutorService executor;
 
+    private final boolean enableMockModules;
+
     /**
      * All nodes started by the cluster will have their name set to nodePrefix followed by a positive number
     */
@@ -240,7 +231,7 @@ public final class InternalTestCluster extends TestCluster {
 
     public InternalTestCluster(String nodeMode, long clusterSeed, Path baseDir, int minNumDataNodes, int maxNumDataNodes,
                                String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes,
-                               boolean enableHttpPipelining, String nodePrefix) {
+                               boolean enableHttpPipelining, String nodePrefix, boolean enableMockModules) {
         super(clusterSeed);
         if ("network".equals(nodeMode) == false && "local".equals(nodeMode) == false) {
             throw new IllegalArgumentException("Unknown nodeMode: " + nodeMode);
@@ -276,6 +267,7 @@ public final class InternalTestCluster extends TestCluster {
 
         this.nodePrefix = nodePrefix;
         assert nodePrefix != null;
+        this.enableMockModules = enableMockModules;
 
         /*
          * TODO
@@ -387,15 +379,15 @@
     private Collection<Class<? extends Plugin>> getPlugins(long seed) {
         Set<Class<? extends Plugin>> plugins = new HashSet<>(nodeConfigurationSource.nodePlugins());
         Random random = new Random(seed);
-        if (ENABLE_MOCK_MODULES && usually(random)) {
+        if (enableMockModules && usually(random)) {
             plugins.add(MockTransportService.TestPlugin.class);
             plugins.add(MockFSIndexStore.TestPlugin.class);
             plugins.add(NodeMocksPlugin.class);
             plugins.add(MockEngineFactoryPlugin.class);
             plugins.add(MockSearchService.TestPlugin.class);
-        }
-        if (isLocalTransportConfigured()) {
-            plugins.add(AssertingLocalTransport.TestPlugin.class);
+            if (isLocalTransportConfigured()) {
+                plugins.add(AssertingLocalTransport.TestPlugin.class);
+            }
         }
         return plugins;
     }
diff --git a/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
index 07f09cc2386..0a52a376610 100644
--- a/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
+++ b/core/src/test/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
@@ -25,8 +25,8 @@ import com.carrotsearch.randomizedtesting.TraceFormatting;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.InternalTestCluster;
 import org.junit.internal.AssumptionViolatedException;
 import org.junit.runner.Description;
 import org.junit.runner.notification.Failure;
@@ -158,7 +158,7 @@ public class ReproduceInfoPrinter extends RunListener {
         appendProperties("es.logger.level");
         if (inVerifyPhase()) {
             // these properties only make sense for integration tests
-            appendProperties("es.node.mode", "es.node.local", TESTS_CLUSTER, InternalTestCluster.TESTS_ENABLE_MOCK_MODULES);
+            appendProperties("es.node.mode", "es.node.local", TESTS_CLUSTER, ESIntegTestCase.TESTS_ENABLE_MOCK_MODULES);
         }
         appendProperties("tests.assertion.disabled", "tests.security.manager", "tests.nightly", "tests.jvms", "tests.client.ratio", "tests.heap.size", "tests.bwc", "tests.bwc.version");
diff --git a/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
index 2770e3581d2..dcdec27ae33 100644
---
a/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java +++ b/core/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java @@ -53,8 +53,8 @@ public class InternalTestClusterTests extends ESTestCase { String nodePrefix = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); Path baseDir = createTempDir(); - InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix); - InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix); + InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true); + InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true); // TODO: this is not ideal - we should have a way to make sure ports are initialized in the same way assertClusters(cluster0, cluster1, false); @@ -111,8 +111,8 @@ public class InternalTestClusterTests extends ESTestCase { String nodePrefix = "foobar"; Path baseDir = createTempDir(); - InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix); - InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix); + InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true); + InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true); assertClusters(cluster0, cluster1, false); long seed = randomLong(); diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index a8478aef051..1f8b7f165a5 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -76,7 +76,7 @@ public class TribeIT extends ESIntegTestCase { public static void setupSecondCluster() throws Exception { ESIntegTestCase.beforeClass(); cluster2 = new InternalTestCluster(InternalTestCluster.configuredNodeMode(), randomLong(), createTempDir(), 2, 2, - Strings.randomBase64UUID(getRandom()), NodeConfigurationSource.EMPTY, 0, false, SECOND_CLUSTER_NODE_PREFIX); + Strings.randomBase64UUID(getRandom()), NodeConfigurationSource.EMPTY, 0, false, SECOND_CLUSTER_NODE_PREFIX, true); cluster2.beforeTest(getRandom(), 0.1); cluster2.ensureAtLeastNumDataNodes(2); From a58c5dba89017746b2de0d54c345e2489e5a251e Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 1 Sep 2015 00:34:34 -0400 Subject: [PATCH 27/51] Add missing null check in ESPolicy. 
This allows reducing privileges with doPrivileged to work, otherwise it will fail with NPE. In general, if some code wants to do that, let it. The null check is needed, even though ProtectionDomain(CodeSource, PermissionCollection) is more than a bit misleading: "the current Policy will not be consulted". Additionally add a defensive check for location, since the docs there are even more confusing: https://bugs.openjdk.java.net/browse/JDK-8129972 The jdk policy impl has both these checks. --- .../org/elasticsearch/bootstrap/ESPolicy.java | 21 +++- .../bootstrap/ESPolicyTests.java | 97 +++++++++++++++++++ 2 files changed, 114 insertions(+), 4 deletions(-) create mode 100644 core/src/test/java/org/elasticsearch/bootstrap/ESPolicyTests.java diff --git a/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java b/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java index 08f8ba4cfe6..4f204966875 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/ESPolicy.java @@ -22,6 +22,8 @@ package org.elasticsearch.bootstrap; import org.elasticsearch.common.SuppressForbidden; import java.net.URI; +import java.net.URL; +import java.security.CodeSource; import java.security.Permission; import java.security.PermissionCollection; import java.security.Policy; @@ -44,11 +46,22 @@ final class ESPolicy extends Policy { } @Override @SuppressForbidden(reason = "fast equals check is desired") - public boolean implies(ProtectionDomain domain, Permission permission) { - // run groovy scripts with no permissions - if ("/groovy/script".equals(domain.getCodeSource().getLocation().getFile())) { - return false; + public boolean implies(ProtectionDomain domain, Permission permission) { + CodeSource codeSource = domain.getCodeSource(); + // codesource can be null when reducing privileges via doPrivileged() + if (codeSource != null) { + URL location = codeSource.getLocation(); + // location can be null... ??? nobody knows + // https://bugs.openjdk.java.net/browse/JDK-8129972 + if (location != null) { + // run groovy scripts with no permissions + if ("/groovy/script".equals(location.getFile())) { + return false; + } + } } + + // otherwise defer to template + dynamic file permissions return template.implies(domain, permission) || dynamic.implies(permission); } } diff --git a/core/src/test/java/org/elasticsearch/bootstrap/ESPolicyTests.java b/core/src/test/java/org/elasticsearch/bootstrap/ESPolicyTests.java new file mode 100644 index 00000000000..5423e68b555 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/bootstrap/ESPolicyTests.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.bootstrap; + +import org.elasticsearch.test.ESTestCase; + +import java.io.FilePermission; +import java.security.AccessControlContext; +import java.security.AccessController; +import java.security.CodeSource; +import java.security.PermissionCollection; +import java.security.Permissions; +import java.security.PrivilegedAction; +import java.security.ProtectionDomain; +import java.security.cert.Certificate; + +/** + * Tests for ESPolicy + *
+ * Most unit tests won't run under security manager, since we don't allow + * access to the policy (you cannot construct it) + */ +public class ESPolicyTests extends ESTestCase { + + /** + * Test policy with null codesource. + *
+ * This can happen when restricting privileges with doPrivileged, + * even though ProtectionDomain's ctor javadocs might make you think + * that the policy won't be consulted. + */ + public void testNullCodeSource() throws Exception { + assumeTrue("test cannot run with security manager", System.getSecurityManager() == null); + PermissionCollection noPermissions = new Permissions(); + ESPolicy policy = new ESPolicy(noPermissions); + assertFalse(policy.implies(new ProtectionDomain(null, noPermissions), new FilePermission("foo", "read"))); + } + + /** + * test with null location + *
+ * it's unclear when/if this happens, see https://bugs.openjdk.java.net/browse/JDK-8129972
+ */
+    public void testNullLocation() throws Exception {
+        assumeTrue("test cannot run with security manager", System.getSecurityManager() == null);
+        PermissionCollection noPermissions = new Permissions();
+        ESPolicy policy = new ESPolicy(noPermissions);
+        assertFalse(policy.implies(new ProtectionDomain(new CodeSource(null, (Certificate[])null), noPermissions), new FilePermission("foo", "read")));
+    }
+
+    /**
+     * test restricting privileges to no permissions actually works
+     */
+    public void testRestrictPrivileges() {
+        assumeTrue("test requires security manager", System.getSecurityManager() != null);
+        try {
+            System.getProperty("user.home");
+        } catch (SecurityException e) {
+            fail("this test needs to be fixed: user.home not available by policy");
+        }
+
+        PermissionCollection noPermissions = new Permissions();
+        AccessControlContext noPermissionsAcc = new AccessControlContext(
+            new ProtectionDomain[] {
+                new ProtectionDomain(null, noPermissions)
+            }
+        );
+        try {
+            AccessController.doPrivileged(new PrivilegedAction<Void>() {
+                public Void run() {
+                    System.getProperty("user.home");
+                    fail("access should have been denied");
+                    return null;
+                }
+            }, noPermissionsAcc);
+        } catch (SecurityException expected) {
+            // expected exception
+        }
+    }
+}
From f46e66e7d09275ba3009e41f5cf20b7acd5de40e Mon Sep 17 00:00:00 2001
From: xuzha
Date: Thu, 27 Aug 2015 21:07:56 -0700
Subject: [PATCH 28/51] Remove the experimental indices.fielddata.cache.expire

closes #10781
---
 .../indices/fielddata/cache/IndicesFieldDataCache.java | 8 ++------
 .../java/org/elasticsearch/test/InternalTestCluster.java | 3 ---
 docs/reference/migration/migrate_2_1.asciidoc | 7 +++++++
 docs/reference/modules/indices/fielddata.asciidoc | 6 ------
 4 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
index 4e764654603..1bf020315c5 100644
--- a/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
+++ b/core/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java
@@ -54,7 +54,6 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
     public static final String FIELDDATA_CLEAN_INTERVAL_SETTING = "indices.fielddata.cache.cleanup_interval";
     public static final String FIELDDATA_CACHE_CONCURRENCY_LEVEL = "indices.fielddata.cache.concurrency_level";
     public static final String INDICES_FIELDDATA_CACHE_SIZE_KEY = "indices.fielddata.cache.size";
-    public static final String INDICES_FIELDDATA_CACHE_EXPIRE_KEY = "indices.fielddata.cache.expire";
 
     private final IndicesFieldDataCacheListener indicesFieldDataCacheListener;
@@ -70,7 +69,6 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
         this.indicesFieldDataCacheListener = indicesFieldDataCacheListener;
         final String size = settings.get(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1");
         final long sizeInBytes = settings.getAsMemory(INDICES_FIELDDATA_CACHE_SIZE_KEY, "-1").bytes();
-        final TimeValue expire = settings.getAsTime(INDICES_FIELDDATA_CACHE_EXPIRE_KEY, null);
         CacheBuilder cacheBuilder = CacheBuilder.newBuilder()
                 .removalListener(this);
         if (sizeInBytes > 0) {
@@ -82,10 +80,8 @@ public class IndicesFieldDataCache extends AbstractComponent implements RemovalL
             throw new IllegalArgumentException("concurrency_level must be >
0 but was: " + concurrencyLevel); } cacheBuilder.concurrencyLevel(concurrencyLevel); - if (expire != null && expire.millis() > 0) { - cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS); - } - logger.debug("using size [{}] [{}], expire [{}]", size, new ByteSizeValue(sizeInBytes), expire); + + logger.debug("using size [{}] [{}]", size, new ByteSizeValue(sizeInBytes)); cache = cacheBuilder.build(); this.cleanInterval = settings.getAsTime(FIELDDATA_CLEAN_INTERVAL_SETTING, TimeValue.timeValueMinutes(1)); diff --git a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java index f54197b039a..07aef2125d4 100644 --- a/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/core/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -432,9 +432,6 @@ public final class InternalTestCluster extends TestCluster { if (random.nextBoolean()) { builder.put("indices.fielddata.cache.size", 1 + random.nextInt(1000), ByteSizeUnit.MB); } - if (random.nextBoolean()) { - builder.put("indices.fielddata.cache.expire", TimeValue.timeValueMillis(1 + random.nextInt(10000))); - } } // randomize netty settings diff --git a/docs/reference/migration/migrate_2_1.asciidoc b/docs/reference/migration/migrate_2_1.asciidoc index 63092d9250e..a530fc1193d 100644 --- a/docs/reference/migration/migrate_2_1.asciidoc +++ b/docs/reference/migration/migrate_2_1.asciidoc @@ -35,3 +35,10 @@ We've switched the default value of the `detect_noop` option from `false` to source unless you explicitly set `"detect_noop": false`. `detect_noop` was always computationally cheap compared to the expense of the update which can be thought of as a delete operation followed by an index operation. + +=== Removed features + +==== `indices.fielddata.cache.expire` + +The experimental feature `indices.fielddata.cache.expire` has been removed. +For indices that have this setting configured, this config will be ignored. \ No newline at end of file diff --git a/docs/reference/modules/indices/fielddata.asciidoc b/docs/reference/modules/indices/fielddata.asciidoc index eda1ff48e79..e8c8a8d2c49 100644 --- a/docs/reference/modules/indices/fielddata.asciidoc +++ b/docs/reference/modules/indices/fielddata.asciidoc @@ -18,12 +18,6 @@ and perform poorly. absolute value, eg `12GB`. Defaults to unbounded. Also see <>. -`indices.fielddata.cache.expire`:: - - experimental[] A time based setting that expires field data after a - certain time of inactivity. Defaults to `-1`. For example, can be set to - `5m` for a 5 minute expiry. - NOTE: These are static settings which must be configured on every data node in the cluster. 
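For readers following the cache change in PATCH 28: with `indices.fielddata.cache.expire` gone, entries leave the field data cache only through the size (weight) bound or explicit invalidation, which is exactly the behavior change the migration note documents. The following is a minimal, self-contained sketch of what the remaining Guava wiring amounts to; the byte budget, the Object key/value types, and the string-length weigher are illustrative stand-ins (the real cache weighs entries by their RAM usage and keys them by shard and field), not the actual IndicesFieldDataCache implementation:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.Weigher;

public class SizeBoundedCacheSketch {
    public static void main(String[] args) {
        // stand-in for settings.getAsMemory("indices.fielddata.cache.size", "-1").bytes()
        final long sizeInBytes = 64 * 1024 * 1024;

        CacheBuilder<Object, Object> builder = CacheBuilder.newBuilder();
        if (sizeInBytes > 0) {
            // weight is now the only eviction trigger; there is no expireAfterAccess(...) branch anymore
            builder.maximumWeight(sizeInBytes).weigher(new Weigher<Object, Object>() {
                @Override
                public int weigh(Object key, Object value) {
                    // toy proxy for the entry's real memory footprint
                    return String.valueOf(value).length();
                }
            });
        }
        Cache<Object, Object> cache = builder.build();
        cache.put("my_field", "field data bytes...");
        System.out.println("entries: " + cache.size());
    }
}

Because the weigher runs when entries are inserted, no timer is involved in eviction; the separate `indices.fielddata.cache.cleanup_interval` sweep that survives the patch serves a different purpose.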
From 7571276b84a4deeb30614129e830da2399841100 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Tue, 1 Sep 2015 10:35:41 +0200
Subject: [PATCH 29/51] Pass in relevant disk usage map for early termination

---
 .../decider/DiskThresholdDecider.java | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
index 8b8652a067a..b7ee93e5e23 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
@@ -335,15 +335,16 @@ public class DiskThresholdDecider extends AllocationDecider {
 
     @Override
     public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
-        final Decision decision = earlyTerminate(allocation);
+        ClusterInfo clusterInfo = allocation.clusterInfo();
+        Map<String, DiskUsage> usages = clusterInfo.getNodeMostAvailableDiskUsages();
+        final Decision decision = earlyTerminate(allocation, usages);
         if (decision != null) {
             return decision;
         }
 
         final double usedDiskThresholdLow = 100.0 - DiskThresholdDecider.this.freeDiskThresholdLow;
         final double usedDiskThresholdHigh = 100.0 - DiskThresholdDecider.this.freeDiskThresholdHigh;
-        ClusterInfo clusterInfo = allocation.clusterInfo();
-        Map<String, DiskUsage> usages = clusterInfo.getNodeMostAvailableDiskUsages();
+
         DiskUsage usage = getDiskUsage(node, allocation, usages);
         // First, check that the node is currently over the low watermark
         double freeDiskPercentage = usage.getFreeDiskAsPercentage();
@@ -449,12 +450,13 @@ public class DiskThresholdDecider extends AllocationDecider {
         if (shardRouting.currentNodeId().equals(node.nodeId()) == false) {
             throw new IllegalArgumentException("Shard [" + shardRouting + "] is not allocated on node: [" + node.nodeId() + "]");
         }
-        final Decision decision = earlyTerminate(allocation);
+        final ClusterInfo clusterInfo = allocation.clusterInfo();
+        final Map<String, DiskUsage> usages = clusterInfo.getNodeLeastAvailableDiskUsages();
+        final Decision decision = earlyTerminate(allocation, usages);
         if (decision != null) {
             return decision;
         }
-        final ClusterInfo clusterInfo = allocation.clusterInfo();
-        final Map<String, DiskUsage> usages = clusterInfo.getNodeLeastAvailableDiskUsages();
+
+        final DiskUsage usage = getDiskUsage(node, allocation, usages);
         final String dataPath = clusterInfo.getDataPath(shardRouting);
         // If this node is already above the high threshold, the shard cannot remain (get it off!)
@@ -590,7 +592,7 @@ public class DiskThresholdDecider extends AllocationDecider {
         }
     }
 
-    private Decision earlyTerminate(RoutingAllocation allocation) {
+    private Decision earlyTerminate(RoutingAllocation allocation, final Map<String, DiskUsage> usages) {
         // Always allow allocation if the decider is disabled
         if (!enabled) {
             return allocation.decision(Decision.YES, NAME, "disk threshold decider disabled");
@@ -613,7 +615,6 @@ public class DiskThresholdDecider extends AllocationDecider {
             return allocation.decision(Decision.YES, NAME, "cluster info unavailable");
         }
 
-        final Map<String, DiskUsage> usages = clusterInfo.getNodeLeastAvailableDiskUsages();
         // Fail open if there are no disk usages available
         if (usages.isEmpty()) {
             if (logger.isTraceEnabled()) {
From 1ee6ea92479698e06b67ba498781efd7f951e4cc Mon Sep 17 00:00:00 2001
From: Clinton Gormley
Date: Tue, 1 Sep 2015 11:49:26 +0200
Subject: [PATCH 30/51] Docs: index.codec is static, not dynamic

The `index.codec` setting can only be set on a closed index, not
dynamically
---
 docs/reference/index-modules.asciidoc | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc
index 7d2a5ab03db..3a1c2de1385 100644
--- a/docs/reference/index-modules.asciidoc
+++ b/docs/reference/index-modules.asciidoc
@@ -68,6 +68,12 @@ corruption is detected, it will prevent the shard from being opened. Accepts:
 
 Checking shards may take a lot of time on large indices.
 --
 
+[[index-codec]] `index.codec`::
+
+    experimental[] The `default` value compresses stored data with LZ4
+    compression, but this can be set to `best_compression` for a higher
+    compression ratio, at the expense of slower stored fields performance.
+
 [float]
 [[dynamic-index-settings]]
 === Dynamic index settings
@@ -92,12 +98,6 @@ specific index module:
     index visible to search. Defaults to `1s`. Can be set to `-1` to disable
     refresh.
 
-[[index-codec]] `index.codec`::
-
-    experimental[] The `default` value compresses stored data with LZ4
-    compression, but this can be set to `best_compression` for a higher
-    compression ratio, at the expense of slower stored fields performance.
-
 `index.blocks.read_only`::
 
     Set to `true` to make the index and index metadata read only, `false` to
From 8cd86a615a66dede1c55ea7f0e3ce822e33f1c86 Mon Sep 17 00:00:00 2001
From: Isabel Drost-Fromm
Date: Thu, 23 Jul 2015 10:50:35 +0200
Subject: [PATCH 31/51] Adds template support to _msearch resource

Much like we already do with search, this adds templating support to the _msearch resource.
Closes #10885 --- .../action/search/MultiSearchRequest.java | 26 ++++++-------- .../action/search/RestMultiSearchAction.java | 15 +++++++- .../search/MultiSearchRequestTests.java | 35 ++++++++++++++----- .../action/search/simple-msearch1.json | 0 .../action/search/simple-msearch2.json | 0 .../action/search/simple-msearch3.json | 0 .../action/search/simple-msearch4.json | 0 .../action/search/simple-msearch5.json | 6 ++++ .../rest-api-spec/test/msearch/10_basic.yaml | 8 ++++- 9 files changed, 63 insertions(+), 27 deletions(-) rename core/src/test/{java => resources}/org/elasticsearch/action/search/simple-msearch1.json (100%) rename core/src/test/{java => resources}/org/elasticsearch/action/search/simple-msearch2.json (100%) rename core/src/test/{java => resources}/org/elasticsearch/action/search/simple-msearch3.json (100%) rename core/src/test/{java => resources}/org/elasticsearch/action/search/simple-msearch4.json (100%) create mode 100644 core/src/test/resources/org/elasticsearch/action/search/simple-msearch5.json diff --git a/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index df2e2424e71..d754d969428 100644 --- a/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -69,15 +69,15 @@ public class MultiSearchRequest extends ActionRequest implem } public MultiSearchRequest add(byte[] data, int from, int length, - @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType) throws Exception { - return add(new BytesArray(data, from, length), indices, types, searchType, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true); + boolean isTemplateRequest, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType) throws Exception { + return add(new BytesArray(data, from, length), isTemplateRequest, indices, types, searchType, null, IndicesOptions.strictExpandOpenAndForbidClosed(), true); } - public MultiSearchRequest add(BytesReference data, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, IndicesOptions indicesOptions) throws Exception { - return add(data, indices, types, searchType, null, indicesOptions, true); + public MultiSearchRequest add(BytesReference data, boolean isTemplateRequest, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, IndicesOptions indicesOptions) throws Exception { + return add(data, isTemplateRequest, indices, types, searchType, null, indicesOptions, true); } - public MultiSearchRequest add(BytesReference data, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, @Nullable String routing, IndicesOptions indicesOptions, boolean allowExplicitIndex) throws Exception { + public MultiSearchRequest add(BytesReference data, boolean isTemplateRequest, @Nullable String[] indices, @Nullable String[] types, @Nullable String searchType, @Nullable String routing, IndicesOptions indicesOptions, boolean allowExplicitIndex) throws Exception { XContent xContent = XContentFactory.xContent(data); int from = 0; int length = data.length(); @@ -146,8 +146,11 @@ public class MultiSearchRequest extends ActionRequest implem if (nextMarker == -1) { break; } - - searchRequest.source(data.slice(from, nextMarker - from)); + if (isTemplateRequest) { + searchRequest.templateSource(data.slice(from, nextMarker - from)); + } else { + 
searchRequest.source(data.slice(from, nextMarker - from)); + } // move pointers from = nextMarker + 1; @@ -157,15 +160,6 @@ public class MultiSearchRequest extends ActionRequest implem return this; } - private String[] parseArray(XContentParser parser) throws IOException { - final List list = new ArrayList<>(); - assert parser.currentToken() == XContentParser.Token.START_ARRAY; - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - list.add(parser.text()); - } - return list.toArray(new String[list.size()]); - } - private int findNextMarker(byte marker, int from, BytesReference data, int length) { for (int i = from; i < length; i++) { if (data.get(i) == marker) { diff --git a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index 6dfe605d96b..af1f2f464a7 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -50,6 +50,13 @@ public class RestMultiSearchAction extends BaseRestHandler { controller.registerHandler(GET, "/{index}/{type}/_msearch", this); controller.registerHandler(POST, "/{index}/{type}/_msearch", this); + controller.registerHandler(GET, "/_msearch/template", this); + controller.registerHandler(POST, "/_msearch/template", this); + controller.registerHandler(GET, "/{index}/_msearch/template", this); + controller.registerHandler(POST, "/{index}/_msearch/template", this); + controller.registerHandler(GET, "/{index}/{type}/_msearch/template", this); + controller.registerHandler(POST, "/{index}/{type}/_msearch/template", this); + this.allowExplicitIndex = settings.getAsBoolean("rest.action.multi.allow_explicit_index", true); } @@ -59,9 +66,15 @@ public class RestMultiSearchAction extends BaseRestHandler { String[] indices = Strings.splitStringByCommaToArray(request.param("index")); String[] types = Strings.splitStringByCommaToArray(request.param("type")); + String path = request.path(); + boolean isTemplateRequest = isTemplateRequest(path); IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, multiSearchRequest.indicesOptions()); - multiSearchRequest.add(RestActions.getRestContent(request), indices, types, request.param("search_type"), request.param("routing"), indicesOptions, allowExplicitIndex); + multiSearchRequest.add(RestActions.getRestContent(request), isTemplateRequest, indices, types, request.param("search_type"), request.param("routing"), indicesOptions, allowExplicitIndex); client.multiSearch(multiSearchRequest, new RestToXContentListener(channel)); } + + private boolean isTemplateRequest(String path) { + return (path != null && path.endsWith("/template")); + } } diff --git a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index d2533eaf2da..5fd9baea068 100644 --- a/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/core/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.search; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.test.StreamsUtils; import org.elasticsearch.common.xcontent.ToXContent; @@ -29,19 +28,16 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Test; import 
java.io.IOException; -import java.util.Collections; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; -/** - */ public class MultiSearchRequestTests extends ESTestCase { @Test public void simpleAdd() throws Exception { byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch1.json"); - MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null); + MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null); assertThat(request.requests().size(), equalTo(8)); assertThat(request.requests().get(0).indices()[0], equalTo("test")); assertThat(request.requests().get(0).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); @@ -67,7 +63,7 @@ public class MultiSearchRequestTests extends ESTestCase { @Test public void simpleAdd2() throws Exception { byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch2.json"); - MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null); + MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null); assertThat(request.requests().size(), equalTo(5)); assertThat(request.requests().get(0).indices()[0], equalTo("test")); assertThat(request.requests().get(0).types().length, equalTo(0)); @@ -81,11 +77,11 @@ public class MultiSearchRequestTests extends ESTestCase { assertThat(request.requests().get(4).indices(), nullValue()); assertThat(request.requests().get(4).types().length, equalTo(0)); } - + @Test public void simpleAdd3() throws Exception { byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch3.json"); - MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null); + MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null); assertThat(request.requests().size(), equalTo(4)); assertThat(request.requests().get(0).indices()[0], equalTo("test0")); assertThat(request.requests().get(0).indices()[1], equalTo("test1")); @@ -104,7 +100,28 @@ public class MultiSearchRequestTests extends ESTestCase { @Test public void simpleAdd4() throws Exception { byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch4.json"); - MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null); + MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, false, null, null, null); + assertThat(request.requests().size(), equalTo(3)); + assertThat(request.requests().get(0).indices()[0], equalTo("test0")); + assertThat(request.requests().get(0).indices()[1], equalTo("test1")); + assertThat(request.requests().get(0).requestCache(), equalTo(true)); + assertThat(request.requests().get(0).preference(), nullValue()); + assertThat(request.requests().get(1).indices()[0], equalTo("test2")); + assertThat(request.requests().get(1).indices()[1], equalTo("test3")); + assertThat(request.requests().get(1).types()[0], equalTo("type1")); + assertThat(request.requests().get(1).requestCache(), nullValue()); + assertThat(request.requests().get(1).preference(), equalTo("_local")); + assertThat(request.requests().get(2).indices()[0], equalTo("test4")); + assertThat(request.requests().get(2).indices()[1], equalTo("test1")); + 
assertThat(request.requests().get(2).types()[0], equalTo("type2")); + assertThat(request.requests().get(2).types()[1], equalTo("type1")); + assertThat(request.requests().get(2).routing(), equalTo("123")); + } + + @Test + public void simpleAdd5() throws Exception { + byte[] data = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch5.json"); + MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, true, null, null, null); assertThat(request.requests().size(), equalTo(3)); assertThat(request.requests().get(0).indices()[0], equalTo("test0")); assertThat(request.requests().get(0).indices()[1], equalTo("test1")); diff --git a/core/src/test/java/org/elasticsearch/action/search/simple-msearch1.json b/core/src/test/resources/org/elasticsearch/action/search/simple-msearch1.json similarity index 100% rename from core/src/test/java/org/elasticsearch/action/search/simple-msearch1.json rename to core/src/test/resources/org/elasticsearch/action/search/simple-msearch1.json diff --git a/core/src/test/java/org/elasticsearch/action/search/simple-msearch2.json b/core/src/test/resources/org/elasticsearch/action/search/simple-msearch2.json similarity index 100% rename from core/src/test/java/org/elasticsearch/action/search/simple-msearch2.json rename to core/src/test/resources/org/elasticsearch/action/search/simple-msearch2.json diff --git a/core/src/test/java/org/elasticsearch/action/search/simple-msearch3.json b/core/src/test/resources/org/elasticsearch/action/search/simple-msearch3.json similarity index 100% rename from core/src/test/java/org/elasticsearch/action/search/simple-msearch3.json rename to core/src/test/resources/org/elasticsearch/action/search/simple-msearch3.json diff --git a/core/src/test/java/org/elasticsearch/action/search/simple-msearch4.json b/core/src/test/resources/org/elasticsearch/action/search/simple-msearch4.json similarity index 100% rename from core/src/test/java/org/elasticsearch/action/search/simple-msearch4.json rename to core/src/test/resources/org/elasticsearch/action/search/simple-msearch4.json diff --git a/core/src/test/resources/org/elasticsearch/action/search/simple-msearch5.json b/core/src/test/resources/org/elasticsearch/action/search/simple-msearch5.json new file mode 100644 index 00000000000..5f08919481f --- /dev/null +++ b/core/src/test/resources/org/elasticsearch/action/search/simple-msearch5.json @@ -0,0 +1,6 @@ +{"index":["test0", "test1"], "request_cache": true} +{"template": {"query" : {"match_{{template}}" {}}}, "params": {"template": "all" } } } +{"index" : "test2,test3", "type" : "type1", "preference": "_local"} +{"template": {"query" : {"match_{{template}}" {}}}, "params": {"template": "all" } } } +{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ], "routing": "123"} +{"template": {"query" : {"match_{{template}}" {}}}, "params": {"template": "all" } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yaml index 8b736b860bd..49e34fb16cd 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yaml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yaml @@ -44,4 +44,10 @@ - match: { responses.1.error.root_cause.0.index: test_2 } - match: { responses.2.hits.total: 1 } - + - do: + msearch: + body: + - index: test_1 + - query: + { "template": { "query": { "term": { "foo": { "value": "{{template}}" } } }, "params": { "template": "bar" } } } 
+ - match: { responses.0.hits.total: 1 } From 5d9fb2e8a612d67e4cb9ef29fcd86befa4f941d3 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 4 Aug 2015 11:42:29 +0200 Subject: [PATCH 32/51] Upgrade to lucene-5.3.0. From a user perspective, the main benefit from this upgrade is that the new Lucene53Codec has disk-based norms. The elasticsearch directory has been fixed to load these norms through mmap instead of nio. Other changes include the removal of `max_thread_states`, the fact that PhraseQuery and BooleanQuery are now immutable, and that deleted docs are now applied on top of the Scorer API. This change introduces a couple of `AwaitsFix`s but I don't think it should hold us from merging. --- .../apache/lucene/queries/MinDocQuery.java | 9 +- .../classic/MapperQueryParser.java | 33 +-- .../CustomSeparatorBreakIterator.java | 153 ------------ .../analyzing/XAnalyzingSuggester.java | 64 ++--- .../suggest/analyzing/XFuzzySuggester.java | 36 ++- .../main/java/org/elasticsearch/Version.java | 2 +- .../action/termvectors/TermVectorsFields.java | 2 +- .../action/termvectors/TermVectorsFilter.java | 2 +- .../termvectors/TermVectorsResponse.java | 2 +- .../action/termvectors/TermVectorsWriter.java | 4 +- .../common/io/stream/StreamOutput.java | 85 ++----- .../elasticsearch/common/lucene/Lucene.java | 36 +-- .../common/lucene/all/AllTermQuery.java | 233 ++++++++++++------ .../lucene/index/FilterableTermsEnum.java | 34 ++- .../lucene/search/FilteredCollector.java | 2 +- .../lucene/search/MoreLikeThisQuery.java | 4 +- .../common/lucene/search/Queries.java | 15 +- .../common/lucene/search/XMoreLikeThis.java | 2 +- .../function/FiltersFunctionScoreQuery.java | 8 +- .../search/function/FunctionScoreQuery.java | 4 +- .../uid/PerThreadIDAndVersionLookup.java | 12 +- .../common/util/MultiDataPathUpgrader.java | 3 +- .../elasticsearch/env/NodeEnvironment.java | 8 +- .../index/codec/CodecService.java | 6 +- .../PerFieldMappingPostingFormatCodec.java | 5 +- .../BloomFilterPostingsFormat.java | 6 +- .../index/engine/EngineConfig.java | 21 -- .../index/engine/InternalEngine.java | 3 +- .../fielddata/ordinals/OrdinalsBuilder.java | 2 +- .../plain/PagedBytesIndexFieldData.java | 2 +- .../plain/ParentChildIndexFieldData.java | 2 +- .../plain/ParentChildIntersectTermsEnum.java | 8 +- .../index/query/BoolQueryParser.java | 2 +- .../index/query/MatchQueryParser.java | 2 +- .../index/query/QueryStringQueryParser.java | 2 +- .../index/query/SimpleQueryParser.java | 7 +- .../index/query/SimpleQueryStringParser.java | 2 +- .../index/query/TermsQueryParser.java | 3 +- .../index/search/MultiMatchQuery.java | 3 +- .../child/ChildrenConstantScoreQuery.java | 6 +- .../index/search/child/ChildrenQuery.java | 4 +- .../child/ParentConstantScoreQuery.java | 18 +- .../index/search/child/ParentIdsFilter.java | 36 ++- .../index/search/child/ParentQuery.java | 7 +- .../search/geo/GeoDistanceRangeQuery.java | 7 +- .../search/nested/IncludeNestedDocsQuery.java | 4 +- .../elasticsearch/index/shard/IndexShard.java | 2 +- .../index/shard/VersionFieldUpgrader.java | 6 +- .../index/store/FsDirectoryService.java | 5 +- .../org/elasticsearch/index/store/Store.java | 4 +- .../cache/query/IndicesQueryCache.java | 5 +- .../children/ParentToChildrenAggregator.java | 10 +- .../bucket/filter/FilterAggregator.java | 2 +- .../bucket/filters/FiltersAggregator.java | 2 +- .../fetch/innerhits/InnerHitsContext.java | 4 +- .../MatchedQueriesFetchSubPhase.java | 2 +- .../search/highlight/CustomQueryScorer.java | 3 +- .../search/lookup/IndexFieldTerm.java 
| 34 ++- .../AnalyzingCompletionLookupProvider.java | 9 +- .../completion/CompletionTokenStream.java | 5 - .../AbstractTermVectorsTestCase.java | 4 +- .../GetTermVectorsCheckDocFreqIT.java | 6 +- .../action/termvectors/GetTermVectorsIT.java | 10 +- .../lucene/IndexCacheableQueryTests.java | 13 +- .../deps/lucene/SimpleLuceneTests.java | 4 +- .../elasticsearch/index/codec/CodecTests.java | 4 +- .../index/engine/InternalEngineTests.java | 5 - .../index/engine/ShadowEngineTests.java | 4 - .../ParentChildFilteredTermsEnumTests.java | 4 +- .../ChildrenConstantScoreQueryTests.java | 11 +- .../search/child/ChildrenQueryTests.java | 11 +- .../child/ParentConstantScoreQueryTests.java | 11 +- .../index/search/child/ParentQueryTests.java | 11 +- .../elasticsearch/index/store/StoreTest.java | 12 +- .../indices/warmer/SimpleIndicesWarmerIT.java | 105 +------- .../search/fetch/FetchSubPhasePluginIT.java | 2 +- .../search/highlight/HighlighterSearchIT.java | 8 +- .../suggest/CompletionTokenStreamTest.java | 16 +- .../AnalyzingCompletionLookupProviderV1.java | 4 +- .../CompletionPostingsFormatTest.java | 11 +- .../org/elasticsearch/test/ESTestCase.java | 5 - .../test/ESTokenStreamTestCase.java | 5 - .../test/IBMJ9HackThreadFilters.java | 53 ---- .../engine/ThrowingLeafReaderWrapper.java | 4 +- .../validate/SimpleValidateQueryIT.java | 4 +- .../lucene-analyzers-common-5.2.1.jar.sha1 | 1 - .../lucene-analyzers-common-5.3.0.jar.sha1 | 1 + .../lucene-backward-codecs-5.2.1.jar.sha1 | 1 - .../lucene-backward-codecs-5.3.0.jar.sha1 | 1 + .../licenses/lucene-core-5.2.1.jar.sha1 | 1 - .../licenses/lucene-core-5.3.0.jar.sha1 | 1 + .../lucene-expressions-5.2.1.jar.sha1 | 1 - .../lucene-expressions-5.3.0.jar.sha1 | 1 + .../licenses/lucene-grouping-5.2.1.jar.sha1 | 1 - .../licenses/lucene-grouping-5.3.0.jar.sha1 | 1 + .../lucene-highlighter-5.2.1.jar.sha1 | 1 - .../lucene-highlighter-5.3.0.jar.sha1 | 1 + .../licenses/lucene-join-5.2.1.jar.sha1 | 1 - .../licenses/lucene-join-5.3.0.jar.sha1 | 1 + .../licenses/lucene-memory-5.2.1.jar.sha1 | 1 - .../licenses/lucene-memory-5.3.0.jar.sha1 | 1 + .../licenses/lucene-misc-5.2.1.jar.sha1 | 1 - .../licenses/lucene-misc-5.3.0.jar.sha1 | 1 + .../licenses/lucene-queries-5.2.1.jar.sha1 | 1 - .../licenses/lucene-queries-5.3.0.jar.sha1 | 1 + .../lucene-queryparser-5.2.1.jar.sha1 | 1 - .../lucene-queryparser-5.3.0.jar.sha1 | 1 + .../licenses/lucene-sandbox-5.2.1.jar.sha1 | 1 - .../licenses/lucene-sandbox-5.3.0.jar.sha1 | 1 + .../licenses/lucene-spatial-5.2.1.jar.sha1 | 1 - .../licenses/lucene-spatial-5.3.0.jar.sha1 | 1 + .../licenses/lucene-spatial3d-5.3.0.jar.sha1 | 1 + .../licenses/lucene-suggest-5.2.1.jar.sha1 | 1 - .../licenses/lucene-suggest-5.3.0.jar.sha1 | 1 + pom.xml | 4 +- 115 files changed, 548 insertions(+), 816 deletions(-) delete mode 100644 core/src/main/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIterator.java delete mode 100644 core/src/test/java/org/elasticsearch/test/IBMJ9HackThreadFilters.java delete mode 100644 distribution/licenses/lucene-analyzers-common-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-analyzers-common-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-backward-codecs-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-backward-codecs-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-core-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-core-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-expressions-5.2.1.jar.sha1 create mode 100644 
distribution/licenses/lucene-expressions-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-grouping-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-grouping-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-highlighter-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-highlighter-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-join-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-join-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-memory-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-memory-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-misc-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-misc-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-queries-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-queries-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-queryparser-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-queryparser-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-sandbox-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-sandbox-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-spatial-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-spatial-5.3.0.jar.sha1 create mode 100644 distribution/licenses/lucene-spatial3d-5.3.0.jar.sha1 delete mode 100644 distribution/licenses/lucene-suggest-5.2.1.jar.sha1 create mode 100644 distribution/licenses/lucene-suggest-5.3.0.jar.sha1 diff --git a/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java b/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java index 169c017804b..1e9ecf7ae6f 100644 --- a/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java +++ b/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; -import org.apache.lucene.util.Bits; import java.io.IOException; @@ -60,7 +59,7 @@ public final class MinDocQuery extends Query { public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { return new ConstantScoreWeight(this) { @Override - public Scorer scorer(LeafReaderContext context, final Bits acceptDocs) throws IOException { + public Scorer scorer(LeafReaderContext context) throws IOException { final int maxDoc = context.reader().maxDoc(); if (context.docBase + maxDoc <= minDoc) { return null; @@ -89,12 +88,6 @@ public final class MinDocQuery extends Query { } else { doc = target; } - while (doc < maxDoc) { - if (acceptDocs == null || acceptDocs.get(doc)) { - break; - } - doc += 1; - } if (doc >= maxDoc) { doc = NO_MORE_DOCS; } diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index c8d3d31a305..5304c3d2e93 100644 --- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -279,7 +279,7 @@ public class MapperQueryParser extends QueryParser { if (q != null) { added = true; applyBoost(mField, q); - applySlop(q, slop); + q = applySlop(q, slop); disMaxQuery.add(q); } } @@ -293,7 +293,7 @@ public class MapperQueryParser extends QueryParser { Query q = super.getFieldQuery(mField, queryText, slop); if (q != null) 
{ applyBoost(mField, q); - applySlop(q, slop); + q = applySlop(q, slop); clauses.add(new BooleanClause(q, BooleanClause.Occur.SHOULD)); } } @@ -718,15 +718,6 @@ public class MapperQueryParser extends QueryParser { return super.getWildcardQuery(field, aggStr.toString()); } - @Override - protected WildcardQuery newWildcardQuery(Term t) { - // Backport: https://issues.apache.org/jira/browse/LUCENE-6677 - assert Version.LATEST == Version.LUCENE_5_2_1; - WildcardQuery query = new WildcardQuery(t, maxDeterminizedStates); - query.setRewriteMethod(multiTermRewriteMethod); - return query; - } - @Override protected Query getRegexpQuery(String field, String termStr) throws ParseException { if (lowercaseExpandedTerms) { @@ -815,14 +806,24 @@ public class MapperQueryParser extends QueryParser { } } - private void applySlop(Query q, int slop) { - if (q instanceof FilteredQuery) { - applySlop(((FilteredQuery)q).getQuery(), slop); - } + private Query applySlop(Query q, int slop) { if (q instanceof PhraseQuery) { - ((PhraseQuery) q).setSlop(slop); + PhraseQuery pq = (PhraseQuery) q; + PhraseQuery.Builder builder = new PhraseQuery.Builder(); + builder.setSlop(slop); + final Term[] terms = pq.getTerms(); + final int[] positions = pq.getPositions(); + for (int i = 0; i < terms.length; ++i) { + builder.add(terms[i], positions[i]); + } + pq = builder.build(); + pq.setBoost(q.getBoost()); + return pq; } else if (q instanceof MultiPhraseQuery) { ((MultiPhraseQuery) q).setSlop(slop); + return q; + } else { + return q; } } diff --git a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIterator.java b/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIterator.java deleted file mode 100644 index efdddf5260e..00000000000 --- a/core/src/main/java/org/apache/lucene/search/postingshighlight/CustomSeparatorBreakIterator.java +++ /dev/null @@ -1,153 +0,0 @@ -/* -Licensed to Elasticsearch under one or more contributor -license agreements. See the NOTICE file distributed with -this work for additional information regarding copyright -ownership. Elasticsearch licenses this file to you under -the Apache License, Version 2.0 (the "License"); you may -not use this file except in compliance with the License. -You may obtain a copy of the License at - * - http://www.apache.org/licenses/LICENSE-2.0 - * -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. - */ - -package org.apache.lucene.search.postingshighlight; - -import java.text.BreakIterator; -import java.text.CharacterIterator; - -/** - * A {@link BreakIterator} that breaks the text whenever a certain separator, provided as a constructor argument, is found. 
- */ -public class CustomSeparatorBreakIterator extends BreakIterator { - - private final char separator; - private CharacterIterator text; - private int current; - - public CustomSeparatorBreakIterator(char separator) { - this.separator = separator; - } - - @Override - public int current() { - return current; - } - - @Override - public int first() { - text.setIndex(text.getBeginIndex()); - return current = text.getIndex(); - } - - @Override - public int last() { - text.setIndex(text.getEndIndex()); - return current = text.getIndex(); - } - - @Override - public int next() { - if (text.getIndex() == text.getEndIndex()) { - return DONE; - } else { - return advanceForward(); - } - } - - private int advanceForward() { - char c; - while( (c = text.next()) != CharacterIterator.DONE) { - if (c == separator) { - return current = text.getIndex() + 1; - } - } - assert text.getIndex() == text.getEndIndex(); - return current = text.getIndex(); - } - - @Override - public int following(int pos) { - if (pos < text.getBeginIndex() || pos > text.getEndIndex()) { - throw new IllegalArgumentException("offset out of bounds"); - } else if (pos == text.getEndIndex()) { - // this conflicts with the javadocs, but matches actual behavior (Oracle has a bug in something) - // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=9000909 - text.setIndex(text.getEndIndex()); - current = text.getIndex(); - return DONE; - } else { - text.setIndex(pos); - current = text.getIndex(); - return advanceForward(); - } - } - - @Override - public int previous() { - if (text.getIndex() == text.getBeginIndex()) { - return DONE; - } else { - return advanceBackward(); - } - } - - private int advanceBackward() { - char c; - while( (c = text.previous()) != CharacterIterator.DONE) { - if (c == separator) { - return current = text.getIndex() + 1; - } - } - assert text.getIndex() == text.getBeginIndex(); - return current = text.getIndex(); - } - - @Override - public int preceding(int pos) { - if (pos < text.getBeginIndex() || pos > text.getEndIndex()) { - throw new IllegalArgumentException("offset out of bounds"); - } else if (pos == text.getBeginIndex()) { - // this conflicts with the javadocs, but matches actual behavior (Oracle has a bug in something) - // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=9000909 - text.setIndex(text.getBeginIndex()); - current = text.getIndex(); - return DONE; - } else { - text.setIndex(pos); - current = text.getIndex(); - return advanceBackward(); - } - } - - @Override - public int next(int n) { - if (n < 0) { - for (int i = 0; i < -n; i++) { - previous(); - } - } else { - for (int i = 0; i < n; i++) { - next(); - } - } - return current(); - } - - @Override - public CharacterIterator getText() { - return text; - } - - @Override - public void setText(CharacterIterator newText) { - text = newText; - current = text.getBeginIndex(); - } -} diff --git a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java index ec26fffb228..98401cd2e14 100644 --- a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java +++ b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.suggest.Lookup; import org.apache.lucene.store.*; import org.apache.lucene.util.*; import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.LimitedFiniteStringsIterator; import 
org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.Transition; import org.apache.lucene.util.fst.*; @@ -465,16 +466,12 @@ public long ramBytesUsed() { byte buffer[] = new byte[8]; try { ByteArrayDataOutput output = new ByteArrayDataOutput(buffer); - BytesRef surfaceForm; - while ((surfaceForm = iterator.next()) != null) { - Set paths = toFiniteStrings(surfaceForm, ts2a); - - maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, paths.size()); - - for (IntsRef path : paths) { - - Util.toBytesRef(path, scratch); + for (BytesRef surfaceForm; (surfaceForm = iterator.next()) != null;) { + LimitedFiniteStringsIterator finiteStrings = + new LimitedFiniteStringsIterator(toAutomaton(surfaceForm, ts2a), maxGraphExpansions); + for (IntsRef string; (string = finiteStrings.next()) != null; count++) { + Util.toBytesRef(string, scratch); // length of the analyzed text (FST input) if (scratch.length() > Short.MAX_VALUE-2) { @@ -526,7 +523,7 @@ public long ramBytesUsed() { writer.write(buffer, 0, output.getPosition()); } - count++; + maxAnalyzedPathsForOneInput = Math.max(maxAnalyzedPathsForOneInput, finiteStrings.size()); } writer.close(); @@ -912,23 +909,17 @@ public long ramBytesUsed() { return prefixPaths; } - public final Set toFiniteStrings(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) throws IOException { - // Analyze surface form: - TokenStream ts = indexAnalyzer.tokenStream("", surfaceForm.utf8ToString()); - return toFiniteStrings(ts2a, ts); - } - - public final Set toFiniteStrings(final TokenStreamToAutomaton ts2a, final TokenStream ts) throws IOException { - Automaton automaton = null; - try { - - // Create corresponding automaton: labels are bytes - // from each analyzed token, with byte 0 used as - // separator between tokens: - automaton = ts2a.toAutomaton(ts); - } finally { - IOUtils.closeWhileHandlingException(ts); + final Automaton toAutomaton(final BytesRef surfaceForm, final TokenStreamToAutomaton ts2a) throws IOException { + try (TokenStream ts = indexAnalyzer.tokenStream("", surfaceForm.utf8ToString())) { + return toAutomaton(ts, ts2a); } + } + + final Automaton toAutomaton(TokenStream ts, final TokenStreamToAutomaton ts2a) throws IOException { + // Create corresponding automaton: labels are bytes + // from each analyzed token, with byte 0 used as + // separator between tokens: + Automaton automaton = ts2a.toAutomaton(ts); automaton = replaceSep(automaton); automaton = convertAutomaton(automaton); @@ -940,11 +931,24 @@ public long ramBytesUsed() { // more than one path, eg if the analyzer created a // graph using SynFilter or WDF): - // TODO: we could walk & add simultaneously, so we - // don't have to alloc [possibly biggish] - // intermediate HashSet in RAM: + return automaton; + } - return Operations.getFiniteStrings(automaton, maxGraphExpansions); + // EDIT: Adrien, needed by lookup providers + // NOTE: these XForks are unmaintainable, we need to get rid of them... 
+ public Set toFiniteStrings(TokenStream stream) throws IOException { + final TokenStreamToAutomaton ts2a = getTokenStreamToAutomaton(); + Automaton automaton; + try (TokenStream ts = stream) { + automaton = toAutomaton(ts, ts2a); + } + LimitedFiniteStringsIterator finiteStrings = + new LimitedFiniteStringsIterator(automaton, maxGraphExpansions); + Set set = new HashSet<>(); + for (IntsRef string = finiteStrings.next(); string != null; string = finiteStrings.next()) { + set.add(IntsRef.deepCopyOf(string)); + } + return Collections.unmodifiableSet(set); } final Automaton toLookupAutomaton(final CharSequence key) throws IOException { diff --git a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java index 5170057a67c..20f95c646fc 100644 --- a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java +++ b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XFuzzySuggester.java @@ -28,9 +28,10 @@ import org.apache.lucene.util.fst.FST; import org.apache.lucene.util.fst.PairOutputs; import java.io.IOException; -import java.util.Arrays; +import java.util.ArrayList; import java.util.List; -import java.util.Set; + +import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES; /** * Implements a fuzzy {@link AnalyzingSuggester}. The similarity measurement is @@ -221,42 +222,37 @@ public final class XFuzzySuggester extends XAnalyzingSuggester { } Automaton toLevenshteinAutomata(Automaton automaton) { - final Set ref = Operations.getFiniteStrings(automaton, -1); - Automaton subs[] = new Automaton[ref.size()]; - int upto = 0; - for (IntsRef path : ref) { - if (path.length <= nonFuzzyPrefix || path.length < minFuzzyLength) { - subs[upto] = Automata.makeString(path.ints, path.offset, path.length); - upto++; + List subs = new ArrayList<>(); + FiniteStringsIterator finiteStrings = new FiniteStringsIterator(automaton); + for (IntsRef string; (string = finiteStrings.next()) != null;) { + if (string.length <= nonFuzzyPrefix || string.length < minFuzzyLength) { + subs.add(Automata.makeString(string.ints, string.offset, string.length)); } else { - int ints[] = new int[path.length-nonFuzzyPrefix]; - System.arraycopy(path.ints, path.offset+nonFuzzyPrefix, ints, 0, ints.length); + int ints[] = new int[string.length-nonFuzzyPrefix]; + System.arraycopy(string.ints, string.offset+nonFuzzyPrefix, ints, 0, ints.length); // TODO: maybe add alphaMin to LevenshteinAutomata, // and pass 1 instead of 0? We probably don't want // to allow the trailing dedup bytes to be // edited... but then 0 byte is "in general" allowed // on input (but not in UTF8). LevenshteinAutomata lev = new LevenshteinAutomata(ints, unicodeAware ? Character.MAX_CODE_POINT : 255, transpositions); - subs[upto] = lev.toAutomaton(maxEdits, UnicodeUtil.newString(path.ints, path.offset, nonFuzzyPrefix)); - upto++; + subs.add(lev.toAutomaton(maxEdits, UnicodeUtil.newString(string.ints, string.offset, nonFuzzyPrefix))); } } - if (subs.length == 0) { + if (subs.isEmpty()) { // automaton is empty, there is no accepted paths through it return Automata.makeEmpty(); // matches nothing - } else if (subs.length == 1) { + } else if (subs.size() == 1) { // no synonyms or anything: just a single path through the tokenstream - return subs[0]; + return subs.get(0); } else { // multiple paths: this is really scary! is it slow? // maybe we should not do this and throw UOE? 
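The pattern both suggesters switch to in these hunks is worth spelling out: instead of materializing every accepted path of the automaton in a `Set` up front, Lucene 5.3's finite-strings iterators stream one path at a time. A minimal sketch, assuming an `Automaton` built elsewhere (and assuming, as the copy in `toFiniteStrings` above suggests, that the iterator recycles its `IntsRef` between calls):

    List<IntsRef> paths = new ArrayList<>();
    FiniteStringsIterator finiteStrings = new FiniteStringsIterator(automaton);
    for (IntsRef string; (string = finiteStrings.next()) != null; ) {
        paths.add(IntsRef.deepCopyOf(string)); // copy: the iterator reuses the returned ref
    }

`LimitedFiniteStringsIterator` behaves the same way but stops after a given number of strings, which is what replaces the old `Operations.getFiniteStrings(automaton, maxGraphExpansions)` call.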
- Automaton a = Operations.union(Arrays.asList(subs)); + Automaton a = Operations.union(subs); // TODO: we could call toLevenshteinAutomata() before det? // this only happens if you have multiple paths anyway (e.g. synonyms) - - // This automaton should not blow up during determinize: - return Operations.determinize(a, Integer.MAX_VALUE); + return Operations.determinize(a, DEFAULT_MAX_DETERMINIZED_STATES); } } } diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java index d12fcd3274b..624aa02e416 100644 --- a/core/src/main/java/org/elasticsearch/Version.java +++ b/core/src/main/java/org/elasticsearch/Version.java @@ -258,7 +258,7 @@ public class Version { public static final int V_2_0_0_ID = 2000099; public static final Version V_2_0_0 = new Version(V_2_0_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1); public static final int V_2_1_0_ID = 2010099; - public static final Version V_2_1_0 = new Version(V_2_1_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1); + public static final Version V_2_1_0 = new Version(V_2_1_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_0); public static final Version CURRENT = V_2_1_0; diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java index 4496b230775..127756e6b2f 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFields.java @@ -336,7 +336,7 @@ public final class TermVectorsFields extends Fields { } @Override - public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException { + public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { final TermVectorPostingsEnum retVal = (reuse instanceof TermVectorPostingsEnum ? (TermVectorPostingsEnum) reuse : new TermVectorPostingsEnum()); return retVal.reset(hasPositions ? positions : null, hasOffsets ? startOffsets : null, hasOffsets ? 
endOffsets diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFilter.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFilter.java index 643973b9e53..373893f8a7d 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFilter.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsFilter.java @@ -286,7 +286,7 @@ public class TermVectorsFilter { } private int getTermFreq(TermsEnum termsEnum, PostingsEnum docsEnum) throws IOException { - docsEnum = termsEnum.postings(null, docsEnum); + docsEnum = termsEnum.postings(docsEnum); docsEnum.nextDoc(); return docsEnum.freq(); } diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java index e321a2d46bb..2a4bc836ece 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java @@ -220,7 +220,7 @@ public class TermVectorsResponse extends ActionResponse implements ToXContent { builder.startObject(spare.toString()); buildTermStatistics(builder, termIter); // finally write the term vectors - PostingsEnum posEnum = termIter.postings(null, null, PostingsEnum.ALL); + PostingsEnum posEnum = termIter.postings(null, PostingsEnum.ALL); int termFreq = posEnum.freq(); builder.field(FieldStrings.TERM_FREQ, termFreq); initMemory(curTerms, termFreq); diff --git a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java index de52c5e4840..89a8ff088f6 100644 --- a/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java +++ b/core/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java @@ -151,7 +151,7 @@ final class TermVectorsWriter { } private PostingsEnum writeTermWithDocsOnly(TermsEnum iterator, PostingsEnum docsEnum) throws IOException { - docsEnum = iterator.postings(null, docsEnum); + docsEnum = iterator.postings(docsEnum); int nextDoc = docsEnum.nextDoc(); assert nextDoc != DocIdSetIterator.NO_MORE_DOCS; writeFreq(docsEnum.freq()); @@ -162,7 +162,7 @@ final class TermVectorsWriter { private PostingsEnum writeTermWithDocsAndPos(TermsEnum iterator, PostingsEnum docsAndPosEnum, boolean positions, boolean offsets, boolean payloads) throws IOException { - docsAndPosEnum = iterator.postings(null, docsAndPosEnum, PostingsEnum.ALL); + docsAndPosEnum = iterator.postings(docsAndPosEnum, PostingsEnum.ALL); // for each term (iterator next) in this field (field) // iterate over the docs (should only be one) int nextDoc = docsAndPosEnum.nextDoc(); diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 8ce9e24d6af..536af8bfdc6 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -30,7 +30,6 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.text.Text; import org.joda.time.ReadableInstant; @@ -43,8 +42,6 @@ import java.util.Date; import java.util.LinkedHashMap; import 
java.util.List; import java.util.Map; -import java.util.regex.Matcher; -import java.util.regex.Pattern; /** * @@ -456,14 +453,6 @@ public abstract class StreamOutput extends OutputStream { } } - static { - assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_5_2_1: "Remove these regex once we upgrade to Lucene 5.3 and get proper getters for these expections"; - } - private final static Pattern CORRUPT_INDEX_EXCEPTION_REGEX = Regex.compile("^(.+) \\(resource=(.+)\\)$", ""); - private final static Pattern INDEX_FORMAT_TOO_NEW_EXCEPTION_REGEX = Regex.compile("Format version is not supported \\(resource (.+)\\): (-?\\d+) \\(needs to be between (-?\\d+) and (-?\\d+)\\)", ""); - private final static Pattern INDEX_FORMAT_TOO_OLD_EXCEPTION_REGEX_1 = Regex.compile("Format version is not supported \\(resource (.+)\\): (-?\\d+)(?: \\(needs to be between (-?\\d+) and (-?\\d+)\\)). This version of Lucene only supports indexes created with release 4.0 and later\\.", ""); - private final static Pattern INDEX_FORMAT_TOO_OLD_EXCEPTION_REGEX_2 = Regex.compile("Format version is not supported \\(resource (.+)\\): (.+). This version of Lucene only supports indexes created with release 4.0 and later\\.", ""); - private static int parseIntSafe(String val, int defaultVal) { try { return Integer.parseInt(val); @@ -481,73 +470,29 @@ public abstract class StreamOutput extends OutputStream { boolean writeMessage = true; if (throwable instanceof CorruptIndexException) { writeVInt(1); - // Lucene 5.3 will have getters for all these - // we should switch to using getters instead of trying to parse the message: - // writeOptionalString(((CorruptIndexException)throwable).getDescription()); - // writeOptionalString(((CorruptIndexException)throwable).getResource()); - Matcher matcher = CORRUPT_INDEX_EXCEPTION_REGEX.matcher(throwable.getMessage()); - if (matcher.find()) { - writeOptionalString(matcher.group(1)); // message - writeOptionalString(matcher.group(2)); // resource - } else { - // didn't match - writeOptionalString("???"); // message - writeOptionalString("???"); // resource - } + writeOptionalString(((CorruptIndexException)throwable).getOriginalMessage()); + writeOptionalString(((CorruptIndexException)throwable).getResourceDescription()); writeMessage = false; } else if (throwable instanceof IndexFormatTooNewException) { writeVInt(2); - // Lucene 5.3 will have getters for all these - // we should switch to using getters instead of trying to parse the message: - // writeOptionalString(((CorruptIndexException)throwable).getResource()); - // writeInt(((IndexFormatTooNewException)throwable).getVersion()); - // writeInt(((IndexFormatTooNewException)throwable).getMinVersion()); - // writeInt(((IndexFormatTooNewException)throwable).getMaxVersion()); - Matcher matcher = INDEX_FORMAT_TOO_NEW_EXCEPTION_REGEX.matcher(throwable.getMessage()); - if (matcher.find()) { - writeOptionalString(matcher.group(1)); // resource - writeInt(parseIntSafe(matcher.group(2), -1)); // version - writeInt(parseIntSafe(matcher.group(3), -1)); // min version - writeInt(parseIntSafe(matcher.group(4), -1)); // max version - } else { - // didn't match - writeOptionalString("???"); // resource - writeInt(-1); // version - writeInt(-1); // min version - writeInt(-1); // max version - } + writeOptionalString(((IndexFormatTooNewException)throwable).getResourceDescription()); + writeInt(((IndexFormatTooNewException)throwable).getVersion()); + writeInt(((IndexFormatTooNewException)throwable).getMinVersion()); + 
writeInt(((IndexFormatTooNewException)throwable).getMaxVersion()); writeMessage = false; writeCause = false; } else if (throwable instanceof IndexFormatTooOldException) { writeVInt(3); - // Lucene 5.3 will have getters for all these - // we should switch to using getters instead of trying to parse the message: - // writeOptionalString(((CorruptIndexException)throwable).getResource()); - // writeInt(((IndexFormatTooNewException)throwable).getVersion()); - // writeInt(((IndexFormatTooNewException)throwable).getMinVersion()); - // writeInt(((IndexFormatTooNewException)throwable).getMaxVersion()); - Matcher matcher = INDEX_FORMAT_TOO_OLD_EXCEPTION_REGEX_1.matcher(throwable.getMessage()); - if (matcher.find()) { - // version with numeric version in constructor - writeOptionalString(matcher.group(1)); // resource - writeBoolean(true); - writeInt(parseIntSafe(matcher.group(2), -1)); // version - writeInt(parseIntSafe(matcher.group(3), -1)); // min version - writeInt(parseIntSafe(matcher.group(4), -1)); // max version + IndexFormatTooOldException t = (IndexFormatTooOldException) throwable; + writeOptionalString(t.getResourceDescription()); + if (t.getVersion() == null) { + writeBoolean(false); + writeOptionalString(t.getReason()); } else { - matcher = INDEX_FORMAT_TOO_OLD_EXCEPTION_REGEX_2.matcher(throwable.getMessage()); - if (matcher.matches()) { - writeOptionalString(matcher.group(1)); // resource - writeBoolean(false); - writeOptionalString(matcher.group(2)); // version - } else { - // didn't match - writeOptionalString("???"); // resource - writeBoolean(true); - writeInt(-1); // version - writeInt(-1); // min version - writeInt(-1); // max version - } + writeBoolean(true); + writeInt(t.getVersion()); + writeInt(t.getMinVersion()); + writeInt(t.getMaxVersion()); } writeMessage = false; writeCause = false; diff --git a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 600c23899cb..f82ec128ed7 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -62,7 +62,7 @@ public class Lucene { public static final Version QUERYPARSER_VERSION = VERSION; public static final String LATEST_DOC_VALUES_FORMAT = "Lucene50"; public static final String LATEST_POSTINGS_FORMAT = "Lucene50"; - public static final String LATEST_CODEC = "Lucene50"; + public static final String LATEST_CODEC = "Lucene53"; static { Deprecated annotation = PostingsFormat.forName(LATEST_POSTINGS_FORMAT).getClass().getAnnotation(Deprecated.class); @@ -138,36 +138,6 @@ public class Lucene { return SegmentInfos.readCommit(directory, segmentsFileName); } - /** - * Tries to acquire the {@link IndexWriter#WRITE_LOCK_NAME} on the given directory. The returned lock must be closed once - * the lock is released. If the lock can't be obtained a {@link LockObtainFailedException} is thrown. - * This method uses the {@link IndexWriterConfig#getDefaultWriteLockTimeout()} as the lock timeout. - */ - public static Lock acquireWriteLock(Directory directory) throws IOException { - return acquireLock(directory, IndexWriter.WRITE_LOCK_NAME, IndexWriterConfig.getDefaultWriteLockTimeout()); - } - - /** - * Tries to acquire a lock on the given directory. The returned lock must be closed once - * the lock is released. If the lock can't be obtained a {@link LockObtainFailedException} is thrown. 
- */ - @SuppressForbidden(reason = "this method uses trappy Directory#makeLock API") - public static Lock acquireLock(Directory directory, String lockName, long timeout) throws IOException { - final Lock writeLock = directory.makeLock(lockName); - boolean success = false; - try { - if (writeLock.obtain(timeout) == false) { - throw new LockObtainFailedException("failed to obtain lock: " + writeLock); - } - success = true; - } finally { - if (success == false) { - writeLock.close(); - } - } - return writeLock; - } - /** * This method removes all files from the given directory that are not referenced by the given segments file. * This method will open an IndexWriter and relies on index file deleter to remove all unreferenced files. Segment files @@ -179,7 +149,7 @@ public class Lucene { */ public static SegmentInfos pruneUnreferencedFiles(String segmentsFileName, Directory directory) throws IOException { final SegmentInfos si = readSegmentInfos(segmentsFileName, directory); - try (Lock writeLock = acquireWriteLock(directory)) { + try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { int foundSegmentFiles = 0; for (final String file : directory.listAll()) { /** @@ -218,7 +188,7 @@ public class Lucene { * this operation fails. */ public static void cleanLuceneIndex(Directory directory) throws IOException { - try (Lock writeLock = acquireWriteLock(directory)) { + try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { for (final String file : directory.listAll()) { if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) { directory.deleteFile(file); // remove all segment_N files diff --git a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java index a28d635a04e..9853659ca06 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java @@ -19,27 +19,32 @@ package org.elasticsearch.common.lucene.all; +import org.apache.lucene.analysis.payloads.PayloadHelper; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermContext; +import org.apache.lucene.index.TermState; import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.CollectionStatistics; +import org.apache.lucene.search.Explanation; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.payloads.AveragePayloadFunction; -import org.apache.lucene.search.payloads.PayloadTermQuery; +import org.apache.lucene.search.TermStatistics; +import org.apache.lucene.search.Weight; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.search.similarities.Similarity.SimScorer; -import org.apache.lucene.search.spans.SpanWeight; -import org.apache.lucene.search.spans.TermSpans; -import org.apache.lucene.util.Bits; +import org.apache.lucene.search.similarities.Similarity.SimWeight; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.ToStringUtils; import java.io.IOException; - -import static 
org.apache.lucene.analysis.payloads.PayloadHelper.decodeFloat; +import java.util.Set; /** * A term query that takes all payload boost values into account. @@ -49,78 +54,12 @@ import static org.apache.lucene.analysis.payloads.PayloadHelper.decodeFloat; * determine how the payload should be factored in, it just parses * the float and multiplies the average with the regular score. */ -public final class AllTermQuery extends PayloadTermQuery { +public final class AllTermQuery extends Query { + + private final Term term; public AllTermQuery(Term term) { - super(term, new AveragePayloadFunction()); - } - - @Override - public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { - // TODO: needsScores - // we should be able to just return a regular SpanTermWeight, at most here if needsScores == false? - return new AllTermWeight(this, searcher, needsScores); - } - - class AllTermWeight extends PayloadTermWeight { - - AllTermWeight(AllTermQuery query, IndexSearcher searcher, boolean needsScores) throws IOException { - super(query, searcher, needsScores); - } - - @Override - public AllTermSpanScorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - if (this.stats == null) { - return null; - } - // we have a custom weight class, we must check in case something is wrong with _all - Terms terms = context.reader().terms(query.getField()); - if (terms != null && terms.hasPositions() == false) { - throw new IllegalStateException("field \"" + term.field() + "\" was indexed without position data; cannot run AllTermQuery (term=" + term.text() + ")"); - } - TermSpans spans = (TermSpans) query.getSpans(context, acceptDocs, termContexts); - if (spans == null) { - return null; - } - SimScorer sloppySimScorer = similarity.simScorer(stats, context); - return new AllTermSpanScorer(spans, this, sloppySimScorer); - } - - class AllTermSpanScorer extends PayloadTermSpanScorer { - final PostingsEnum postings; - - AllTermSpanScorer(TermSpans spans, SpanWeight weight, Similarity.SimScorer docScorer) throws IOException { - super(spans, weight, docScorer); - postings = spans.getPostings(); - } - - @Override - protected void processPayload(Similarity similarity) throws IOException { - // note: similarity is ignored here (we just use decodeFloat always). - // this is the only difference between this class and PayloadTermQuery. 
- if (spans.isPayloadAvailable()) { - BytesRef payload = postings.getPayload(); - payloadScore += decodeFloat(payload.bytes, payload.offset); - payloadsSeen++; - } - } - } - } - - @Override - public int hashCode() { - return super.hashCode() + 1; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (!super.equals(obj)) - return false; - if (getClass() != obj.getClass()) - return false; - return true; + this.term = term; } @Override @@ -150,4 +89,144 @@ public final class AllTermQuery extends PayloadTermQuery { return this; } + @Override + public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + if (needsScores == false) { + return new TermQuery(term).createWeight(searcher, needsScores); + } + final TermContext termStates = TermContext.build(searcher.getTopReaderContext(), term); + final CollectionStatistics collectionStats = searcher.collectionStatistics(term.field()); + final TermStatistics termStats = searcher.termStatistics(term, termStates); + final Similarity similarity = searcher.getSimilarity(needsScores); + final SimWeight stats = similarity.computeWeight(getBoost(), collectionStats, termStats); + return new Weight(this) { + + @Override + public final float getValueForNormalization() throws IOException { + return stats.getValueForNormalization(); + } + + @Override + public final void normalize(float norm, float topLevelBoost) { + stats.normalize(norm, topLevelBoost); + } + + @Override + public void extractTerms(Set terms) { + terms.add(term); + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + AllTermScorer scorer = scorer(context); + if (scorer != null) { + int newDoc = scorer.advance(doc); + if (newDoc == doc) { + float score = scorer.score(); + float freq = scorer.freq(); + SimScorer docScorer = similarity.simScorer(stats, context); + Explanation freqExplanation = Explanation.match(freq, "termFreq=" + freq); + Explanation termScoreExplanation = docScorer.explain(doc, freqExplanation); + Explanation payloadBoostExplanation = Explanation.match(scorer.payloadBoost(), "payloadBoost=" + scorer.payloadBoost()); + return Explanation.match( + score, + "weight(" + getQuery() + " in " + doc + ") [" + + similarity.getClass().getSimpleName() + "], product of:", + termScoreExplanation, payloadBoostExplanation); + } + } + return Explanation.noMatch("no matching term"); + } + + @Override + public AllTermScorer scorer(LeafReaderContext context) throws IOException { + final Terms terms = context.reader().terms(term.field()); + if (terms == null) { + return null; + } + final TermsEnum termsEnum = terms.iterator(); + if (termsEnum == null) { + return null; + } + final TermState state = termStates.get(context.ord); + termsEnum.seekExact(term.bytes(), state); + PostingsEnum docs = termsEnum.postings(null, PostingsEnum.PAYLOADS); + assert docs != null; + return new AllTermScorer(this, docs, similarity.simScorer(stats, context)); + } + + }; + } + + private static class AllTermScorer extends Scorer { + + final PostingsEnum postings; + final Similarity.SimScorer docScorer; + int doc = -1; + float payloadBoost; + + AllTermScorer(Weight weight, PostingsEnum postings, Similarity.SimScorer docScorer) { + super(weight); + this.postings = postings; + this.docScorer = docScorer; + } + + float payloadBoost() throws IOException { + if (doc != docID()) { + final int freq = postings.freq(); + payloadBoost = 0; + for (int i = 0; i < freq; ++i) { + postings.nextPosition(); + final 
BytesRef payload = postings.getPayload(); + float boost; + if (payload == null) { + boost = 1; + } else { + assert payload.length == 4; + boost = PayloadHelper.decodeFloat(payload.bytes, payload.offset); + } + payloadBoost += boost; + } + payloadBoost /= freq; + doc = docID(); + } + return payloadBoost; + } + + @Override + public float score() throws IOException { + return payloadBoost() * docScorer.score(postings.docID(), postings.freq()); + } + + @Override + public int freq() throws IOException { + return postings.freq(); + } + + @Override + public int docID() { + return postings.docID(); + } + + @Override + public int nextDoc() throws IOException { + return postings.nextDoc(); + } + + @Override + public int advance(int target) throws IOException { + return postings.advance(target); + } + + @Override + public long cost() { + return postings.cost(); + } + } + + @Override + public String toString(String field) { + return new TermQuery(term).toString(field) + ToStringUtils.boost(getBoost()); + } + } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java b/core/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java index 8e8c47c4614..47ed0dbe3f4 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java @@ -25,11 +25,12 @@ import org.apache.lucene.index.PostingsEnum; import org.apache.lucene.index.Terms; import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.FilteredDocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.BitDocIdSet; +import org.apache.lucene.util.BitSet; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Nullable; @@ -96,25 +97,32 @@ public class FilterableTermsEnum extends TermsEnum { if (termsEnum == null) { continue; } - Bits bits = null; + BitSet bits = null; if (weight != null) { - // we want to force apply deleted docs - Scorer docs = weight.scorer(context, context.reader().getLiveDocs()); + DocIdSetIterator docs = weight.scorer(context); if (docs == null) { // fully filtered, none matching, no need to iterate on this continue; } + // we want to force apply deleted docs + final Bits liveDocs = context.reader().getLiveDocs(); + if (liveDocs != null) { + docs = new FilteredDocIdSetIterator(docs) { + @Override + protected boolean match(int doc) { + return liveDocs.get(doc); + } + }; + } + BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc()); builder.or(docs); bits = builder.build().bits(); // Count how many docs are in our filtered set // TODO make this lazy-loaded only for those that need it? 
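The pattern in this hunk recurs throughout the patch: with this Lucene upgrade, Weight#scorer no longer takes an acceptDocs parameter, so deleted documents have to be excluded explicitly wherever they used to be filtered for free. A hedged sketch of the wrapper idiom used above (the helper class is illustrative, not part of the patch):

    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.search.FilteredDocIdSetIterator;
    import org.apache.lucene.util.Bits;

    final class LiveDocsIterators {
        // Wrap an iterator so that documents flagged as deleted are skipped.
        static DocIdSetIterator skipDeleted(DocIdSetIterator it, final Bits liveDocs) {
            if (liveDocs == null) {
                return it; // segment has no deletions
            }
            return new FilteredDocIdSetIterator(it) {
                @Override
                protected boolean match(int doc) {
                    return liveDocs.get(doc);
                }
            };
        }
    }

Once the surviving matches are collected into a BitSet, counting them becomes a single cardinality() call instead of the second scoring pass the old code performed, which is the simplification in the lines that follow.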
-                docs = weight.scorer(context, context.reader().getLiveDocs());
-                while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
-                    numDocs++;
-                }
+                numDocs += bits.cardinality();
             }
             enums.add(new Holder(termsEnum, bits));
         }
@@ -147,10 +155,13 @@ public class FilterableTermsEnum extends TermsEnum {
                     totalTermFreq += leafTotalTermFreq;
                 }
             } else {
-                final PostingsEnum docsEnum = anEnum.docsEnum = anEnum.termsEnum.postings(anEnum.bits, anEnum.docsEnum, docsEnumFlag);
+                final PostingsEnum docsEnum = anEnum.docsEnum = anEnum.termsEnum.postings(anEnum.docsEnum, docsEnumFlag);
                 // 2 choices for performing same heavy loop - one attempts to calculate totalTermFreq and other does not
                 if (docsEnumFlag == PostingsEnum.FREQS) {
                     for (int docId = docsEnum.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
+                        if (anEnum.bits != null && anEnum.bits.get(docId) == false) {
+                            continue;
+                        }
                         docFreq++;
                         // docsEnum.freq() returns 1 if doc indexed with IndexOptions.DOCS_ONLY so no way of knowing if value
                         // is really 1 or unrecorded when filtering like this
@@ -158,6 +169,9 @@
                     }
                 } else {
                     for (int docId = docsEnum.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docsEnum.nextDoc()) {
+                        if (anEnum.bits != null && anEnum.bits.get(docId) == false) {
+                            continue;
+                        }
                         // docsEnum.freq() behaviour is undefined if docsEnumFlag==PostingsEnum.FLAG_NONE so don't bother with call
                         docFreq++;
                     }
@@ -204,7 +218,7 @@
     }
     @Override
-    public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException {
+    public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
         throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE);
     }
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java b/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java
index b1c1b87fd3a..5bb92235044 100644
--- a/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java
+++ b/core/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java
@@ -44,7 +44,7 @@ public class FilteredCollector implements Collector {
     @Override
     public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
-        final Scorer filterScorer = filter.scorer(context, null);
+        final Scorer filterScorer = filter.scorer(context);
         final LeafCollector in = collector.getLeafCollector(context);
         final Bits bits = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorer);
diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java
index 1a5d2687565..6bbd97bfccb 100644
--- a/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java
+++ b/core/src/main/java/org/elasticsearch/common/lucene/search/MoreLikeThisQuery.java
@@ -166,7 +166,7 @@ public class MoreLikeThisQuery extends Query {
         BooleanQuery bq = new BooleanQuery();
         if (this.likeFields != null) {
             Query mltQuery = mlt.like(this.likeFields);
-            Queries.applyMinimumShouldMatch((BooleanQuery) mltQuery, minimumShouldMatch);
+            mltQuery = Queries.applyMinimumShouldMatch((BooleanQuery) mltQuery, minimumShouldMatch);
             bq.add(mltQuery, BooleanClause.Occur.SHOULD);
         }
         if (this.likeText != null) {
@@ -176,7 +176,7 @@
             }
             //LUCENE 4 UPGRADE this maps the 3.6 behavior (only
use the first field) Query mltQuery = mlt.like(moreLikeFields[0], readers); - Queries.applyMinimumShouldMatch((BooleanQuery) mltQuery, minimumShouldMatch); + mltQuery = Queries.applyMinimumShouldMatch((BooleanQuery) mltQuery, minimumShouldMatch); bq.add(mltQuery, BooleanClause.Occur.SHOULD); } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index 19b94fc6d72..9e49f79921d 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -107,9 +107,9 @@ public class Queries { return false; } - public static void applyMinimumShouldMatch(BooleanQuery query, @Nullable String minimumShouldMatch) { + public static BooleanQuery applyMinimumShouldMatch(BooleanQuery query, @Nullable String minimumShouldMatch) { if (minimumShouldMatch == null) { - return; + return query; } int optionalClauses = 0; for (BooleanClause c : query.clauses()) { @@ -120,8 +120,17 @@ public class Queries { int msm = calculateMinShouldMatch(optionalClauses, minimumShouldMatch); if (0 < msm) { - query.setMinimumNumberShouldMatch(msm); + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + builder.setDisableCoord(query.isCoordDisabled()); + for (BooleanClause clause : query) { + builder.add(clause); + } + builder.setMinimumNumberShouldMatch(msm); + BooleanQuery bq = builder.build(); + bq.setBoost(query.getBoost()); + query = bq; } + return query; } private static Pattern spaceAroundLessThanPattern = Pattern.compile("(\\s+<\\s*)|(\\s*<\\s+)"); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java b/core/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java index 4999f2a7cf4..4275647df0a 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/XMoreLikeThis.java @@ -852,7 +852,7 @@ public final class XMoreLikeThis { continue; } - final PostingsEnum docs = termsEnum.postings(null, null); + final PostingsEnum docs = termsEnum.postings(null); int freq = 0; while(docs != null && docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { freq += docs.freq(); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java index 91e93bec943..e95da1d8731 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java @@ -169,11 +169,11 @@ public class FiltersFunctionScoreQuery extends Query { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + public Scorer scorer(LeafReaderContext context) throws IOException { // we ignore scoreDocsInOrder parameter, because we need to score in // order if documents are scored with a script. The // ShardLookup depends on in order scoring. 
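The Queries.applyMinimumShouldMatch rewrite a few hunks above sets the pattern for many of the parser changes later in this patch: BooleanQuery is headed toward immutability, so the constraint is applied by rebuilding the query with BooleanQuery.Builder, and callers must keep the returned instance. A self-contained sketch of that rebuild idiom (the helper class, field, and term values are illustrative, not code from the patch):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.TermQuery;

    class MinimumShouldMatchRebuild {
        // Rebuild a BooleanQuery with a minimum-should-match constraint applied.
        static BooleanQuery withMinShouldMatch(BooleanQuery query, int msm) {
            BooleanQuery.Builder builder = new BooleanQuery.Builder();
            builder.setDisableCoord(query.isCoordDisabled());
            for (BooleanClause clause : query) {
                builder.add(clause); // copy clauses; the original query is not mutated
            }
            builder.setMinimumNumberShouldMatch(msm);
            BooleanQuery rebuilt = builder.build();
            rebuilt.setBoost(query.getBoost()); // boosts were still mutable at this point
            return rebuilt;
        }

        public static void main(String[] args) {
            BooleanQuery bq = new BooleanQuery();
            bq.add(new TermQuery(new Term("f", "a")), BooleanClause.Occur.SHOULD);
            bq.add(new TermQuery(new Term("f", "b")), BooleanClause.Occur.SHOULD);
            bq = withMinShouldMatch(bq, 1); // callers must use the returned query
        }
    }

The one-line changes in BoolQueryParser, MatchQueryParser, and the other parsers below are all just call sites being updated to reassign the returned query.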
- Scorer subQueryScorer = subQueryWeight.scorer(context, acceptDocs); + Scorer subQueryScorer = subQueryWeight.scorer(context); if (subQueryScorer == null) { return null; } @@ -182,7 +182,7 @@ public class FiltersFunctionScoreQuery extends Query { for (int i = 0; i < filterFunctions.length; i++) { FilterFunction filterFunction = filterFunctions[i]; functions[i] = filterFunction.function.getLeafScoreFunction(context); - Scorer filterScorer = filterWeights[i].scorer(context, null); // no need to apply accepted docs + Scorer filterScorer = filterWeights[i].scorer(context); docSets[i] = Lucene.asSequentialAccessBits(context.reader().maxDoc(), filterScorer); } return new FiltersFunctionFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, functions, docSets, combineFunction, minScore, needsScores); @@ -208,7 +208,7 @@ public class FiltersFunctionScoreQuery extends Query { } Bits docSet = Lucene.asSequentialAccessBits(context.reader().maxDoc(), - filterWeights[i].scorer(context, null)); + filterWeights[i].scorer(context)); if (docSet.get(doc)) { Explanation functionExplanation = filterFunction.function.getLeafScoreFunction(context).explainScore(doc, subQueryExpl); double factor = functionExplanation.getValue(); diff --git a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java index b3ad83e4d21..448eda8154c 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/search/function/FunctionScoreQuery.java @@ -128,8 +128,8 @@ public class FunctionScoreQuery extends Query { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - Scorer subQueryScorer = subQueryWeight.scorer(context, acceptDocs); + public Scorer scorer(LeafReaderContext context) throws IOException { + Scorer subQueryScorer = subQueryWeight.scorer(context); if (subQueryScorer == null) { return null; } diff --git a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java index bfde845f299..85bb5fe8904 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/uid/PerThreadIDAndVersionLookup.java @@ -105,9 +105,13 @@ final class PerThreadIDAndVersionLookup { // Use NDV to retrieve the version, in which case we only need PostingsEnum: // there may be more than one matching docID, in the case of nested docs, so we want the last one: - PostingsEnum docs = docsEnums[seg] = termsEnums[seg].postings(liveDocs[seg], docsEnums[seg], 0); + PostingsEnum docs = docsEnums[seg] = termsEnums[seg].postings(docsEnums[seg], 0); + final Bits liveDocs = this.liveDocs[seg]; int docID = DocIdSetIterator.NO_MORE_DOCS; for (int d = docs.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = docs.nextDoc()) { + if (liveDocs != null && liveDocs.get(d) == false) { + continue; + } docID = d; } @@ -125,9 +129,13 @@ final class PerThreadIDAndVersionLookup { } // ... 
but used to be stored as payloads; in this case we must use PostingsEnum - PostingsEnum dpe = posEnums[seg] = termsEnums[seg].postings(liveDocs[seg], posEnums[seg], PostingsEnum.PAYLOADS); + PostingsEnum dpe = posEnums[seg] = termsEnums[seg].postings(posEnums[seg], PostingsEnum.PAYLOADS); assert dpe != null; // terms has payloads + final Bits liveDocs = this.liveDocs[seg]; for (int d = dpe.nextDoc(); d != DocIdSetIterator.NO_MORE_DOCS; d = dpe.nextDoc()) { + if (liveDocs != null && liveDocs.get(d) == false) { + continue; + } dpe.nextPosition(); final BytesRef payload = dpe.getPayload(); if (payload != null && payload.length == 8) { diff --git a/core/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java b/core/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java index 82ed6f2bde8..b26039141c2 100644 --- a/core/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java +++ b/core/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; @@ -86,7 +85,7 @@ public class MultiDataPathUpgrader { ShardStateMetaData.FORMAT.write(loaded, loaded.version, targetPath.getShardStatePath()); Files.createDirectories(targetPath.resolveIndex()); try (SimpleFSDirectory directory = new SimpleFSDirectory(targetPath.resolveIndex())) { - try (final Lock lock = Lucene.acquireWriteLock(directory)) { + try (final Lock lock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { upgradeFiles(shard, targetPath, targetPath.resolveIndex(), ShardPath.INDEX_FOLDER_NAME, paths); } catch (LockObtainFailedException ex) { throw new IllegalStateException("Can't obtain lock on " + targetPath.resolveIndex(), ex); diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index e4cdeb94db7..c75e26fbf36 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -30,13 +30,11 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; @@ -154,7 +152,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) { logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath()); try { - locks[dirIndex] = Lucene.acquireLock(luceneDir, NODE_LOCK_FILENAME, 0); + locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME); nodePaths[dirIndex] = new NodePath(dir, environment); localNodeId = 
possibleLockId;
                 } catch (LockObtainFailedException ex) {
@@ -324,7 +322,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
             dirs[i] = new SimpleFSDirectory(p, FsDirectoryService.buildLockFactory(indexSettings));
             // create a lock for the "write.lock" file
             try {
-                locks[i] = Lucene.acquireWriteLock(dirs[i]);
+                locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME);
             } catch (IOException ex) {
                 throw new LockObtainFailedException("unable to acquire " + IndexWriter.WRITE_LOCK_NAME + " for " + p);
@@ -730,7 +728,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable {
         if (!closed.get() && locks != null) {
             for (Lock lock : locks) {
                 try {
-                    assert lock.isLocked() : "Lock: " + lock + "is not locked";
+                    lock.ensureValid();
                 } catch (IOException e) {
                     logger.warn("lock assertion failed", e);
                     return false;
diff --git a/core/src/main/java/org/elasticsearch/index/codec/CodecService.java b/core/src/main/java/org/elasticsearch/index/codec/CodecService.java
index 2ba4aeb4c9d..aa29f79ba77 100644
--- a/core/src/main/java/org/elasticsearch/index/codec/CodecService.java
+++ b/core/src/main/java/org/elasticsearch/index/codec/CodecService.java
@@ -22,8 +22,8 @@ package org.elasticsearch.index.codec;
 import com.google.common.collect.ImmutableMap;
 import org.apache.lucene.codecs.Codec;
-import org.apache.lucene.codecs.lucene50.Lucene50Codec;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
+import org.apache.lucene.codecs.lucene53.Lucene53Codec;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
@@ -65,8 +65,8 @@ public class CodecService extends AbstractIndexComponent {
         this.mapperService = mapperService;
         MapBuilder<String, Codec> codecs = MapBuilder.newMapBuilder();
         if (mapperService == null) {
-            codecs.put(DEFAULT_CODEC, new Lucene50Codec());
-            codecs.put(BEST_COMPRESSION_CODEC, new Lucene50Codec(Mode.BEST_COMPRESSION));
+            codecs.put(DEFAULT_CODEC, new Lucene53Codec());
+            codecs.put(BEST_COMPRESSION_CODEC, new Lucene53Codec(Mode.BEST_COMPRESSION));
         } else {
             codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger));
diff --git a/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
index b8f1276d23d..b8e44bdadb6 100644
--- a/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
+++ b/core/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java
@@ -21,11 +21,10 @@ package org.elasticsearch.index.codec;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.PostingsFormat;
-import org.apache.lucene.codecs.lucene50.Lucene50Codec;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat;
+import org.apache.lucene.codecs.lucene53.Lucene53Codec;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
@@ -39,7 +38,7 @@ import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
 * configured for a specific field the default postings format is used.
*/ // LUCENE UPGRADE: make sure to move to a new codec depending on the lucene version -public class PerFieldMappingPostingFormatCodec extends Lucene50Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene53Codec { private final ESLogger logger; private final MapperService mapperService; diff --git a/core/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java b/core/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java index 71ff9e27dbf..9b29c9cc815 100644 --- a/core/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java +++ b/core/src/main/java/org/elasticsearch/index/codec/postingsformat/BloomFilterPostingsFormat.java @@ -323,8 +323,8 @@ public class BloomFilterPostingsFormat extends PostingsFormat { @Override - public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException { - return getDelegate().postings(liveDocs, reuse, flags); + public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { + return getDelegate().postings(reuse, flags); } } @@ -384,7 +384,7 @@ public class BloomFilterPostingsFormat extends PostingsFormat { bloomFilters.put(fieldInfo, bloomFilter); } // Make sure there's at least one doc for this term: - postings = termsEnum.postings(null, postings, 0); + postings = termsEnum.postings(postings, 0); if (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { bloomFilter.put(term); } diff --git a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java index 91725899c17..6e6b0cfda69 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java +++ b/core/src/main/java/org/elasticsearch/index/engine/EngineConfig.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.engine; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.index.IndexWriterConfig; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; @@ -30,7 +29,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; import org.elasticsearch.index.indexing.ShardIndexingService; @@ -56,7 +54,6 @@ public final class EngineConfig { private volatile ByteSizeValue indexingBufferSize; private volatile ByteSizeValue versionMapSize; private volatile String versionMapSizeSetting; - private final int indexConcurrency; private volatile boolean compoundOnFlush = true; private long gcDeletesInMillis = DEFAULT_GC_DELETES.millis(); private volatile boolean enableGcDeletes = true; @@ -79,13 +76,6 @@ public final class EngineConfig { private final QueryCachingPolicy queryCachingPolicy; private final IndexSearcherWrappingService wrappingService; - /** - * Index setting for index concurrency / number of threadstates in the indexwriter. - * The default is depending on the number of CPUs in the system. 
We use a 0.65 the number of CPUs or at least {@value org.apache.lucene.index.IndexWriterConfig#DEFAULT_MAX_THREAD_STATES} - * This setting is not realtime updateable - */ - public static final String INDEX_CONCURRENCY_SETTING = "index.index_concurrency"; - /** * Index setting for compound file on flush. This setting is realtime updateable. */ @@ -161,7 +151,6 @@ public final class EngineConfig { this.wrappingService = wrappingService; this.optimizeAutoGenerateId = indexSettings.getAsBoolean(EngineConfig.INDEX_OPTIMIZE_AUTOGENERATED_ID_SETTING, false); this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush); - this.indexConcurrency = indexSettings.getAsInt(EngineConfig.INDEX_CONCURRENCY_SETTING, Math.max(IndexWriterConfig.DEFAULT_MAX_THREAD_STATES, (int) (EsExecutors.boundedNumberOfProcessors(indexSettings) * 0.65))); codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME); indexingBufferSize = indexSettings.getAsBytesSize(INDEX_BUFFER_SIZE_SETTING, DEFAULT_INDEX_BUFFER_SIZE); gcDeletesInMillis = indexSettings.getAsTime(INDEX_GC_DELETES_SETTING, EngineConfig.DEFAULT_GC_DELETES).millis(); @@ -235,16 +224,6 @@ public final class EngineConfig { return indexingBufferSize; } - /** - * Returns the index concurrency that directly translates into the number of thread states used in the engines - * {@code IndexWriter}. - * - * @see org.apache.lucene.index.IndexWriterConfig#getMaxThreadStates() - */ - public int getIndexConcurrency() { - return indexConcurrency; - } - /** * Returns true iff flushed segments should be written as compound file system. Defaults to true */ diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 5bd733c48f1..b32a5e06321 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -136,7 +136,7 @@ public class InternalEngine extends Engine { this.indexingService = engineConfig.getIndexingService(); this.warmer = engineConfig.getWarmer(); mergeScheduler = scheduler = new EngineMergeScheduler(engineConfig.getShardId(), engineConfig.getIndexSettings(), engineConfig.getMergeSchedulerConfig()); - this.dirtyLocks = new Object[engineConfig.getIndexConcurrency() * 50]; // we multiply it to have enough... + this.dirtyLocks = new Object[Runtime.getRuntime().availableProcessors() * 10]; // we multiply it to have enough... 
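With setMaxThreadStates gone from IndexWriterConfig in this Lucene version, the engine can no longer size its lock array from an index-concurrency setting, so it falls back to a CPU-derived stripe count. A sketch of the lock-striping pattern behind dirtyLocks (the class name, helper, and modulo choice are illustrative, not the engine's exact code):

    import org.apache.lucene.util.BytesRef;

    final class StripedLocks {
        private final Object[] locks;

        StripedLocks(int stripes) {
            locks = new Object[stripes];
            for (int i = 0; i < locks.length; i++) {
                locks[i] = new Object();
            }
        }

        // Map a document uid onto one stripe; distinct uids rarely share a monitor.
        Object lockFor(BytesRef uid) {
            return locks[Math.floorMod(uid.hashCode(), locks.length)];
        }
    }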
for (int i = 0; i < dirtyLocks.length; i++) { dirtyLocks[i] = new Object(); } @@ -1038,7 +1038,6 @@ public class InternalEngine extends Engine { iwc.setMergePolicy(mergePolicy); iwc.setSimilarity(engineConfig.getSimilarity()); iwc.setRAMBufferSizeMB(engineConfig.getIndexingBufferSize().mbFrac()); - iwc.setMaxThreadStates(engineConfig.getIndexConcurrency()); iwc.setCodec(engineConfig.getCodec()); /* We set this timeout to a highish value to work around * the default poll interval in the Lucene lock that is diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java index 5fe9a4c388d..fa7eef6e6b2 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/ordinals/OrdinalsBuilder.java @@ -470,7 +470,7 @@ public final class OrdinalsBuilder implements Closeable { public BytesRef next() throws IOException { BytesRef ref; if ((ref = termsEnum.next()) != null) { - docsEnum = termsEnum.postings(null, docsEnum, PostingsEnum.NONE); + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE); nextOrdinal(); int docId; while ((docId = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java index d5ef33ca82e..dce5e403e2a 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/PagedBytesIndexFieldData.java @@ -97,7 +97,7 @@ public class PagedBytesIndexFieldData extends AbstractIndexOrdinalsFieldData { final long termOrd = builder.nextOrdinal(); assert termOrd == termOrdToBytesOffset.size(); termOrdToBytesOffset.add(bytes.copyUsingLengthPrefix(term)); - docsEnum = termsEnum.postings(null, docsEnum, PostingsEnum.NONE); + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE); for (int docId = docsEnum.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docsEnum.nextDoc()) { builder.addDoc(docId); } diff --git a/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java b/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java index 1b4b2d5dd67..ae7d4986e47 100644 --- a/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java +++ b/core/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java @@ -194,7 +194,7 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData 0; if (size == 1) { // Can't use 'reuse' since we don't know to which previous TermsEnum it belonged to. 
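The comment above leans on the PostingsEnum reuse contract: an enum may only be passed back as 'reuse' to the same TermsEnum it came from, and passing null simply allocates a fresh one. A short sketch of the safe reuse loop under the new two-argument postings() signature (the counting helper is illustrative, not code from the patch):

    import java.io.IOException;
    import org.apache.lucene.index.PostingsEnum;
    import org.apache.lucene.index.TermsEnum;
    import org.apache.lucene.search.DocIdSetIterator;
    import org.apache.lucene.util.BytesRef;

    final class PostingsReuse {
        // Walk every term's postings, reusing one PostingsEnum across the whole TermsEnum.
        static long countPostings(TermsEnum termsEnum) throws IOException {
            long count = 0;
            PostingsEnum postings = null; // safe to reuse: always the same TermsEnum
            for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) {
                postings = termsEnum.postings(postings, PostingsEnum.NONE);
                for (int doc = postings.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = postings.nextDoc()) {
                    count++;
                }
            }
            return count;
        }
    }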
- return states.get(stateSlots.get(0)).termsEnum.postings(liveDocs, null, flags);
+ return states.get(stateSlots.get(0)).termsEnum.postings(null, flags);
         } else {
             List<PostingsEnum> docsEnums = new ArrayList<>(stateSlots.size());
             for (int i = 0; i < stateSlots.size(); i++) {
-                docsEnums.add(states.get(stateSlots.get(i)).termsEnum.postings(liveDocs, null, flags));
+                docsEnums.add(states.get(stateSlots.get(i)).termsEnum.postings(null, flags));
             }
             return new CompoundDocsEnum(docsEnums);
         }
diff --git a/core/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java
index 66ea85eb818..6476ea814f3 100644
--- a/core/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java
+++ b/core/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java
@@ -166,7 +166,7 @@ public class BoolQueryParser implements QueryParser {
             booleanQuery.add(clause);
         }
         booleanQuery.setBoost(boost);
-        Queries.applyMinimumShouldMatch(booleanQuery, minimumShouldMatch);
+        booleanQuery = Queries.applyMinimumShouldMatch(booleanQuery, minimumShouldMatch);
         Query query = adjustPureNegative ? fixNegativeQueryIfNeeded(booleanQuery) : booleanQuery;
         if (queryName != null) {
             parseContext.addNamedQuery(queryName, query);
diff --git a/core/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java
index 62177abe331..2bf0d7cb605 100644
--- a/core/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java
+++ b/core/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java
@@ -168,7 +168,7 @@ public class MatchQueryParser implements QueryParser {
         }
         if (query instanceof BooleanQuery) {
-            Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);
+            query = Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch);
         } else if (query instanceof ExtendedCommonTermsQuery) {
             ((ExtendedCommonTermsQuery)query).setLowFreqMinimumNumberShouldMatch(minimumShouldMatch);
         }
diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java
index 929f7c16cd9..64afdd2a692 100644
--- a/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java
+++ b/core/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java
@@ -233,7 +233,7 @@ public class QueryStringQueryParser implements QueryParser {
         }
         query = fixNegativeQueryIfNeeded(query);
         if (query instanceof BooleanQuery) {
-            Queries.applyMinimumShouldMatch((BooleanQuery) query, qpSettings.minimumShouldMatch());
+            query = Queries.applyMinimumShouldMatch((BooleanQuery) query, qpSettings.minimumShouldMatch());
         }
         if (queryName != null) {
             parseContext.addNamedQuery(queryName, query);
diff --git a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java
index fc916f55611..48f3ce64e50 100644
--- a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java
+++ b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryParser.java
@@ -171,29 +171,26 @@ public class SimpleQueryParser extends org.apache.lucene.queryparser.simple.Simp
         // rewind buffer
         buffer.reset();
-        BytesRef bytes = termAtt == null ?
null : termAtt.getBytesRef(); if (numTokens == 0) { return null; } else if (numTokens == 1) { try { boolean hasNext = buffer.incrementToken(); assert hasNext == true; - termAtt.fillBytesRef(); } catch (IOException e) { // safe to ignore, because we know the number of tokens } - return new PrefixQuery(new Term(field, BytesRef.deepCopyOf(bytes))); + return new PrefixQuery(new Term(field, BytesRef.deepCopyOf(termAtt.getBytesRef()))); } else { BooleanQuery bq = new BooleanQuery(); for (int i = 0; i < numTokens; i++) { try { boolean hasNext = buffer.incrementToken(); assert hasNext == true; - termAtt.fillBytesRef(); } catch (IOException e) { // safe to ignore, because we know the number of tokens } - bq.add(new BooleanClause(new PrefixQuery(new Term(field, BytesRef.deepCopyOf(bytes))), BooleanClause.Occur.SHOULD)); + bq.add(new BooleanClause(new PrefixQuery(new Term(field, BytesRef.deepCopyOf(termAtt.getBytesRef()))), BooleanClause.Occur.SHOULD)); } return bq; } diff --git a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java index d80423d90fd..a3614bef72a 100644 --- a/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java @@ -215,7 +215,7 @@ public class SimpleQueryStringParser implements QueryParser { } if (minimumShouldMatch != null && query instanceof BooleanQuery) { - Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch); + query = Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch); } if (query != null) { diff --git a/core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java index 0d5ae097ab9..c18ef81d8c1 100644 --- a/core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java @@ -201,8 +201,7 @@ public class TermsQueryParser implements QueryParser { bq.add(new TermQuery(new Term(fieldName, BytesRefs.toBytesRef(term))), Occur.SHOULD); } } - Queries.applyMinimumShouldMatch(bq, minShouldMatch); - query = bq; + query = Queries.applyMinimumShouldMatch(bq, minShouldMatch); } query.setBoost(boost); diff --git a/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 621e7d0afca..34bf9445131 100644 --- a/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -29,7 +29,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.QueryParseContext; @@ -55,7 +54,7 @@ public class MultiMatchQuery extends MatchQuery { private Query parseAndApply(Type type, String fieldName, Object value, String minimumShouldMatch, Float boostValue) throws IOException { Query query = parse(type, fieldName, value); if (query instanceof BooleanQuery) { - Queries.applyMinimumShouldMatch((BooleanQuery) query, minimumShouldMatch); + query = Queries.applyMinimumShouldMatch((BooleanQuery) 
query, minimumShouldMatch); } if (boostValue != null && query != null) { query.setBoost(boostValue); diff --git a/core/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java b/core/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java index 55484996232..4ec1007bbb1 100644 --- a/core/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java @@ -198,13 +198,13 @@ public class ChildrenConstantScoreQuery extends IndexCacheableQuery { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + public Scorer scorer(LeafReaderContext context) throws IOException { if (remaining == 0) { return null; } if (shortCircuitFilter != null) { - DocIdSet docIdSet = shortCircuitFilter.getDocIdSet(context, acceptDocs); + DocIdSet docIdSet = shortCircuitFilter.getDocIdSet(context, null); if (!Lucene.isEmpty(docIdSet)) { DocIdSetIterator iterator = docIdSet.iterator(); if (iterator != null) { @@ -214,7 +214,7 @@ public class ChildrenConstantScoreQuery extends IndexCacheableQuery { return null; } - DocIdSet parentDocIdSet = this.parentFilter.getDocIdSet(context, acceptDocs); + DocIdSet parentDocIdSet = this.parentFilter.getDocIdSet(context, null); if (!Lucene.isEmpty(parentDocIdSet)) { // We can't be sure of the fact that liveDocs have been applied, so we apply it here. The "remaining" // count down (short circuit) logic will then work as expected. diff --git a/core/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java b/core/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java index c07ccbacbae..b869a4f7cb6 100644 --- a/core/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java @@ -262,8 +262,8 @@ public final class ChildrenQuery extends IndexCacheableQuery { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - DocIdSet parentsSet = parentFilter.getDocIdSet(context, acceptDocs); + public Scorer scorer(LeafReaderContext context) throws IOException { + DocIdSet parentsSet = parentFilter.getDocIdSet(context, null); if (Lucene.isEmpty(parentsSet) || remaining == 0) { return null; } diff --git a/core/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java b/core/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java index bad39130e75..af764bd70e7 100644 --- a/core/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java @@ -22,7 +22,17 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.Term; -import org.apache.lucene.search.*; +import org.apache.lucene.search.BitsFilteredDocIdSet; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.FilteredDocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import 
org.apache.lucene.util.Bits; import org.apache.lucene.util.LongBitSet; import org.elasticsearch.common.lucene.IndexCacheableQuery; @@ -162,14 +172,16 @@ public class ParentConstantScoreQuery extends IndexCacheableQuery { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, acceptDocs); + public Scorer scorer(LeafReaderContext context) throws IOException { + DocIdSet childrenDocIdSet = childrenFilter.getDocIdSet(context, null); if (Lucene.isEmpty(childrenDocIdSet)) { return null; } SortedDocValues globalValues = globalIfd.load(context).getOrdinalsValues(parentType); if (globalValues != null) { + // we forcefully apply live docs here so that deleted children don't give matching parents + childrenDocIdSet = BitsFilteredDocIdSet.wrap(childrenDocIdSet, context.reader().getLiveDocs()); DocIdSetIterator innerIterator = childrenDocIdSet.iterator(); if (innerIterator != null) { ChildrenDocIdIterator childrenDocIdIterator = new ChildrenDocIdIterator( diff --git a/core/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java b/core/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java index cc34da404bb..7743cfe0ab4 100644 --- a/core/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java +++ b/core/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java @@ -158,27 +158,25 @@ final class ParentIdsFilter extends Filter { parentIds.get(i, idSpare); BytesRef uid = Uid.createUidAsBytes(parentTypeBr, idSpare, uidSpare); if (termsEnum.seekExact(uid)) { + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE); int docId; - docsEnum = termsEnum.postings(acceptDocs, docsEnum, PostingsEnum.NONE); - if (result == null) { - docId = docsEnum.nextDoc(); - if (docId != DocIdSetIterator.NO_MORE_DOCS) { - // very rough heuristic that tries to get an idea of the number of documents - // in the set based on the number of parent ids that we didn't find in this segment - final int expectedCardinality = size / (i + 1); - // similar heuristic to BitDocIdSet.Builder - if (expectedCardinality >= (context.reader().maxDoc() >>> 10)) { - result = new FixedBitSet(context.reader().maxDoc()); - } else { - result = new SparseFixedBitSet(context.reader().maxDoc()); - } - } else { - continue; + for (docId = docsEnum.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docsEnum.nextDoc()) { + if (acceptDocs == null || acceptDocs.get(docId)) { + break; } - } else { - docId = docsEnum.nextDoc(); - if (docId == DocIdSetIterator.NO_MORE_DOCS) { - continue; + } + if (docId == DocIdSetIterator.NO_MORE_DOCS) { + continue; + } + if (result == null) { + // very rough heuristic that tries to get an idea of the number of documents + // in the set based on the number of parent ids that we didn't find in this segment + final int expectedCardinality = size / (i + 1); + // similar heuristic to BitDocIdSet.Builder + if (expectedCardinality >= (context.reader().maxDoc() >>> 10)) { + result = new FixedBitSet(context.reader().maxDoc()); + } else { + result = new SparseFixedBitSet(context.reader().maxDoc()); } } if (nonNestedDocs != null) { diff --git a/core/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java b/core/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java index d574066e08d..dff42416af1 100644 --- a/core/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java +++ 
b/core/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.Term; +import org.apache.lucene.search.BitsFilteredDocIdSet; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; import org.apache.lucene.search.DocIdSet; @@ -243,8 +244,10 @@ public class ParentQuery extends IndexCacheableQuery { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, acceptDocs); + public Scorer scorer(LeafReaderContext context) throws IOException { + DocIdSet childrenDocSet = childrenFilter.getDocIdSet(context, null); + // we forcefully apply live docs here so that deleted children don't give matching parents + childrenDocSet = BitsFilteredDocIdSet.wrap(childrenDocSet, context.reader().getLiveDocs()); if (Lucene.isEmpty(childrenDocSet)) { return null; } diff --git a/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java b/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java index e951f78cd64..c590ea08301 100644 --- a/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeQuery.java @@ -141,10 +141,10 @@ public class GeoDistanceRangeQuery extends Query { } return new ConstantScoreWeight(this) { @Override - public Scorer scorer(LeafReaderContext context, final Bits acceptDocs) throws IOException { + public Scorer scorer(LeafReaderContext context) throws IOException { final DocIdSetIterator approximation; if (boundingBoxWeight != null) { - approximation = boundingBoxWeight.scorer(context, null); + approximation = boundingBoxWeight.scorer(context); } else { approximation = DocIdSetIterator.all(context.reader().maxDoc()); } @@ -157,9 +157,6 @@ public class GeoDistanceRangeQuery extends Query { @Override public boolean matches() throws IOException { final int doc = approximation.docID(); - if (acceptDocs != null && acceptDocs.get(doc) == false) { - return false; - } values.setDocument(doc); final int length = values.count(); for (int i = 0; i < length; i++) { diff --git a/core/src/main/java/org/elasticsearch/index/search/nested/IncludeNestedDocsQuery.java b/core/src/main/java/org/elasticsearch/index/search/nested/IncludeNestedDocsQuery.java index 8b2f7a7720a..553685d0b56 100644 --- a/core/src/main/java/org/elasticsearch/index/search/nested/IncludeNestedDocsQuery.java +++ b/core/src/main/java/org/elasticsearch/index/search/nested/IncludeNestedDocsQuery.java @@ -107,8 +107,8 @@ public class IncludeNestedDocsQuery extends Query { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { - final Scorer parentScorer = parentWeight.scorer(context, acceptDocs); + public Scorer scorer(LeafReaderContext context) throws IOException { + final Scorer parentScorer = parentWeight.scorer(context); // no matches if (parentScorer == null) { diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 10217983f40..4d7815e7579 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -251,7 +251,7 @@ public 
class IndexShard extends AbstractIndexShardComponent { if (indexSettings.getAsBoolean(IndexCacheModule.QUERY_CACHE_EVERYTHING, false)) { cachingPolicy = QueryCachingPolicy.ALWAYS_CACHE; } else { - assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_5_2_1; + assert Version.CURRENT.luceneVersion == org.apache.lucene.util.Version.LUCENE_5_3_0; // TODO: remove this hack in Lucene 5.4, use UsageTrackingQueryCachingPolicy directly // See https://issues.apache.org/jira/browse/LUCENE-6748 // cachingPolicy = new UsageTrackingQueryCachingPolicy(); diff --git a/core/src/main/java/org/elasticsearch/index/shard/VersionFieldUpgrader.java b/core/src/main/java/org/elasticsearch/index/shard/VersionFieldUpgrader.java index 04517b028d3..42bd5420ac3 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/VersionFieldUpgrader.java +++ b/core/src/main/java/org/elasticsearch/index/shard/VersionFieldUpgrader.java @@ -133,9 +133,13 @@ class VersionFieldUpgrader extends FilterCodecReader { final GrowableWriter versions = new GrowableWriter(2, reader.maxDoc(), PackedInts.COMPACT); PostingsEnum dpe = null; for (BytesRef uid = uids.next(); uid != null; uid = uids.next()) { - dpe = uids.postings(reader.getLiveDocs(), dpe, PostingsEnum.PAYLOADS); + dpe = uids.postings(dpe, PostingsEnum.PAYLOADS); assert terms.hasPayloads() : "field has payloads"; + final Bits liveDocs = reader.getLiveDocs(); for (int doc = dpe.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = dpe.nextDoc()) { + if (liveDocs != null && liveDocs.get(doc) == false) { + continue; + } dpe.nextPosition(); final BytesRef payload = dpe.getPayload(); if (payload != null && payload.length == 8) { diff --git a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index ba301b4835f..dfd6cdf6b50 100644 --- a/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java +++ b/core/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.store; import com.google.common.collect.Sets; import org.apache.lucene.store.*; import org.apache.lucene.util.Constants; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; @@ -94,11 +93,11 @@ public class FsDirectoryService extends DirectoryService implements StoreRateLim } /* - * We are mmapping docvalues as well as term dictionaries, all other files are served through NIOFS + * We are mmapping norms, docvalues as well as term dictionaries, all other files are served through NIOFS * this provides good random access performance while not creating unnecessary mmaps for files like stored * fields etc. 
     */
-    private static final Set<String> PRIMARY_EXTENSIONS = Collections.unmodifiableSet(Sets.newHashSet("dvd", "tim"));
+    private static final Set<String> PRIMARY_EXTENSIONS = Collections.unmodifiableSet(Sets.newHashSet("nvd", "dvd", "tim"));
     protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException {
diff --git a/core/src/main/java/org/elasticsearch/index/store/Store.java b/core/src/main/java/org/elasticsearch/index/store/Store.java
index 847f3e3774f..39a0f5365ba 100644
--- a/core/src/main/java/org/elasticsearch/index/store/Store.java
+++ b/core/src/main/java/org/elasticsearch/index/store/Store.java
@@ -258,7 +258,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
         metadataLock.writeLock().lock();
         // we make sure that nobody fetches the metadata while we do this rename operation here to ensure we don't
         // get exceptions if files are still open.
-        try (Lock writeLock = Lucene.acquireWriteLock(directory())) {
+        try (Lock writeLock = directory().obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
             for (Map.Entry<String, String> entry : entries) {
                 String tempFile = entry.getKey();
                 String origFile = entry.getValue();
@@ -593,7 +593,7 @@
     */
    public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) throws IOException {
        metadataLock.writeLock().lock();
-        try (Lock writeLock = Lucene.acquireWriteLock(directory)) {
+        try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
            final StoreDirectory dir = directory;
            for (String existingFile : dir.listAll()) {
                if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) {
diff --git a/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java b/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java
index 70c14b1295e..30cd6de1233 100644
--- a/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java
+++ b/core/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java
@@ -28,7 +28,6 @@ import org.apache.lucene.search.QueryCache;
 import org.apache.lucene.search.QueryCachingPolicy;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.search.Weight;
-import org.apache.lucene.util.Bits;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.lucene.ShardCoreKeyMap;
@@ -253,9 +252,9 @@ public class IndicesQueryCache extends AbstractComponent implements QueryCache,
         }
         @Override
-        public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+        public Scorer scorer(LeafReaderContext context) throws IOException {
             shardKeyMap.add(context.reader());
-            return in.scorer(context, acceptDocs);
+            return in.scorer(context);
         }
     }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java
index beefbc64508..ba776e33d35 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java
@@ -109,9 +109,9 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator {
         final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx);
         assert globalOrdinals !=
null; - Scorer parentScorer = parentFilter.scorer(ctx, null); + Scorer parentScorer = parentFilter.scorer(ctx); final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer); - if (childFilter.scorer(ctx, null) != null) { + if (childFilter.scorer(ctx) != null) { replay.add(ctx); } return new LeafBucketCollector() { @@ -146,7 +146,7 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { this.replay = null; for (LeafReaderContext ctx : replay) { - DocIdSetIterator childDocsIter = childFilter.scorer(ctx, ctx.reader().getLiveDocs()); + DocIdSetIterator childDocsIter = childFilter.scorer(ctx); if (childDocsIter == null) { continue; } @@ -157,7 +157,11 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { // Set the scorer, since we now replay only the child docIds sub.setScorer(ConstantScorer.create(childDocsIter, null, 1f)); + final Bits liveDocs = ctx.reader().getLiveDocs(); for (int docId = childDocsIter.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = childDocsIter.nextDoc()) { + if (liveDocs != null && liveDocs.get(docId) == false) { + continue; + } long globalOrdinal = globalOrdinals.getOrd(docId); if (globalOrdinal != -1) { long bucketOrd = parentOrdToBuckets.get(globalOrdinal); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java index 0f904e4da03..b1308444894 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java @@ -58,7 +58,7 @@ public class FilterAggregator extends SingleBucketAggregator { public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException { // no need to provide deleted docs to the filter - final Bits bits = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filter.scorer(ctx, null)); + final Bits bits = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filter.scorer(ctx)); return new LeafBucketCollectorBase(sub, null) { @Override public void collect(int doc, long bucket) throws IOException { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java index 781d47f68eb..3cd67f835ec 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java @@ -91,7 +91,7 @@ public class FiltersAggregator extends BucketsAggregator { // no need to provide deleted docs to the filter final Bits[] bits = new Bits[filters.length]; for (int i = 0; i < filters.length; ++i) { - bits[i] = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx, null)); + bits[i] = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx)); } return new LeafBucketCollectorBase(sub, null) { @Override diff --git a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java index 75b4b4f912d..460346c44c0 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java +++ 
b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java @@ -190,7 +190,7 @@ public final class InnerHitsContext { public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { return new ConstantScoreWeight(this) { @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + public Scorer scorer(LeafReaderContext context) throws IOException { // Nested docs only reside in a single segment, so no need to evaluate all segments if (!context.reader().getCoreCacheKey().equals(leafReader.getCoreCacheKey())) { return null; @@ -209,7 +209,7 @@ public final class InnerHitsContext { return null; } - final DocIdSet children = childFilter.getDocIdSet(context, acceptDocs); + final DocIdSet children = childFilter.getDocIdSet(context, null); if (children == null) { return null; } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java index 75e62d63f9d..87965321af4 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java @@ -85,7 +85,7 @@ public class MatchedQueriesFetchSubPhase implements FetchSubPhase { Query filter = entry.getValue(); final Weight weight = hitContext.topLevelSearcher().createNormalizedWeight(filter, false); - final Scorer scorer = weight.scorer(hitContext.readerContext(), null); + final Scorer scorer = weight.scorer(hitContext.readerContext()); if (scorer == null) { continue; } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java index 8f12dd0f9b4..a2d762461c0 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/CustomQueryScorer.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.highlight; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.queries.BlendedTermQuery; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.highlight.QueryScorer; @@ -87,7 +86,7 @@ public final class CustomQueryScorer extends QueryScorer { } else if (query instanceof FilteredQuery) { query = ((FilteredQuery) query).getQuery(); extract(query, terms); - } else if (query instanceof BlendedTermQuery) { + } else { extractWeightedTerms(terms, query); } } diff --git a/core/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java b/core/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java index 43c28d05304..09c78d250fb 100644 --- a/core/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java +++ b/core/src/main/java/org/elasticsearch/search/lookup/IndexFieldTerm.java @@ -19,12 +19,19 @@ package org.elasticsearch.search.lookup; -import org.apache.lucene.index.*; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.FilterLeafReader.FilterPostingsEnum; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.TermContext; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.DocIdSetIterator; import 
org.apache.lucene.search.TermStatistics; +import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.lucene.search.EmptyScorer; import java.io.IOException; import java.util.Iterator; @@ -144,7 +151,28 @@ public class IndexFieldTerm implements Iterable { if (terms != null) { TermsEnum termsEnum = terms.iterator(); if (termsEnum.seekExact(identifier.bytes())) { - newPostings = termsEnum.postings(reader.getLiveDocs(), postings, luceneFlags); + newPostings = termsEnum.postings(postings, luceneFlags); + final Bits liveDocs = reader.getLiveDocs(); + if (liveDocs != null) { + newPostings = new FilterPostingsEnum(newPostings) { + private int doNext(int d) throws IOException { + for (; d != NO_MORE_DOCS; d = super.nextDoc()) { + if (liveDocs.get(d)) { + return d; + } + } + return NO_MORE_DOCS; + } + @Override + public int nextDoc() throws IOException { + return doNext(super.nextDoc()); + } + @Override + public int advance(int target) throws IOException { + return doNext(super.advance(target)); + } + }; + } } } } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProvider.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProvider.java index 4ee79025adf..c5b1b5931e9 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProvider.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProvider.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.suggest.completion; import com.carrotsearch.hppc.ObjectLongHashMap; import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.TokenStreamToAutomaton; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.index.PostingsEnum; @@ -40,6 +41,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.IntsRef; import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.LimitedFiniteStringsIterator; import org.apache.lucene.util.fst.ByteSequenceOutputs; import org.apache.lucene.util.fst.FST; import org.apache.lucene.util.fst.PairOutputs; @@ -56,6 +58,7 @@ import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.TreeMap; @@ -156,7 +159,7 @@ public class AnalyzingCompletionLookupProvider extends CompletionLookupProvider if (term == null) { break; } - docsEnum = termsEnum.postings(null, docsEnum, PostingsEnum.PAYLOADS); + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.PAYLOADS); builder.startTerm(term); int docFreq = 0; while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { @@ -397,6 +400,8 @@ public class AnalyzingCompletionLookupProvider extends CompletionLookupProvider @Override public Set toFiniteStrings(TokenStream stream) throws IOException { - return prototype.toFiniteStrings(prototype.getTokenStreamToAutomaton(), stream); + return prototype.toFiniteStrings(stream); } + + } \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java index 103fd0dcf0a..ebcf0456f87 100644 --- 
a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionTokenStream.java @@ -135,11 +135,6 @@ public final class CompletionTokenStream extends TokenStream { private final BytesRefBuilder bytes = new BytesRefBuilder(); private CharsRefBuilder charsRef; - @Override - public void fillBytesRef() { - // does nothing - we change in place - } - @Override public BytesRefBuilder builder() { return bytes; diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java b/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java index 62f4dc77060..ba6e6b6532d 100644 --- a/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java +++ b/core/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java @@ -346,8 +346,8 @@ public abstract class AbstractTermVectorsTestCase extends ESIntegTestCase { assertNotNull(luceneTermEnum.next()); assertThat(esTermEnum.totalTermFreq(), equalTo(luceneTermEnum.totalTermFreq())); - PostingsEnum esDocsPosEnum = esTermEnum.postings(null, null, PostingsEnum.POSITIONS); - PostingsEnum luceneDocsPosEnum = luceneTermEnum.postings(null, null, PostingsEnum.POSITIONS); + PostingsEnum esDocsPosEnum = esTermEnum.postings(null, PostingsEnum.POSITIONS); + PostingsEnum luceneDocsPosEnum = luceneTermEnum.postings(null, PostingsEnum.POSITIONS); if (luceneDocsPosEnum == null) { // test we expect that... assertFalse(field.storedOffset); diff --git a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqIT.java b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqIT.java index 0e1c978dda8..1d0c317f5ad 100644 --- a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqIT.java +++ b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqIT.java @@ -119,7 +119,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase { assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq())); } - PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); assertThat(docsAndPositions.nextDoc(), equalTo(0)); assertThat(freq[j], equalTo(docsAndPositions.freq())); assertThat(iterator.docFreq(), equalTo(numDocs)); @@ -176,7 +176,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase { assertThat("expected ttf of " + string, -1, equalTo((int) iterator.totalTermFreq())); - PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); assertThat(docsAndPositions.nextDoc(), equalTo(0)); assertThat(freq[j], equalTo(docsAndPositions.freq())); assertThat(iterator.docFreq(), equalTo(-1)); @@ -236,7 +236,7 @@ public class GetTermVectorsCheckDocFreqIT extends ESIntegTestCase { assertThat("expected ttf of " + string, numDocs, equalTo((int) iterator.totalTermFreq())); } - PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); assertThat(docsAndPositions.nextDoc(), equalTo(0)); assertThat(freq[j], equalTo(docsAndPositions.freq())); assertThat(iterator.docFreq(), equalTo(numDocs)); diff --git 
a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index 47031b828b3..6f046974633 100644 --- a/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/core/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -335,7 +335,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { assertThat(infoString, next, notNullValue()); // do not test ttf or doc frequency, because here we have // many shards and do not know how documents are distributed - PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); // docs and pos only returns something if positions or // payloads or offsets are stored / requestd Otherwise use // DocsEnum? @@ -464,7 +464,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { TermsEnum iterator = terms.iterator(); while (iterator.next() != null) { String term = iterator.term().utf8ToString(); - PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); assertThat(docsAndPositions.nextDoc(), equalTo(0)); List curPayloads = payloads.get(term); assertThat(term, curPayloads, notNullValue()); @@ -658,7 +658,7 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { assertThat(next, notNullValue()); // do not test ttf or doc frequency, because here we have many // shards and do not know how documents are distributed - PostingsEnum docsAndPositions = iterator.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions = iterator.postings(null, PostingsEnum.ALL); assertThat(docsAndPositions.nextDoc(), equalTo(0)); assertThat(freq[j], equalTo(docsAndPositions.freq())); int[] termPos = pos[j]; @@ -753,8 +753,8 @@ public class GetTermVectorsIT extends AbstractTermVectorsTestCase { assertThat("term: " + string0, iter0.totalTermFreq(), equalTo(iter1.totalTermFreq())); // compare freq and docs - PostingsEnum docsAndPositions0 = iter0.postings(null, null, PostingsEnum.ALL); - PostingsEnum docsAndPositions1 = iter1.postings(null, null, PostingsEnum.ALL); + PostingsEnum docsAndPositions0 = iter0.postings(null, PostingsEnum.ALL); + PostingsEnum docsAndPositions1 = iter1.postings(null, PostingsEnum.ALL); assertThat("term: " + string0, docsAndPositions0.nextDoc(), equalTo(docsAndPositions1.nextDoc())); assertThat("term: " + string0, docsAndPositions0.freq(), equalTo(docsAndPositions1.freq())); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java b/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java index 70a2a2c6018..5511796a2ed 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.QueryUtils; import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.Bits; import org.apache.lucene.util.Version; import org.elasticsearch.test.ESTestCase; @@ -73,7 +72,7 @@ public class IndexCacheableQueryTests extends ESTestCase { } @Override - public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + public Scorer 
scorer(LeafReaderContext context) throws IOException { return null; } @@ -104,10 +103,7 @@ public class IndexCacheableQueryTests extends ESTestCase { } IndexReader reader = writer.getReader(); - // IndexReader wrapping is disabled because of LUCENE-6500. - // Add it back when we are on 5.3 - assert Version.LATEST == Version.LUCENE_5_2_1; - IndexSearcher searcher = newSearcher(reader, false); + IndexSearcher searcher = newSearcher(reader); reader = searcher.getIndexReader(); // reader might be wrapped searcher.setQueryCache(cache); searcher.setQueryCachingPolicy(policy); @@ -123,10 +119,7 @@ public class IndexCacheableQueryTests extends ESTestCase { writer.addDocument(new Document()); IndexReader reader2 = writer.getReader(); - // IndexReader wrapping is disabled because of LUCENE-6500. - // Add it back when we are on 5.3 - assert Version.LATEST == Version.LUCENE_5_2_1; - searcher = newSearcher(reader2, false); + searcher = newSearcher(reader2); reader2 = searcher.getIndexReader(); // reader might be wrapped searcher.setQueryCache(cache); searcher.setQueryCachingPolicy(policy); diff --git a/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java b/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java index d410c86890a..77fd17eec80 100644 --- a/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java +++ b/core/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java @@ -214,7 +214,7 @@ public class SimpleLuceneTests extends ESTestCase { TermsEnum termsEnum = terms.iterator(); termsEnum.next(); - PostingsEnum termDocs = termsEnum.postings(atomicReader.getLiveDocs(), null); + PostingsEnum termDocs = termsEnum.postings(null); assertThat(termDocs.nextDoc(), equalTo(0)); assertThat(termDocs.docID(), equalTo(0)); assertThat(termDocs.freq(), equalTo(1)); @@ -222,7 +222,7 @@ public class SimpleLuceneTests extends ESTestCase { terms = atomicReader.terms("int2"); termsEnum = terms.iterator(); termsEnum.next(); - termDocs = termsEnum.postings(atomicReader.getLiveDocs(), termDocs); + termDocs = termsEnum.postings(termDocs); assertThat(termDocs.nextDoc(), equalTo(0)); assertThat(termDocs.docID(), equalTo(0)); assertThat(termDocs.freq(), equalTo(2)); diff --git a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java index fc967189482..e45f1c469b0 100644 --- a/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java +++ b/core/src/test/java/org/elasticsearch/index/codec/CodecTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.codecs.lucene49.Lucene49Codec; import org.apache.lucene.codecs.lucene50.Lucene50Codec; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode; +import org.apache.lucene.codecs.lucene53.Lucene53Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -51,7 +52,8 @@ public class CodecTests extends ESSingleNodeTestCase { public void testResolveDefaultCodecs() throws Exception { CodecService codecService = createCodecService(); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene50Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene53Codec.class)); + assertThat(codecService.codec("Lucene50"), instanceOf(Lucene50Codec.class)); 
assertThat(codecService.codec("Lucene410"), instanceOf(Lucene410Codec.class)); assertThat(codecService.codec("Lucene49"), instanceOf(Lucene49Codec.class)); assertThat(codecService.codec("Lucene46"), instanceOf(Lucene46Codec.class)); diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index f3d45a8061c..deebc4511c0 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -116,7 +116,6 @@ public class InternalEngineTests extends ESTestCase { protected InternalEngine replicaEngine; private Settings defaultSettings; - private int indexConcurrency; private String codecName; private Path primaryTranslogDir; private Path replicaTranslogDir; @@ -127,7 +126,6 @@ public class InternalEngineTests extends ESTestCase { super.setUp(); CodecService codecService = new CodecService(shardId.index()); - indexConcurrency = randomIntBetween(1, 20); String name = Codec.getDefault().getName(); if (Arrays.asList(codecService.availableCodecs()).contains(name)) { // some codecs are read only so we only take the ones that we have in the service and randomly @@ -140,7 +138,6 @@ public class InternalEngineTests extends ESTestCase { .put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, randomBoolean()) .put(EngineConfig.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us .put(EngineConfig.INDEX_CODEC_SETTING, codecName) - .put(EngineConfig.INDEX_CONCURRENCY_SETTING, indexConcurrency) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .build(); // TODO randomize more settings threadPool = new ThreadPool(getClass().getName()); @@ -1507,8 +1504,6 @@ public class InternalEngineTests extends ESTestCase { assertEquals(engine.config().getCodec().getName(), codecService.codec(codecName).getName()); assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); - assertEquals(engine.config().getIndexConcurrency(), indexConcurrency); - assertEquals(currentIndexWriterConfig.getMaxThreadStates(), indexConcurrency); } @Test diff --git a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java index 7b45a3b90cd..5d431c5d9e6 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java @@ -91,7 +91,6 @@ public class ShadowEngineTests extends ESTestCase { protected Engine replicaEngine; private Settings defaultSettings; - private int indexConcurrency; private String codecName; private Path dirPath; @@ -100,7 +99,6 @@ public class ShadowEngineTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); CodecService codecService = new CodecService(shardId.index()); - indexConcurrency = randomIntBetween(1, 20); String name = Codec.getDefault().getName(); if (Arrays.asList(codecService.availableCodecs()).contains(name)) { // some codecs are read only so we only take the ones that we have in the service and randomly @@ -113,7 +111,6 @@ public class ShadowEngineTests extends ESTestCase { .put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, randomBoolean()) .put(EngineConfig.INDEX_GC_DELETES_SETTING, "1h") // make sure this doesn't kick in on us .put(EngineConfig.INDEX_CODEC_SETTING, codecName) - .put(EngineConfig.INDEX_CONCURRENCY_SETTING, indexConcurrency) 
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .build(); // TODO randomize more settings threadPool = new ThreadPool(getClass().getName()); @@ -921,7 +918,6 @@ public class ShadowEngineTests extends ESTestCase { public void testSettings() { CodecService codecService = new CodecService(shardId.index()); assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); - assertEquals(replicaEngine.config().getIndexConcurrency(), indexConcurrency); } @Test diff --git a/core/src/test/java/org/elasticsearch/index/fielddata/plain/ParentChildFilteredTermsEnumTests.java b/core/src/test/java/org/elasticsearch/index/fielddata/plain/ParentChildFilteredTermsEnumTests.java index 7a1aad21824..488aca2a34e 100644 --- a/core/src/test/java/org/elasticsearch/index/fielddata/plain/ParentChildFilteredTermsEnumTests.java +++ b/core/src/test/java/org/elasticsearch/index/fielddata/plain/ParentChildFilteredTermsEnumTests.java @@ -59,7 +59,7 @@ public class ParentChildFilteredTermsEnumTests extends ESTestCase { for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) { ++expected; assertThat(term.utf8ToString(), equalTo(format(expected))); - PostingsEnum docsEnum = termsEnum.postings(null, null); + PostingsEnum docsEnum = termsEnum.postings(null); assertThat(docsEnum, notNullValue()); int docId = docsEnum.nextDoc(); assertThat(docId, not(equalTo(-1))); @@ -98,7 +98,7 @@ public class ParentChildFilteredTermsEnumTests extends ESTestCase { for (BytesRef term = termsEnum.next(); term != null; term = termsEnum.next()) { ++expected; assertThat(term.utf8ToString(), equalTo(format(expected))); - PostingsEnum docsEnum = termsEnum.postings(null, null); + PostingsEnum docsEnum = termsEnum.postings(null); assertThat(docsEnum, notNullValue()); int numDocs = 0; for (int docId = docsEnum.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = docsEnum.nextDoc()) { diff --git a/core/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java index 2577de5f1c9..95b3bca7694 100644 --- a/core/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.*; import org.apache.lucene.search.*; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Bits; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.lease.Releasables; @@ -258,8 +259,14 @@ public class ChildrenConstantScoreQueryTests extends AbstractChildTestCase { for (String id : parentIds) { TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("parent", id)); if (seekStatus == TermsEnum.SeekStatus.FOUND) { - docsEnum = termsEnum.postings(slowLeafReader.getLiveDocs(), docsEnum, PostingsEnum.NONE); - expectedResult.set(docsEnum.nextDoc()); + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE); + final Bits liveDocs = slowLeafReader.getLiveDocs(); + for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) { + if (liveDocs == null || liveDocs.get(doc)) { + break; + } + } + expectedResult.set(docsEnum.docID()); } else if (seekStatus == TermsEnum.SeekStatus.END) { break; } diff --git 
a/core/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java index 60a98ffbe6b..d8d09fe0b9c 100644 --- a/core/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java @@ -31,6 +31,7 @@ import org.apache.lucene.index.*; import org.apache.lucene.search.*; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Bits; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.lease.Releasables; @@ -231,8 +232,14 @@ public class ChildrenQueryTests extends AbstractChildTestCase { if (count >= minChildren && (maxChildren == 0 || count <= maxChildren)) { TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("parent", entry.getKey())); if (seekStatus == TermsEnum.SeekStatus.FOUND) { - docsEnum = termsEnum.postings(slowLeafReader.getLiveDocs(), docsEnum, PostingsEnum.NONE); - expectedResult.set(docsEnum.nextDoc()); + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE); + final Bits liveDocs = slowLeafReader.getLiveDocs(); + for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) { + if (liveDocs == null || liveDocs.get(doc)) { + break; + } + } + expectedResult.set(docsEnum.docID()); scores[docsEnum.docID()] = new FloatArrayList(entry.getValue()); } else if (seekStatus == TermsEnum.SeekStatus.END) { break; diff --git a/core/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java index 55047d85bb5..71eb8214d1d 100644 --- a/core/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java +++ b/core/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.index.*; import org.apache.lucene.search.*; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Bits; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.lease.Releasables; @@ -209,8 +210,14 @@ public class ParentConstantScoreQueryTests extends AbstractChildTestCase { for (String id : childIds) { TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", id)); if (seekStatus == TermsEnum.SeekStatus.FOUND) { - docsEnum = termsEnum.postings(slowLeafReader.getLiveDocs(), docsEnum, PostingsEnum.NONE); - expectedResult.set(docsEnum.nextDoc()); + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE); + final Bits liveDocs = slowLeafReader.getLiveDocs(); + for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) { + if (liveDocs == null || liveDocs.get(doc)) { + break; + } + } + expectedResult.set(docsEnum.docID()); } else if (seekStatus == TermsEnum.SeekStatus.END) { break; } diff --git a/core/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java b/core/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java index 3fd638473ce..57dd8af9efd 100644 --- a/core/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java +++ 
b/core/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java @@ -29,6 +29,7 @@ import org.apache.lucene.index.*; import org.apache.lucene.search.*; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Bits; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.lease.Releasables; @@ -207,8 +208,14 @@ public class ParentQueryTests extends AbstractChildTestCase { for (Map.Entry entry : childIdsAndScore.entrySet()) { TermsEnum.SeekStatus seekStatus = termsEnum.seekCeil(Uid.createUidAsBytes("child", entry.getKey())); if (seekStatus == TermsEnum.SeekStatus.FOUND) { - docsEnum = termsEnum.postings(slowLeafReader.getLiveDocs(), docsEnum, PostingsEnum.NONE); - expectedResult.set(docsEnum.nextDoc()); + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.NONE); + final Bits liveDocs = slowLeafReader.getLiveDocs(); + for (int doc = docsEnum.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docsEnum.nextDoc()) { + if (liveDocs == null || liveDocs.get(doc)) { + break; + } + } + expectedResult.set(docsEnum.docID()); FloatArrayList s = scores[docsEnum.docID()]; if (s == null) { scores[docsEnum.docID()] = s = new FloatArrayList(2); diff --git a/core/src/test/java/org/elasticsearch/index/store/StoreTest.java b/core/src/test/java/org/elasticsearch/index/store/StoreTest.java index d5f929e12ce..5757764d250 100644 --- a/core/src/test/java/org/elasticsearch/index/store/StoreTest.java +++ b/core/src/test/java/org/elasticsearch/index/store/StoreTest.java @@ -22,8 +22,8 @@ import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.codecs.FilterCodec; import org.apache.lucene.codecs.SegmentInfoFormat; -import org.apache.lucene.codecs.lucene50.Lucene50Codec; import org.apache.lucene.codecs.lucene50.Lucene50SegmentInfoFormat; +import org.apache.lucene.codecs.lucene53.Lucene53Codec; import org.apache.lucene.document.*; import org.apache.lucene.index.*; import org.apache.lucene.store.*; @@ -181,7 +181,7 @@ public class StoreTest extends ESTestCase { private static final class OldSIMockingCodec extends FilterCodec { protected OldSIMockingCodec() { - super(new Lucene50Codec().getName(), new Lucene50Codec()); + super(new Lucene53Codec().getName(), new Lucene53Codec()); } @Override @@ -239,6 +239,10 @@ public class StoreTest extends ESTestCase { } // IF THIS TEST FAILS ON UPGRADE GO LOOK AT THE OldSIMockingCodec!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + @AwaitsFix(bugUrl="Fails with seed E1394B038144F6E") + // The test currently fails because the segment infos and the index don't + // agree on the oldest version of a segment. 
We should fix this test by + // switching to a static bw index @Test public void testWriteLegacyChecksums() throws IOException { final ShardId shardId = new ShardId(new Index("index"), 1); @@ -754,7 +758,6 @@ public class StoreTest extends ESTestCase { IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec()); iwc.setMergePolicy(NoMergePolicy.INSTANCE); iwc.setUseCompoundFile(random.nextBoolean()); - iwc.setMaxThreadStates(1); final ShardId shardId = new ShardId(new Index("index"), 1); DirectoryService directoryService = new LuceneManagedDirectoryService(random); Store store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId)); @@ -785,7 +788,6 @@ public class StoreTest extends ESTestCase { IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec()); iwc.setMergePolicy(NoMergePolicy.INSTANCE); iwc.setUseCompoundFile(random.nextBoolean()); - iwc.setMaxThreadStates(1); final ShardId shardId = new ShardId(new Index("index"), 1); DirectoryService directoryService = new LuceneManagedDirectoryService(random); store = new Store(shardId, Settings.EMPTY, directoryService, new DummyShardLock(shardId)); @@ -826,7 +828,6 @@ public class StoreTest extends ESTestCase { IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec()); iwc.setMergePolicy(NoMergePolicy.INSTANCE); iwc.setUseCompoundFile(random.nextBoolean()); - iwc.setMaxThreadStates(1); iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); IndexWriter writer = new IndexWriter(store.directory(), iwc); writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs)))); @@ -862,7 +863,6 @@ public class StoreTest extends ESTestCase { iwc = new IndexWriterConfig(new MockAnalyzer(random)).setCodec(TestUtil.getDefaultCodec()); iwc.setMergePolicy(NoMergePolicy.INSTANCE); iwc.setUseCompoundFile(true); // force CFS - easier to test here since we know it will add 3 files - iwc.setMaxThreadStates(1); iwc.setOpenMode(IndexWriterConfig.OpenMode.APPEND); writer = new IndexWriter(store.directory(), iwc); writer.addDocument(docs.get(0)); diff --git a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java index 7a98d3e0516..82e08588b58 100644 --- a/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java +++ b/core/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerIT.java @@ -20,24 +20,15 @@ package org.elasticsearch.indices.warmer; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; -import org.elasticsearch.action.admin.indices.segments.IndexSegments; -import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; -import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; -import org.elasticsearch.action.admin.indices.segments.ShardSegments; + import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse; import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; import org.elasticsearch.action.admin.indices.warmer.put.PutWarmerResponse; -import org.elasticsearch.client.Requests; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.Settings; -import 
org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.index.engine.Segment; -import org.elasticsearch.index.mapper.MappedFieldType.Loading; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.cache.request.IndicesRequestCache; -import org.elasticsearch.search.SearchService; import org.elasticsearch.search.warmer.IndexWarmerMissingException; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.test.ESIntegTestCase; @@ -45,10 +36,12 @@ import org.hamcrest.Matchers; import org.junit.Test; import java.util.List; -import java.util.Locale; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; public class SimpleIndicesWarmerIT extends ESIntegTestCase { @@ -264,94 +257,6 @@ public class SimpleIndicesWarmerIT extends ESIntegTestCase { return indicesStatsResponse.getIndex("test").getPrimaries().warmer.total(); } - private long getSegmentsMemoryUsage(String idx) { - IndicesSegmentResponse response = client().admin().indices().segments(Requests.indicesSegmentsRequest(idx)).actionGet(); - IndexSegments indicesSegments = response.getIndices().get(idx); - long total = 0; - for (IndexShardSegments indexShardSegments : indicesSegments) { - for (ShardSegments shardSegments : indexShardSegments) { - for (Segment segment : shardSegments) { - logger.debug("+=" + segment.memoryInBytes + " " + indexShardSegments.getShardId() + " " + shardSegments.getShardRouting().getIndex()); - total += segment.memoryInBytes; - } - } - } - return total; - } - - private enum LoadingMethod { - LAZY { - @Override - CreateIndexRequestBuilder createIndex(String indexName, String type, String fieldName) { - return client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.LAZY_VALUE)); - } - }, - EAGER { - @Override - CreateIndexRequestBuilder createIndex(String indexName, String type, String fieldName) { - return client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.EAGER_VALUE)); - } - - @Override - boolean isLazy() { - return false; - } - }, - EAGER_PER_FIELD { - @Override - CreateIndexRequestBuilder createIndex(String indexName, String type, String fieldName) throws Exception { - return client().admin().indices().prepareCreate(indexName).setSettings(Settings.builder().put(SINGLE_SHARD_NO_REPLICA).put(SearchService.NORMS_LOADING_KEY, Loading.LAZY_VALUE)).addMapping(type, JsonXContent.contentBuilder() - .startObject() - .startObject(type) - .startObject("properties") - .startObject(fieldName) - .field("type", "string") - .startObject("norms") - .field("loading", Loading.EAGER_VALUE) - .endObject() - .endObject() - .endObject() - .endObject() - .endObject() - ); - } - - @Override - boolean isLazy() { - return false; - } - }; - private static Settings SINGLE_SHARD_NO_REPLICA = Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0).build(); - - abstract CreateIndexRequestBuilder createIndex(String indexName, String type, String fieldName) throws Exception; - - boolean isLazy() { - return true; - } - } - - // NOTE: we have to ensure we defeat compression strategies of 
the default codec... - public void testEagerLoading() throws Exception { - for (LoadingMethod method : LoadingMethod.values()) { - logger.debug("METHOD " + method); - String indexName = method.name().toLowerCase(Locale.ROOT); - assertAcked(method.createIndex(indexName, "t", "foo")); - // index a doc with 1 token, and one with 3 tokens so we dont get CONST compressed (otherwise norms take zero memory usage) - client().prepareIndex(indexName, "t", "1").setSource("foo", "bar").execute().actionGet(); - client().prepareIndex(indexName, "t", "2").setSource("foo", "bar baz foo").setRefresh(true).execute().actionGet(); - ensureGreen(indexName); - long memoryUsage0 = getSegmentsMemoryUsage(indexName); - // queries load norms if they were not loaded before - client().prepareSearch(indexName).setQuery(QueryBuilders.matchQuery("foo", "bar")).execute().actionGet(); - long memoryUsage1 = getSegmentsMemoryUsage(indexName); - if (method.isLazy()) { - assertThat(memoryUsage1, greaterThan(memoryUsage0)); - } else { - assertThat(memoryUsage1, equalTo(memoryUsage0)); - } - } - } - public void testQueryCacheOnWarmer() { createIndex("test"); ensureGreen(); diff --git a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java index ca98047a590..6635f1e5b52 100644 --- a/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java +++ b/core/src/test/java/org/elasticsearch/search/fetch/FetchSubPhasePluginIT.java @@ -171,7 +171,7 @@ public class FetchSubPhasePluginIT extends ESIntegTestCase { TermsEnum terms = termVector.getFields().terms(field).iterator(); BytesRef term; while ((term = terms.next()) != null) { - tv.put(term.utf8ToString(), terms.postings(null, null, PostingsEnum.ALL).freq()); + tv.put(term.utf8ToString(), terms.postings(null, PostingsEnum.ALL).freq()); } hitField.values().add(tv); } catch (IOException e) { diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java index d9f69de6947..0c569b14d87 100644 --- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java +++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java @@ -1406,6 +1406,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { } @Test + @AwaitsFix(bugUrl="Broken now that BoostingQuery does not extend BooleanQuery anymore") public void testBoostingQueryTermVector() throws IOException { assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping())); ensureGreen(); @@ -1546,7 +1547,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { .fragmentSize(-1).numOfFragments(2).fragmenter("simple")).get(); assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight")); - assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end")); + assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end")); response = client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE)) @@ -1554,7 +1555,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { .fragmentSize(-1).numOfFragments(2).fragmenter("span")).get(); assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to 
highlight")); - assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end")); + assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end")); assertFailures(client().prepareSearch("test") .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQueryBuilder.Type.PHRASE)) @@ -2054,7 +2055,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { searchResponse = client().search(searchRequest("test").source(source)).actionGet(); - assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy quick dog")); + assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy quick dog")); } @Test @@ -2561,6 +2562,7 @@ public class HighlighterSearchIT extends ESIntegTestCase { } @Test + @AwaitsFix(bugUrl="Broken now that BoostingQuery does not extend BooleanQuery anymore") public void testFastVectorHighlighterPhraseBoost() throws Exception { assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping())); phraseBoostTestCase("fvh"); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java index 53e17966968..fde5037b850 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionTokenStreamTest.java @@ -54,7 +54,7 @@ public class CompletionTokenStreamTest extends ESTokenStreamTestCase { TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(tokenStream, payload, new CompletionTokenStream.ToFiniteStrings() { @Override public Set toFiniteStrings(TokenStream stream) throws IOException { - return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream); + return suggester.toFiniteStrings(stream); } })); assertTokenStreamContents(suggestTokenStream, new String[] {"mykeyword"}, null, null, new String[] {"Surface keyword|friggin payload|10"}, new int[] { 1 }, null, null); @@ -73,7 +73,7 @@ public class CompletionTokenStreamTest extends ESTokenStreamTestCase { TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(filter, payload, new CompletionTokenStream.ToFiniteStrings() { @Override public Set toFiniteStrings(TokenStream stream) throws IOException { - return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream); + return suggester.toFiniteStrings(stream); } })); assertTokenStreamContents(suggestTokenStream, new String[] {"mysynonym", "mykeyword"}, null, null, new String[] {"Surface keyword|friggin payload|10", "Surface keyword|friggin payload|10"}, new int[] { 2, 0 }, null, null); @@ -97,7 +97,7 @@ public class CompletionTokenStreamTest extends ESTokenStreamTestCase { TokenStream suggestTokenStream = new CompletionTokenStream(filter, new BytesRef("Surface keyword|friggin payload|10"), new CompletionTokenStream.ToFiniteStrings() { @Override public Set toFiniteStrings(TokenStream stream) throws IOException { - Set finiteStrings = suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream); + Set finiteStrings = suggester.toFiniteStrings(stream); return finiteStrings; } }); @@ -137,7 +137,7 @@ public class CompletionTokenStreamTest extends ESTokenStreamTestCase { TokenStream suggestTokenStream = new 
CompletionTokenStream(filter, new BytesRef("Surface keyword|friggin payload|10"), new CompletionTokenStream.ToFiniteStrings() { @Override public Set toFiniteStrings(TokenStream stream) throws IOException { - Set finiteStrings = suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream); + Set finiteStrings = suggester.toFiniteStrings(stream); return finiteStrings; } }); @@ -156,17 +156,15 @@ public class CompletionTokenStreamTest extends ESTokenStreamTestCase { TokenStream suggestTokenStream = new ByteTermAttrToCharTermAttrFilter(new CompletionTokenStream(tokenizer, payload, new CompletionTokenStream.ToFiniteStrings() { @Override public Set toFiniteStrings(TokenStream stream) throws IOException { - return suggester.toFiniteStrings(suggester.getTokenStreamToAutomaton(), stream); + return suggester.toFiniteStrings(stream); } })); TermToBytesRefAttribute termAtt = suggestTokenStream.getAttribute(TermToBytesRefAttribute.class); - BytesRef ref = termAtt.getBytesRef(); - assertNotNull(ref); + assertNotNull(termAtt.getBytesRef()); suggestTokenStream.reset(); while (suggestTokenStream.incrementToken()) { - termAtt.fillBytesRef(); - assertThat(ref.utf8ToString(), equalTo("mykeyword")); + assertThat(termAtt.getBytesRef().utf8ToString(), equalTo("mykeyword")); } suggestTokenStream.end(); suggestTokenStream.close(); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java index 23f92bd7ed3..eb78b6599d6 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/AnalyzingCompletionLookupProviderV1.java @@ -154,7 +154,7 @@ public class AnalyzingCompletionLookupProviderV1 extends CompletionLookupProvide if (term == null) { break; } - docsEnum = termsEnum.postings(null, docsEnum, PostingsEnum.PAYLOADS); + docsEnum = termsEnum.postings(docsEnum, PostingsEnum.PAYLOADS); builder.startTerm(term); int docFreq = 0; while (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { @@ -330,6 +330,6 @@ public class AnalyzingCompletionLookupProviderV1 extends CompletionLookupProvide @Override public Set toFiniteStrings(TokenStream stream) throws IOException { - return prototype.toFiniteStrings(prototype.getTokenStreamToAutomaton(), stream); + return prototype.toFiniteStrings(stream); } } \ No newline at end of file diff --git a/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java b/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java index 0bbd1cef8bf..35a222a75e1 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/completion/CompletionPostingsFormatTest.java @@ -23,7 +23,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.FieldsConsumer; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene50.Lucene50Codec; +import org.apache.lucene.codecs.lucene53.Lucene53Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.Fields; @@ -44,7 +44,6 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import 
org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.RAMDirectory; -import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LineFileDocs; import org.elasticsearch.Version; @@ -282,7 +281,7 @@ public class CompletionPostingsFormatTest extends ESTestCase { public Lookup buildAnalyzingLookup(final CompletionFieldMapper mapper, String[] terms, String[] surfaces, long[] weights) throws IOException { RAMDirectory dir = new RAMDirectory(); - Codec codec = new Lucene50Codec() { + Codec codec = new Lucene53Codec() { public PostingsFormat getPostingsFormatForField(String field) { final PostingsFormat in = super.getPostingsFormatForField(field); return mapper.fieldType().postingsFormat(in); @@ -401,13 +400,13 @@ public class CompletionPostingsFormatTest extends ESTestCase { } @Override - public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException { + public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { final TermPosAndPayload data = current; return new PostingsEnum() { boolean done = false; @Override public int nextPosition() throws IOException { - return current.pos; + return data.pos; } @Override @@ -422,7 +421,7 @@ public class CompletionPostingsFormatTest extends ESTestCase { @Override public BytesRef getPayload() throws IOException { - return current.payload; + return data.payload; } @Override diff --git a/core/src/test/java/org/elasticsearch/test/ESTestCase.java b/core/src/test/java/org/elasticsearch/test/ESTestCase.java index e0c24a541c6..3624b0ae168 100644 --- a/core/src/test/java/org/elasticsearch/test/ESTestCase.java +++ b/core/src/test/java/org/elasticsearch/test/ESTestCase.java @@ -21,7 +21,6 @@ package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.RandomizedContext; import com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.annotations.Listeners; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope.Scope; @@ -92,10 +91,6 @@ import static org.hamcrest.Matchers.equalTo; LoggingListener.class, AssertionErrorThreadDumpPrinter.class }) -// remove this entire annotation on upgrade to 5.3! -@ThreadLeakFilters(defaultFilters = true, filters = { - IBMJ9HackThreadFilters.class, -}) @ThreadLeakScope(Scope.SUITE) @ThreadLeakLingering(linger = 5000) // 5 sec lingering @TimeoutSuite(millis = 20 * TimeUnits.MINUTE) diff --git a/core/src/test/java/org/elasticsearch/test/ESTokenStreamTestCase.java b/core/src/test/java/org/elasticsearch/test/ESTokenStreamTestCase.java index 685b158862f..29a1a3362d9 100644 --- a/core/src/test/java/org/elasticsearch/test/ESTokenStreamTestCase.java +++ b/core/src/test/java/org/elasticsearch/test/ESTokenStreamTestCase.java @@ -20,7 +20,6 @@ package org.elasticsearch.test; import com.carrotsearch.randomizedtesting.annotations.Listeners; -import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import org.apache.lucene.analysis.BaseTokenStreamTestCase; @@ -35,10 +34,6 @@ import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; @Listeners({ ReproduceInfoPrinter.class }) -//remove this entire annotation on upgrade to 5.3! 
-@ThreadLeakFilters(defaultFilters = true, filters = { - IBMJ9HackThreadFilters.class, -}) @TimeoutSuite(millis = TimeUnits.HOUR) @LuceneTestCase.SuppressReproduceLine @LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose") diff --git a/core/src/test/java/org/elasticsearch/test/IBMJ9HackThreadFilters.java b/core/src/test/java/org/elasticsearch/test/IBMJ9HackThreadFilters.java deleted file mode 100644 index 45c8277dc02..00000000000 --- a/core/src/test/java/org/elasticsearch/test/IBMJ9HackThreadFilters.java +++ /dev/null @@ -1,53 +0,0 @@ -package org.elasticsearch.test; - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -import com.carrotsearch.randomizedtesting.ThreadFilter; - -import org.apache.lucene.util.Constants; -import org.apache.lucene.util.Version; - -/** temporary workaround for https://issues.apache.org/jira/browse/LUCENE-6518 - * remove me on upgrade to 5.3! I am just an updated version of QuickPatchThreadFilters from lucene */ -public class IBMJ9HackThreadFilters implements ThreadFilter { - static final boolean isJ9; - - static { - assert Version.LATEST.equals(Version.LUCENE_5_2_1) : "please remove this entire class for 5.3"; - isJ9 = Constants.JAVA_VENDOR.startsWith("IBM"); - } - - @Override - public boolean reject(Thread t) { - if (isJ9) { - // LUCENE-6518 - if ("ClassCache Reaper".equals(t.getName())) { - return true; - } - - // LUCENE-4736 - StackTraceElement [] stack = t.getStackTrace(); - if (stack.length > 0 && stack[stack.length - 1].getClassName().equals("java.util.Timer$TimerImpl")) { - return true; - } - } - return false; - } -} diff --git a/core/src/test/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java b/core/src/test/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java index 365bf7fb652..422b9375a1e 100644 --- a/core/src/test/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java +++ b/core/src/test/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java @@ -146,13 +146,13 @@ public class ThrowingLeafReaderWrapper extends FilterLeafReader { } @Override - public PostingsEnum postings(Bits liveDocs, PostingsEnum reuse, int flags) throws IOException { + public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException { if ((flags & PostingsEnum.POSITIONS) != 0) { thrower.maybeThrow(Flags.DocsAndPositionsEnum); } else { thrower.maybeThrow(Flags.DocsEnum); } - return super.postings(liveDocs, reuse, flags); + return super.postings(reuse, flags); } } diff --git a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index be493cd242d..b55fd550aea 100644 --- a/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ 
b/core/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -233,13 +233,13 @@ public class SimpleValidateQueryIT extends ESIntegTestCase { // common terms queries assertExplanation(QueryBuilders.commonTermsQuery("field", "huge brown pidgin").cutoffFrequency(1), - containsString("(field:huge field:brown) +field:pidgin"), true); + containsString("+field:pidgin (field:huge field:brown)"), true); assertExplanation(QueryBuilders.commonTermsQuery("field", "the brown").analyzer("stop"), containsString("field:brown"), true); // match queries with cutoff frequency assertExplanation(QueryBuilders.matchQuery("field", "huge brown pidgin").cutoffFrequency(1), - containsString("(field:huge field:brown) +field:pidgin"), true); + containsString("+field:pidgin (field:huge field:brown)"), true); assertExplanation(QueryBuilders.matchQuery("field", "the brown").analyzer("stop"), containsString("field:brown"), true); diff --git a/distribution/licenses/lucene-analyzers-common-5.2.1.jar.sha1 b/distribution/licenses/lucene-analyzers-common-5.2.1.jar.sha1 deleted file mode 100644 index 48f8e581476..00000000000 --- a/distribution/licenses/lucene-analyzers-common-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -33b7cc17d5a7c939af6fe3f67563f4709926d7f5 diff --git a/distribution/licenses/lucene-analyzers-common-5.3.0.jar.sha1 b/distribution/licenses/lucene-analyzers-common-5.3.0.jar.sha1 new file mode 100644 index 00000000000..4d79ce9d9e2 --- /dev/null +++ b/distribution/licenses/lucene-analyzers-common-5.3.0.jar.sha1 @@ -0,0 +1 @@ +1502beac94cf437baff848ffbbb8f76172befa6b diff --git a/distribution/licenses/lucene-backward-codecs-5.2.1.jar.sha1 b/distribution/licenses/lucene-backward-codecs-5.2.1.jar.sha1 deleted file mode 100644 index f01d68718f2..00000000000 --- a/distribution/licenses/lucene-backward-codecs-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -603d1f06b133449272799d698e5118db65e523ba diff --git a/distribution/licenses/lucene-backward-codecs-5.3.0.jar.sha1 b/distribution/licenses/lucene-backward-codecs-5.3.0.jar.sha1 new file mode 100644 index 00000000000..9b802fb5e04 --- /dev/null +++ b/distribution/licenses/lucene-backward-codecs-5.3.0.jar.sha1 @@ -0,0 +1 @@ +f654901e55fe56bdbe4be202767296929c2f8d9e diff --git a/distribution/licenses/lucene-core-5.2.1.jar.sha1 b/distribution/licenses/lucene-core-5.2.1.jar.sha1 deleted file mode 100644 index cbebe2b858c..00000000000 --- a/distribution/licenses/lucene-core-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a175590aa8b04e079eb1a136fd159f9163482ba4 diff --git a/distribution/licenses/lucene-core-5.3.0.jar.sha1 b/distribution/licenses/lucene-core-5.3.0.jar.sha1 new file mode 100644 index 00000000000..9765d65189b --- /dev/null +++ b/distribution/licenses/lucene-core-5.3.0.jar.sha1 @@ -0,0 +1 @@ +9e12bb7c39e964a544e3a23b9c8ffa9599d38f10 diff --git a/distribution/licenses/lucene-expressions-5.2.1.jar.sha1 b/distribution/licenses/lucene-expressions-5.2.1.jar.sha1 deleted file mode 100644 index 1823826d962..00000000000 --- a/distribution/licenses/lucene-expressions-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b966460caa7a91be5969dc5c0053d8de4e861fd6 diff --git a/distribution/licenses/lucene-expressions-5.3.0.jar.sha1 b/distribution/licenses/lucene-expressions-5.3.0.jar.sha1 new file mode 100644 index 00000000000..232b4f3ff34 --- /dev/null +++ b/distribution/licenses/lucene-expressions-5.3.0.jar.sha1 @@ -0,0 +1 @@ +dc6f5e352f787d71a7896025c0cdd0eb665b2985 diff --git a/distribution/licenses/lucene-grouping-5.2.1.jar.sha1 
b/distribution/licenses/lucene-grouping-5.2.1.jar.sha1 deleted file mode 100644 index 23cea6c545f..00000000000 --- a/distribution/licenses/lucene-grouping-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5682a9820d4f8ef99150b80dcc260919e68ebf39 diff --git a/distribution/licenses/lucene-grouping-5.3.0.jar.sha1 b/distribution/licenses/lucene-grouping-5.3.0.jar.sha1 new file mode 100644 index 00000000000..82b09e61a01 --- /dev/null +++ b/distribution/licenses/lucene-grouping-5.3.0.jar.sha1 @@ -0,0 +1 @@ +2d27582889b8676dfed6880a920148f3e32c9b42 diff --git a/distribution/licenses/lucene-highlighter-5.2.1.jar.sha1 b/distribution/licenses/lucene-highlighter-5.2.1.jar.sha1 deleted file mode 100644 index 67e9e8ee40a..00000000000 --- a/distribution/licenses/lucene-highlighter-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dd9bba952e362970a1084201fe4858e08f1ceb1f diff --git a/distribution/licenses/lucene-highlighter-5.3.0.jar.sha1 b/distribution/licenses/lucene-highlighter-5.3.0.jar.sha1 new file mode 100644 index 00000000000..406bc446a08 --- /dev/null +++ b/distribution/licenses/lucene-highlighter-5.3.0.jar.sha1 @@ -0,0 +1 @@ +3b9d67c0f93e107a9ad8c179505df56a85e3f027 diff --git a/distribution/licenses/lucene-join-5.2.1.jar.sha1 b/distribution/licenses/lucene-join-5.2.1.jar.sha1 deleted file mode 100644 index 00c2c22e08e..00000000000 --- a/distribution/licenses/lucene-join-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -168e9c9b826faf60489a25645e4322fb8d130574 diff --git a/distribution/licenses/lucene-join-5.3.0.jar.sha1 b/distribution/licenses/lucene-join-5.3.0.jar.sha1 new file mode 100644 index 00000000000..fbf636c2649 --- /dev/null +++ b/distribution/licenses/lucene-join-5.3.0.jar.sha1 @@ -0,0 +1 @@ +95ddffcd889af106136704ecb7dc7173b3e9cdb3 diff --git a/distribution/licenses/lucene-memory-5.2.1.jar.sha1 b/distribution/licenses/lucene-memory-5.2.1.jar.sha1 deleted file mode 100644 index 93c743ba1ad..00000000000 --- a/distribution/licenses/lucene-memory-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -601f5404c137600488f5b2f2ca635db4ac9fd0cb diff --git a/distribution/licenses/lucene-memory-5.3.0.jar.sha1 b/distribution/licenses/lucene-memory-5.3.0.jar.sha1 new file mode 100644 index 00000000000..0f39068c29b --- /dev/null +++ b/distribution/licenses/lucene-memory-5.3.0.jar.sha1 @@ -0,0 +1 @@ +44f50f425264b4b17e6781ba07bdc80b4d36bb65 diff --git a/distribution/licenses/lucene-misc-5.2.1.jar.sha1 b/distribution/licenses/lucene-misc-5.2.1.jar.sha1 deleted file mode 100644 index 227b55c2d23..00000000000 --- a/distribution/licenses/lucene-misc-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -be0a4f0ac06f0a2fa3689b4bf6cd1fe6847f9969 diff --git a/distribution/licenses/lucene-misc-5.3.0.jar.sha1 b/distribution/licenses/lucene-misc-5.3.0.jar.sha1 new file mode 100644 index 00000000000..50949e57486 --- /dev/null +++ b/distribution/licenses/lucene-misc-5.3.0.jar.sha1 @@ -0,0 +1 @@ +d03ce6d1bb8ab3926b3acc717418c474a49ade69 diff --git a/distribution/licenses/lucene-queries-5.2.1.jar.sha1 b/distribution/licenses/lucene-queries-5.2.1.jar.sha1 deleted file mode 100644 index 026e3a9032e..00000000000 --- a/distribution/licenses/lucene-queries-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5bada7fe2251e097413a23eefc8c87d009dac24f diff --git a/distribution/licenses/lucene-queries-5.3.0.jar.sha1 b/distribution/licenses/lucene-queries-5.3.0.jar.sha1 new file mode 100644 index 00000000000..51486ac5c70 --- /dev/null +++ b/distribution/licenses/lucene-queries-5.3.0.jar.sha1 @@ -0,0 +1 @@ +a0e8ff0bb90fd762800afdd434fdf769b1f9ac28 diff --git 
a/distribution/licenses/lucene-queryparser-5.2.1.jar.sha1 b/distribution/licenses/lucene-queryparser-5.2.1.jar.sha1 deleted file mode 100644 index a2d8e2cc291..00000000000 --- a/distribution/licenses/lucene-queryparser-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -73be0a2d4ab3e6b574be1938bfb27f7f730f0ad9 diff --git a/distribution/licenses/lucene-queryparser-5.3.0.jar.sha1 b/distribution/licenses/lucene-queryparser-5.3.0.jar.sha1 new file mode 100644 index 00000000000..f542844d20b --- /dev/null +++ b/distribution/licenses/lucene-queryparser-5.3.0.jar.sha1 @@ -0,0 +1 @@ +2c5e08580316c90b56a52e3cb686e1cf69db3f9e diff --git a/distribution/licenses/lucene-sandbox-5.2.1.jar.sha1 b/distribution/licenses/lucene-sandbox-5.2.1.jar.sha1 deleted file mode 100644 index 3caf3072079..00000000000 --- a/distribution/licenses/lucene-sandbox-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d2355e5d8c95a4c3188ee2734a9f98829b2b10b diff --git a/distribution/licenses/lucene-sandbox-5.3.0.jar.sha1 b/distribution/licenses/lucene-sandbox-5.3.0.jar.sha1 new file mode 100644 index 00000000000..b1bf9194e10 --- /dev/null +++ b/distribution/licenses/lucene-sandbox-5.3.0.jar.sha1 @@ -0,0 +1 @@ +152da54a3b1ea6e3e8648d767616a51857b66a8e diff --git a/distribution/licenses/lucene-spatial-5.2.1.jar.sha1 b/distribution/licenses/lucene-spatial-5.2.1.jar.sha1 deleted file mode 100644 index 20f07e938cb..00000000000 --- a/distribution/licenses/lucene-spatial-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec498e52fdfc8ab751d9712b04c76e26e75e5014 diff --git a/distribution/licenses/lucene-spatial-5.3.0.jar.sha1 b/distribution/licenses/lucene-spatial-5.3.0.jar.sha1 new file mode 100644 index 00000000000..6499667fa8e --- /dev/null +++ b/distribution/licenses/lucene-spatial-5.3.0.jar.sha1 @@ -0,0 +1 @@ +6d57880a0950416035112f4fcc725854c011b081 diff --git a/distribution/licenses/lucene-spatial3d-5.3.0.jar.sha1 b/distribution/licenses/lucene-spatial3d-5.3.0.jar.sha1 new file mode 100644 index 00000000000..d1dd3219632 --- /dev/null +++ b/distribution/licenses/lucene-spatial3d-5.3.0.jar.sha1 @@ -0,0 +1 @@ +23cfd7c19ead7b6fc6b2921f9c490ad3d043770d diff --git a/distribution/licenses/lucene-suggest-5.2.1.jar.sha1 b/distribution/licenses/lucene-suggest-5.2.1.jar.sha1 deleted file mode 100644 index 12a585d32bc..00000000000 --- a/distribution/licenses/lucene-suggest-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0d62b25d52f9949b243c9cdb8a78830aa4944415 diff --git a/distribution/licenses/lucene-suggest-5.3.0.jar.sha1 b/distribution/licenses/lucene-suggest-5.3.0.jar.sha1 new file mode 100644 index 00000000000..dc59343223c --- /dev/null +++ b/distribution/licenses/lucene-suggest-5.3.0.jar.sha1 @@ -0,0 +1 @@ +a155fc16a20b11205f99603950025522b173edc9 diff --git a/pom.xml b/pom.xml index b4fa4af2da0..2c8ea831820 100644 --- a/pom.xml +++ b/pom.xml @@ -41,8 +41,8 @@ 1.7 - 5.2.1 - 5.2.1 + 5.3.0 + 5.3.0 2.1.16 2.5.3 1.6.2 From 05b48b904d55a237d69db3de28c7069cab901a7b Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Tue, 1 Sep 2015 12:15:28 +0200 Subject: [PATCH 33/51] set timeout for refresh and flush to default Since #13068 refresh and flush requests go to the primary first and are then replicated. One difference from before, though, is that if a shard is not available (INITIALIZING, for example) an indexing request waits for it a little while, but a refresh does not and just gives up immediately. Before, refresh requests were just sent to the shards regardless of their state.
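The test pattern this affects is described in the next paragraph. As a hedged sketch (a hypothetical test using standard ESIntegTestCase helpers; the class and names are illustrative and not part of this change), it looks like the following, where the ensureGreen call is exactly the wait the affected tests do not perform:

    import org.elasticsearch.test.ESIntegTestCase;
    import org.junit.Test;

    public class RefreshVisibilityIT extends ESIntegTestCase {
        @Test
        public void testDocVisibleAfterRefresh() throws Exception {
            createIndex("test");
            index("test", "type", "1", "field", "value");
            // Hypothetical fix at the test level: wait until every node's
            // applied cluster state shows the primaries as STARTED. Without
            // this, the refresh below can be routed through a lagging node
            // that silently skips shards it still considers INITIALIZING.
            ensureGreen("test");
            refresh();
            assertTrue(client().prepareGet("test", "type", "1").get().isExists());
        }
    }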
In tests we sometimes create an index, issue an indexing request, refresh and then get the document. But we do not wait until all nodes know that all primaries have been assigned. Potentially one node can then be one cluster state behind and not yet know that the shards have been started. If the refresh is executed through this node, then the refresh request will silently fail on shards that are started already, because from the node's perspective they are still initializing. As a consequence, documents that are expected to be available in the test are not. Example test failures are here: http://build-us-00.elastic.co/job/elasticsearch-20-oracle-jdk7/395/ This commit changes the timeout to 1m (default) to make sure we don't miss shards when we refresh. This will trigger the same retry mechanism as for indexing requests. We still have to decide whether this change of behavior is acceptable. See #13238 --- .../action/admin/indices/flush/TransportFlushAction.java | 2 +- .../action/admin/indices/refresh/TransportRefreshAction.java | 2 +- .../action/support/replication/BroadcastReplicationTests.java | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index d43cbb25ab5..2882b508a81 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -53,7 +53,7 @@ public class TransportFlushAction extends TransportBroadcastReplicationAction Date: Tue, 1 Sep 2015 09:44:13 +0200 Subject: [PATCH 34/51] rename actions back to admin/* and add suffix [s] instead --- .../action/admin/indices/flush/TransportShardFlushAction.java | 2 +- .../admin/indices/refresh/TransportShardRefreshAction.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 9b7c0643be2..239a487614f 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -44,7 +44,7 @@ import org.elasticsearch.transport.TransportService; */ public class TransportShardFlushAction extends TransportReplicationAction { - public static final String NAME = "indices:data/write/flush"; + public static final String NAME = FlushAction.NAME + "[s]"; @Inject public TransportShardFlushAction(Settings settings, TransportService transportService, ClusterService clusterService, diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java index 2604d67b1b0..ac3911abfbf 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportShardRefreshAction.java @@ -45,7 +45,7 @@ import org.elasticsearch.transport.TransportService; */ public class TransportShardRefreshAction extends TransportReplicationAction { - public static final String NAME = "indices:data/write/refresh"; + public static final String NAME =
RefreshAction.NAME + "[s]"; @Inject public TransportShardRefreshAction(Settings settings, TransportService transportService, ClusterService clusterService, From 20921fcc3d21e99d258e97fbf6d31f8a7a88bdb5 Mon Sep 17 00:00:00 2001 From: Clinton Gormley Date: Tue, 1 Sep 2015 13:43:03 +0200 Subject: [PATCH 35/51] Document transport.ping_schedule Closes #13241 --- docs/reference/modules/transport.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index f836f66bcfe..1c92eb24e4c 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -43,6 +43,9 @@ time setting format). Defaults to `30s`. |`transport.tcp.compress` |Set to `true` to enable compression (LZF) between all nodes. Defaults to `false`. + +|`transport.ping_schedule` | Schedule a regular ping message to ensure that connections are kept alive. Defaults to `5s` in the transport client and `-1` (disabled) elsewhere. + |======================================================================= It also uses the common From 1c773e235af139c17671de294663c7f0dd3c970b Mon Sep 17 00:00:00 2001 From: Marshall Bockrath-Vandegrift Date: Tue, 1 Sep 2015 08:39:08 -0400 Subject: [PATCH 36/51] Estimate HyperLogLog bias via k-NN regression MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The implementation this commit replaces was almost k-NN regression with k=2, but had two bugs: (a) it depended on the empirical raw estimates being in strictly non-decreasing order for the binary search (which they are not); and (b) it weighted the biases more heavily with increased distance from the corresponding raw estimate. “HyperLogLog in Practice” leaves the choice of exact algorithm here fairly vague, just noting: “We use k-nearest neighbor interpolation to get the bias for a given raw estimate (for k = 6).” The majority of other open source HyperLogLog++ implementations appear to use k-NN regression with uniform weights (and generally k = 6). Uniform weighting does decrease variance, but also introduces bias at the domain extrema. This problem, plus the use of the word “interpolation” in the original paper, suggests (inverse) distance-weighted k-NN, as implemented here.
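To make the weighting concrete, here is a minimal, self-contained sketch of inverse-distance-weighted k-NN regression. It is illustrative only: the names are invented, the sort-based neighbor search is the simplest correct formulation, and the committed change below implements the same idea with a single pass and a small ring buffer instead.

    import java.util.Arrays;
    import java.util.Comparator;

    public class KnnBiasSketch {
        // Estimate the bias for raw estimate e by averaging the bias values of
        // the k entries of rawEstimate[] closest to e, each weighted by the
        // inverse of its distance to e.
        static double estimateBias(double e, double[] rawEstimate, double[] bias, int k) {
            Integer[] order = new Integer[rawEstimate.length];
            for (int i = 0; i < order.length; ++i) {
                order[i] = i;
            }
            // Sort indices by distance to e; the first k are the nearest neighbors.
            Arrays.sort(order, Comparator.comparingDouble(i -> Math.abs(rawEstimate[i] - e)));
            double weightSum = 0, biasSum = 0;
            for (int n = 0; n < k; ++n) {
                int i = order[n];
                double d = Math.abs(rawEstimate[i] - e);
                if (d == 0) {
                    return bias[i]; // exact match: its weight would be infinite
                }
                double w = 1.0 / d; // closer neighbors count for more
                weightSum += w;
                biasSum += w * bias[i];
            }
            return biasSum / weightSum;
        }

        public static void main(String[] args) {
            double[] raw = {10, 20, 30, 40, 50};
            double[] bias = {1, 2, 3, 4, 5};
            // Neighbors of 27 are 30, 20, and 40; the result is pulled toward 3.
            System.out.println(estimateBias(27, raw, bias, 3));
        }
    }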
--- .../cardinality/HyperLogLogPlusPlus.java | 38 +++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java index b21bd5224c3..6eb5153e59d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java @@ -62,6 +62,7 @@ public final class HyperLogLogPlusPlus implements Releasable { private static final boolean HYPERLOGLOG = true; private static final float MAX_LOAD_FACTOR = 0.75f; private static final int P2 = 25; + private static final int BIAS_K = 6; /** * Compute the required precision so that count distinct entries @@ -374,23 +375,30 @@ public final class HyperLogLogPlusPlus implements Releasable { private double estimateBias(double e) { final double[] rawEstimateData = rawEstimateData(); final double[] biasData = biasData(); - int index = Arrays.binarySearch(rawEstimateData, e); - if (index >= 0) { - return biasData[index]; - } else { - index = -1 - index; - if (index == 0) { - return biasData[0]; - } else if (index >= biasData.length) { - return biasData[biasData.length - 1]; - } else { - double w1 = (e - rawEstimateData[index - 1]); - assert w1 >= 0; - double w2 = (rawEstimateData[index] - e); - assert w2 >= 0; - return (biasData[index - 1] * w1 + biasData[index] * w2) / (w1 + w2); + + final double[] weights = new double[BIAS_K]; + int index = biasData.length - BIAS_K; + for (int i = 0; i < rawEstimateData.length; ++i) { + final double w = 1.0 / Math.abs(rawEstimateData[i] - e); + final int j = i % weights.length; + if (Double.isInfinite(w)) { + return biasData[i]; + } else if (weights[j] >= w) { + index = i - BIAS_K; + break; } + weights[j] = w; } + + double weightSum = 0.0; + double biasSum = 0.0; + for (int i = 0, j = index; i < BIAS_K; ++i, ++j) { + final double w = weights[i]; + final double b = biasData[j]; + biasSum += w * b; + weightSum += w; + } + return biasSum / weightSum; } private double[] biasData() { From 0d3e3f81fc267f717038a007e4b7f08191f6a5d6 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 1 Sep 2015 08:52:10 -0400 Subject: [PATCH 37/51] Lithuanian analysis --- .../index/analysis/Analysis.java | 2 + .../index/analysis/AnalysisModule.java | 1 + .../analysis/LithuanianAnalyzerProvider.java | 50 +++++++++++++++++++ .../analysis/StemmerTokenFilterFactory.java | 3 ++ .../indices/analysis/PreBuiltAnalyzers.java | 10 ++++ .../BasicAnalysisBackwardCompatibilityIT.java | 3 ++ .../analysis/analyzers/lang-analyzer.asciidoc | 50 ++++++++++++++++++- .../snowball-tokenfilter.asciidoc | 4 +- .../tokenfilters/stemmer-tokenfilter.asciidoc | 4 ++ 9 files changed, 123 insertions(+), 4 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java diff --git a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java index a58b0f20cf8..d97b22d18cb 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -46,6 +46,7 @@ import org.apache.lucene.analysis.hu.HungarianAnalyzer; import org.apache.lucene.analysis.hy.ArmenianAnalyzer; import 
org.apache.lucene.analysis.id.IndonesianAnalyzer; import org.apache.lucene.analysis.it.ItalianAnalyzer; +import org.apache.lucene.analysis.lt.LithuanianAnalyzer; import org.apache.lucene.analysis.lv.LatvianAnalyzer; import org.apache.lucene.analysis.nl.DutchAnalyzer; import org.apache.lucene.analysis.no.NorwegianAnalyzer; @@ -145,6 +146,7 @@ public class Analysis { .put("_irish_", IrishAnalyzer.getDefaultStopSet()) .put("_italian_", ItalianAnalyzer.getDefaultStopSet()) .put("_latvian_", LatvianAnalyzer.getDefaultStopSet()) + .put("_lithuanian_", LithuanianAnalyzer.getDefaultStopSet()) .put("_norwegian_", NorwegianAnalyzer.getDefaultStopSet()) .put("_persian_", PersianAnalyzer.getDefaultStopSet()) .put("_portuguese_", PortugueseAnalyzer.getDefaultStopSet()) diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java index 21667cf6128..393f1c96317 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java @@ -492,6 +492,7 @@ public class AnalysisModule extends AbstractModule { analyzersBindings.processAnalyzer("irish", IrishAnalyzerProvider.class); analyzersBindings.processAnalyzer("italian", ItalianAnalyzerProvider.class); analyzersBindings.processAnalyzer("latvian", LatvianAnalyzerProvider.class); + analyzersBindings.processAnalyzer("lithuanian", LithuanianAnalyzerProvider.class); analyzersBindings.processAnalyzer("norwegian", NorwegianAnalyzerProvider.class); analyzersBindings.processAnalyzer("persian", PersianAnalyzerProvider.class); analyzersBindings.processAnalyzer("portuguese", PortugueseAnalyzerProvider.class); diff --git a/core/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java b/core/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java new file mode 100644 index 00000000000..bac14a479c9 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/analysis/LithuanianAnalyzerProvider.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.lt.LithuanianAnalyzer; +import org.apache.lucene.analysis.util.CharArraySet; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.inject.assistedinject.Assisted; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.settings.IndexSettings; + +/** + * Provider for {@link LithuanianAnalyzer} + */ +public class LithuanianAnalyzerProvider extends AbstractIndexAnalyzerProvider { + + private final LithuanianAnalyzer analyzer; + + @Inject + public LithuanianAnalyzerProvider(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) { + super(index, indexSettings, name, settings); + analyzer = new LithuanianAnalyzer(Analysis.parseStopWords(env, settings, LithuanianAnalyzer.getDefaultStopSet()), + Analysis.parseStemExclusion(settings, CharArraySet.EMPTY_SET)); + analyzer.setVersion(version); + } + + @Override + public LithuanianAnalyzer get() { + return this.analyzer; + } +} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java b/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java index 1b2018f07b7..84ebe4087af 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/StemmerTokenFilterFactory.java @@ -185,6 +185,9 @@ public class StemmerTokenFilterFactory extends AbstractTokenFilterFactory { } else if ("latvian".equalsIgnoreCase(language)) { return new LatvianStemFilter(tokenStream); + } else if ("lithuanian".equalsIgnoreCase(language)) { + return new SnowballFilter(tokenStream, new LithuanianStemmer()); + // Norwegian (Bokmål) stemmers } else if ("norwegian".equalsIgnoreCase(language)) { return new SnowballFilter(tokenStream, new NorwegianStemmer()); diff --git a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java index 7aeafcf3ee7..cb07779164e 100644 --- a/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java +++ b/core/src/main/java/org/elasticsearch/indices/analysis/PreBuiltAnalyzers.java @@ -46,6 +46,7 @@ import org.apache.lucene.analysis.hu.HungarianAnalyzer; import org.apache.lucene.analysis.hy.ArmenianAnalyzer; import org.apache.lucene.analysis.id.IndonesianAnalyzer; import org.apache.lucene.analysis.it.ItalianAnalyzer; +import org.apache.lucene.analysis.lt.LithuanianAnalyzer; import org.apache.lucene.analysis.lv.LatvianAnalyzer; import org.apache.lucene.analysis.nl.DutchAnalyzer; import org.apache.lucene.analysis.no.NorwegianAnalyzer; @@ -378,6 +379,15 @@ public enum PreBuiltAnalyzers { } }, + LITHUANIAN { + @Override + protected Analyzer create(Version version) { + Analyzer a = new LithuanianAnalyzer(); + a.setVersion(version.luceneVersion); + return a; + } + }, + NORWEGIAN { @Override protected Analyzer create(Version version) { diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityIT.java index aec57c57816..4b38cf73919 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityIT.java +++ 
b/core/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityIT.java @@ -112,6 +112,9 @@ public class BasicAnalysisBackwardCompatibilityIT extends ESBackcompatTestCase { if (preBuiltAnalyzers == PreBuiltAnalyzers.SORANI && compatibilityVersion().before(Version.V_1_3_0)) { continue; // SORANI was added in 1.3.0 } + if (preBuiltAnalyzers == PreBuiltAnalyzers.LITHUANIAN && compatibilityVersion().before(Version.V_2_1_0)) { + continue; // LITHUANIAN was added in 2.1.0 + } return preBuiltAnalyzers.name().toLowerCase(Locale.ROOT); } diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 69388ffa7a1..5d849807bc7 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -25,6 +25,7 @@ following types are supported: <>, <>, <>, +<>, <>, <>, <>, @@ -56,8 +57,9 @@ with the `keywords` set to the value of the `stem_exclusion` parameter. The following analyzers support setting custom `stem_exclusion` list: `arabic`, `armenian`, `basque`, `catalan`, `bulgarian`, `catalan`, `czech`, `finnish`, `dutch`, `english`, `finnish`, `french`, `galician`, -`german`, `irish`, `hindi`, `hungarian`, `indonesian`, `italian`, `latvian`, `norwegian`, -`portuguese`, `romanian`, `russian`, `sorani`, `spanish`, `swedish`, `turkish`. +`german`, `irish`, `hindi`, `hungarian`, `indonesian`, `italian`, `latvian`, +`lithuanian`, `norwegian`, `portuguese`, `romanian`, `russian`, `sorani`, +`spanish`, `swedish`, `turkish`. ==== Reimplementing language analyzers @@ -1082,6 +1084,50 @@ The `latvian` analyzer could be reimplemented as a `custom` analyzer as follows: <2> This filter should be removed unless there are words which should be excluded from stemming. +[[lithuanian-analyzer]] +===== `lithuanian` analyzer + +The `lithuanian` analyzer could be reimplemented as a `custom` analyzer as follows: + +[source,js] +---------------------------------------------------- +{ + "settings": { + "analysis": { + "filter": { + "lithuanian_stop": { + "type": "stop", + "stopwords": "_lithuanian_" <1> + }, + "lithuanian_keywords": { + "type": "keyword_marker", + "keywords": [] <2> + }, + "lithuanian_stemmer": { + "type": "stemmer", + "language": "lithuanian" + } + }, + "analyzer": { + "lithuanian": { + "tokenizer": "standard", + "filter": [ + "lowercase", + "lithuanian_stop", + "lithuanian_keywords", + "lithuanian_stemmer" + ] + } + } + } + } +} +---------------------------------------------------- +<1> The default stopwords can be overridden with the `stopwords` + or `stopwords_path` parameters. +<2> This filter should be removed unless there are words which should + be excluded from stemming. + [[norwegian-analyzer]] ===== `norwegian` analyzer diff --git a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc index 58d88988745..6042642027c 100644 --- a/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/snowball-tokenfilter.asciidoc @@ -5,8 +5,8 @@ A filter that stems words using a Snowball-generated stemmer. The `language` parameter controls the stemmer with the following available values: `Armenian`, `Basque`, `Catalan`, `Danish`, `Dutch`, `English`, `Finnish`, `French`, `German`, `German2`, `Hungarian`, `Italian`, `Kp`, -`Lovins`, `Norwegian`, `Porter`, `Portuguese`, `Romanian`, `Russian`, -`Spanish`, `Swedish`, `Turkish`. 
+`Lithuanian`, `Lovins`, `Norwegian`, `Porter`, `Portuguese`, `Romanian`, +`Russian`, `Spanish`, `Swedish`, `Turkish`. For example: diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc index 3d83b2044d9..548342c521b 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc @@ -133,6 +133,10 @@ Latvian:: http://lucene.apache.org/core/4_9_0/analyzers-common/org/apache/lucene/analysis/lv/LatvianStemmer.html[*`latvian`*] +Lithuanian:: + +http://svn.apache.org/viewvc/lucene/dev/branches/lucene_solr_5_3/lucene/analysis/common/src/java/org/apache/lucene/analysis/lt/stem_ISO_8859_1.sbl?view=markup[*`lithuanian`*] + Norwegian (Bokmål):: http://snowball.tartarus.org/algorithms/norwegian/stemmer.html[*`norwegian`*], From 3619cce53ba712ea523edcedc2fa0151f4844100 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 1 Sep 2015 15:16:29 +0200 Subject: [PATCH 38/51] Update licenses for analysis plugins. --- .../analysis-icu/licenses/analysis-icu-2.1.0-SNAPSHOT.jar.sha1 | 1 + .../analysis-icu/licenses/lucene-analyzers-icu-5.2.1.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analyzers-icu-5.3.0.jar.sha1 | 1 + .../licenses/analysis-kuromoji-2.1.0-SNAPSHOT.jar.sha1 | 1 + .../licenses/lucene-analyzers-kuromoji-5.2.1.jar.sha1 | 1 - .../licenses/lucene-analyzers-kuromoji-5.3.0.jar.sha1 | 1 + .../licenses/analysis-phonetic-2.1.0-SNAPSHOT.jar.sha1 | 1 + .../licenses/lucene-analyzers-phonetic-5.2.1.jar.sha1 | 1 - .../licenses/lucene-analyzers-phonetic-5.3.0.jar.sha1 | 1 + .../licenses/analysis-smartcn-2.1.0-SNAPSHOT.jar.sha1 | 1 + .../licenses/lucene-analyzers-smartcn-5.2.1.jar.sha1 | 1 - .../licenses/lucene-analyzers-smartcn-5.3.0.jar.sha1 | 1 + .../licenses/analysis-stempel-2.1.0-SNAPSHOT.jar.sha1 | 1 + .../licenses/lucene-analyzers-stempel-5.2.1.jar.sha1 | 1 - .../licenses/lucene-analyzers-stempel-5.3.0.jar.sha1 | 1 + 15 files changed, 10 insertions(+), 5 deletions(-) create mode 100644 plugins/analysis-icu/licenses/analysis-icu-2.1.0-SNAPSHOT.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-5.2.1.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-5.3.0.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/analysis-kuromoji-2.1.0-SNAPSHOT.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.2.1.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.3.0.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/analysis-phonetic-2.1.0-SNAPSHOT.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.2.1.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.3.0.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/analysis-smartcn-2.1.0-SNAPSHOT.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.2.1.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.3.0.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/analysis-stempel-2.1.0-SNAPSHOT.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.2.1.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.3.0.jar.sha1 diff --git a/plugins/analysis-icu/licenses/analysis-icu-2.1.0-SNAPSHOT.jar.sha1 
b/plugins/analysis-icu/licenses/analysis-icu-2.1.0-SNAPSHOT.jar.sha1 new file mode 100644 index 00000000000..0ffab888d78 --- /dev/null +++ b/plugins/analysis-icu/licenses/analysis-icu-2.1.0-SNAPSHOT.jar.sha1 @@ -0,0 +1 @@ +bbff73b298fa832fa55c848e554a7cb6a892ecf5 diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.2.1.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.2.1.jar.sha1 deleted file mode 100644 index b98bcd59656..00000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b80ddba6b892937f3e9b8321fa6cef5cdd0fadfa diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.3.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.3.0.jar.sha1 new file mode 100644 index 00000000000..393ebc59ee0 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-5.3.0.jar.sha1 @@ -0,0 +1 @@ +e6dd489db555ad84279732c5f189406d20b63c84 diff --git a/plugins/analysis-kuromoji/licenses/analysis-kuromoji-2.1.0-SNAPSHOT.jar.sha1 b/plugins/analysis-kuromoji/licenses/analysis-kuromoji-2.1.0-SNAPSHOT.jar.sha1 new file mode 100644 index 00000000000..9d3d628c2ed --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/analysis-kuromoji-2.1.0-SNAPSHOT.jar.sha1 @@ -0,0 +1 @@ +c48fe705c1f87f2382ab6ca0c1f43d2acde4cf4e diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.2.1.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.2.1.jar.sha1 deleted file mode 100644 index 0553975c25a..00000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bd348c9f18662b73d22427209075a739ad33d689 diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.3.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.3.0.jar.sha1 new file mode 100644 index 00000000000..b9e01cd40fd --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-5.3.0.jar.sha1 @@ -0,0 +1 @@ +b3e67473646e3869fcdeb4a3151ab597b957fbf2 diff --git a/plugins/analysis-phonetic/licenses/analysis-phonetic-2.1.0-SNAPSHOT.jar.sha1 b/plugins/analysis-phonetic/licenses/analysis-phonetic-2.1.0-SNAPSHOT.jar.sha1 new file mode 100644 index 00000000000..154794e15c5 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/analysis-phonetic-2.1.0-SNAPSHOT.jar.sha1 @@ -0,0 +1 @@ +01b0800c08719aebae92b293556bc86dac52f182 diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.2.1.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.2.1.jar.sha1 deleted file mode 100644 index ccd7a4db633..00000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf61aebd9d895884dcac77e7ed17e45683ddbd66 diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.3.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.3.0.jar.sha1 new file mode 100644 index 00000000000..1008732a647 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-5.3.0.jar.sha1 @@ -0,0 +1 @@ +471f3ee15053413e75c5c24a978494a6d4984240 diff --git a/plugins/analysis-smartcn/licenses/analysis-smartcn-2.1.0-SNAPSHOT.jar.sha1 b/plugins/analysis-smartcn/licenses/analysis-smartcn-2.1.0-SNAPSHOT.jar.sha1 new file mode 100644 index 00000000000..161b9d9c74d --- /dev/null +++ b/plugins/analysis-smartcn/licenses/analysis-smartcn-2.1.0-SNAPSHOT.jar.sha1 @@ -0,0 +1 @@ 
+4ac9dcbf65b44c46eaaa3f5685cd989ddcba8b90 diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.2.1.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.2.1.jar.sha1 deleted file mode 100644 index c514c619955..00000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -81a9e3dc63fb3661cccfa7e90e1b38535e895933 diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.3.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.3.0.jar.sha1 new file mode 100644 index 00000000000..34377b92824 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-5.3.0.jar.sha1 @@ -0,0 +1 @@ +e37000b73d34ba33dda26f46893b09ba275c5294 diff --git a/plugins/analysis-stempel/licenses/analysis-stempel-2.1.0-SNAPSHOT.jar.sha1 b/plugins/analysis-stempel/licenses/analysis-stempel-2.1.0-SNAPSHOT.jar.sha1 new file mode 100644 index 00000000000..75f23688919 --- /dev/null +++ b/plugins/analysis-stempel/licenses/analysis-stempel-2.1.0-SNAPSHOT.jar.sha1 @@ -0,0 +1 @@ +3651cf61aff23b8af5032ead5120ba6e7452c2b6 diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.2.1.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.2.1.jar.sha1 deleted file mode 100644 index f542650fccf..00000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.2.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bf939976dc70ccab9a9ba40bde58c247afb72d99 diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.3.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.3.0.jar.sha1 new file mode 100644 index 00000000000..6c2857f65b1 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-5.3.0.jar.sha1 @@ -0,0 +1 @@ +fcc4bf8ccbda52435d13525d7cfc66cecf5c5125 From c6d282f9f69375e06e3484319c490ba81a24fbc7 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 1 Sep 2015 17:44:57 +0200 Subject: [PATCH 39/51] Remove extra licenses --- .../analysis-icu/licenses/analysis-icu-2.1.0-SNAPSHOT.jar.sha1 | 1 - .../licenses/analysis-kuromoji-2.1.0-SNAPSHOT.jar.sha1 | 1 - .../licenses/analysis-phonetic-2.1.0-SNAPSHOT.jar.sha1 | 1 - .../licenses/analysis-smartcn-2.1.0-SNAPSHOT.jar.sha1 | 1 - .../licenses/analysis-stempel-2.1.0-SNAPSHOT.jar.sha1 | 1 - 5 files changed, 5 deletions(-) delete mode 100644 plugins/analysis-icu/licenses/analysis-icu-2.1.0-SNAPSHOT.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/analysis-kuromoji-2.1.0-SNAPSHOT.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/analysis-phonetic-2.1.0-SNAPSHOT.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/analysis-smartcn-2.1.0-SNAPSHOT.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/analysis-stempel-2.1.0-SNAPSHOT.jar.sha1 diff --git a/plugins/analysis-icu/licenses/analysis-icu-2.1.0-SNAPSHOT.jar.sha1 b/plugins/analysis-icu/licenses/analysis-icu-2.1.0-SNAPSHOT.jar.sha1 deleted file mode 100644 index 0ffab888d78..00000000000 --- a/plugins/analysis-icu/licenses/analysis-icu-2.1.0-SNAPSHOT.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bbff73b298fa832fa55c848e554a7cb6a892ecf5 diff --git a/plugins/analysis-kuromoji/licenses/analysis-kuromoji-2.1.0-SNAPSHOT.jar.sha1 b/plugins/analysis-kuromoji/licenses/analysis-kuromoji-2.1.0-SNAPSHOT.jar.sha1 deleted file mode 100644 index 9d3d628c2ed..00000000000 --- a/plugins/analysis-kuromoji/licenses/analysis-kuromoji-2.1.0-SNAPSHOT.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-c48fe705c1f87f2382ab6ca0c1f43d2acde4cf4e diff --git a/plugins/analysis-phonetic/licenses/analysis-phonetic-2.1.0-SNAPSHOT.jar.sha1 b/plugins/analysis-phonetic/licenses/analysis-phonetic-2.1.0-SNAPSHOT.jar.sha1 deleted file mode 100644 index 154794e15c5..00000000000 --- a/plugins/analysis-phonetic/licenses/analysis-phonetic-2.1.0-SNAPSHOT.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -01b0800c08719aebae92b293556bc86dac52f182 diff --git a/plugins/analysis-smartcn/licenses/analysis-smartcn-2.1.0-SNAPSHOT.jar.sha1 b/plugins/analysis-smartcn/licenses/analysis-smartcn-2.1.0-SNAPSHOT.jar.sha1 deleted file mode 100644 index 161b9d9c74d..00000000000 --- a/plugins/analysis-smartcn/licenses/analysis-smartcn-2.1.0-SNAPSHOT.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ac9dcbf65b44c46eaaa3f5685cd989ddcba8b90 diff --git a/plugins/analysis-stempel/licenses/analysis-stempel-2.1.0-SNAPSHOT.jar.sha1 b/plugins/analysis-stempel/licenses/analysis-stempel-2.1.0-SNAPSHOT.jar.sha1 deleted file mode 100644 index 75f23688919..00000000000 --- a/plugins/analysis-stempel/licenses/analysis-stempel-2.1.0-SNAPSHOT.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3651cf61aff23b8af5032ead5120ba6e7452c2b6 From 7cbd0a1bf53081c66ace79b23cdeedfafaad34c6 Mon Sep 17 00:00:00 2001 From: Britta Weber Date: Tue, 1 Sep 2015 17:54:44 +0200 Subject: [PATCH 40/51] [test] delete index after test for repetition DateHistogramIT has suite scope and therefore must take care of cleaning up after each test. Otherwise we cannot run tests several times with tests.iters. --- .../aggregations/bucket/DateHistogramIT.java | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index 5b815a40544..04f54000774 100644 --- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -50,17 +50,10 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; -import static org.elasticsearch.search.aggregations.AggregationBuilders.max; -import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; -import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.AggregationBuilders.*; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.*; import static org.hamcrest.core.IsNull.notNullValue; /** @@ -1260,6 +1253,7 @@ public class DateHistogramIT extends ESIntegTestCase { assertThat(bucket.getDocCount(), equalTo(0l)); } } + internalCluster().wipeIndices("test12278"); } @Test @@ -1343,6 +1337,7 @@ public class DateHistogramIT extends ESIntegTestCase { Histogram histo = response.getAggregations().get("histo"); assertThat(histo.getBuckets().size(),
equalTo(1)); assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("2014-01-01T00:00:00.000+02:00")); + internalCluster().wipeIndices("test9491"); } public void testIssue8209() throws InterruptedException, ExecutionException { @@ -1367,6 +1362,7 @@ public class DateHistogramIT extends ESIntegTestCase { assertThat(histo.getBuckets().get(2).getDocCount(), equalTo(0L)); assertThat(histo.getBuckets().get(3).getKeyAsString(), equalTo("2014-04-01T00:00:00.000+02:00")); assertThat(histo.getBuckets().get(3).getDocCount(), equalTo(2L)); + internalCluster().wipeIndices("test8209"); } /** From e4410482fe4f52aa1333f0bae2f1a3213fca17cc Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 21 Aug 2015 13:05:50 -0700 Subject: [PATCH 41/51] Packaging: Install all plugins during bats tests Related to #12717 --- qa/vagrant/pom.xml | 92 ++++- .../packaging/scripts/20_tar_package.bats | 12 +- .../packaging/scripts/25_tar_plugins.bats | 316 +----------------- .../packaging/scripts/50_plugins.bats | 260 +------------- .../scripts/packaging_test_utils.bash | 95 +++--- .../packaging/scripts/plugin_test_cases.bash | 185 ++++++++++ .../resources/packaging/scripts/plugins.bash | 104 ++++++ .../test/resources/packaging/scripts/tar.bash | 73 ++++ 8 files changed, 493 insertions(+), 644 deletions(-) mode change 100644 => 120000 qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats mode change 100644 => 120000 qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats create mode 100644 qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash create mode 100644 qa/vagrant/src/test/resources/packaging/scripts/plugins.bash create mode 100644 qa/vagrant/src/test/resources/packaging/scripts/tar.bash diff --git a/qa/vagrant/pom.xml b/qa/vagrant/pom.xml index 8f3b8ade0f8..ed9422b64e2 100644 --- a/qa/vagrant/pom.xml +++ b/qa/vagrant/pom.xml @@ -98,6 +98,90 @@ ${elasticsearch.version} zip + + org.elasticsearch.plugin + analysis-icu + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + analysis-kuromoji + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + analysis-phonetic + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + analysis-smartcn + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + analysis-stempel + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + cloud-aws + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + cloud-azure + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + cloud-gce + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + delete-by-query + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + lang-javascript + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + lang-python + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + mapper-size + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + mapper-murmur3 + ${elasticsearch.version} + zip + + + org.elasticsearch.plugin + site-example + ${elasticsearch.version} + zip + @@ -192,14 +276,6 @@ - - - org.elasticsearch.distribution - elasticsearch-rpm - ${elasticsearch.version} - rpm - - ok diff --git a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats index 61210d2df68..a4332ce4eb1 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/20_tar_package.bats @@ -30,6 +30,7 @@ # Load test utilities load 
packaging_test_utils +load tar setup() { skip_not_tar_gz @@ -61,13 +62,12 @@ setup() { count=$(find /tmp -type d -name 'elasticsearch*' | wc -l) [ "$count" -eq 1 ] -} -################################## -# Check that the archive is correctly installed -################################## -@test "[TAR] verify archive installation" { - verify_archive_installation "/tmp/elasticsearch" + # Its simpler to check that the install was correct in this test rather + # than in another test because install_archive sets a number of path + # variables that verify_archive_installation reads. To separate this into + # another test you'd have to recreate the variables. + verify_archive_installation } ################################## diff --git a/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats b/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats deleted file mode 100644 index 2b8fe631cec..00000000000 --- a/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats +++ /dev/null @@ -1,315 +0,0 @@ -#!/usr/bin/env bats - -# This file is used to test the installation and removal -# of plugins with a tar gz archive. - -# WARNING: This testing file must be executed as root and can -# dramatically change your system. It removes the 'elasticsearch' -# user/group and also many directories. Do not execute this file -# unless you know exactly what you are doing. - -# The test case can be executed with the Bash Automated -# Testing System tool available at https://github.com/sstephenson/bats -# Thanks to Sam Stephenson! - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Load test utilities -load packaging_test_utils - -setup() { - # Cleans everything for every test execution - clean_before_test -} - -################################## -# Install plugins with a tar archive -################################## -@test "[TAR] install jvm-example plugin" { - # Install the archive - install_archive - - # Checks that the archive is correctly installed - verify_archive_installation - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run /tmp/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly installed - assert_file_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/bin/jvm-example/test" - assert_file_exist "/tmp/elasticsearch/config/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example/example.yaml" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - echo "Running jvm-example's bin script...." 
- /tmp/elasticsearch/bin/jvm-example/test | grep test - - # Remove the plugin - run /tmp/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example/example.yaml" - assert_file_not_exist "/tmp/elasticsearch/plugins/jvm-example" -} - -@test "[TAR] install jvm-example plugin with a custom path.plugins" { - # Install the archive - install_archive - - # Checks that the archive is correctly installed - verify_archive_installation - - # Creates a temporary directory - TEMP_PLUGINS_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'` - - # Modify the path.plugins setting in configuration file - echo "path.plugins: $TEMP_PLUGINS_DIR" >> "/tmp/elasticsearch/config/elasticsearch.yml" - - run chown -R elasticsearch:elasticsearch "$TEMP_PLUGINS_DIR" - [ "$status" -eq 0 ] - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run /tmp/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly installed - assert_file_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/bin/jvm-example/test" - assert_file_exist "/tmp/elasticsearch/config/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example/example.yaml" - assert_file_exist "$TEMP_PLUGINS_DIR/jvm-example" - assert_file_exist "$TEMP_PLUGINS_DIR/jvm-example/plugin-descriptor.properties" - assert_file_exist "$TEMP_PLUGINS_DIR/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /tmp/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example/example.yaml" - assert_file_not_exist "$TEMP_PLUGINS_DIR/jvm-example" - - # Delete the custom plugins directory - run rm -rf "$TEMP_PLUGINS_DIR" - [ "$status" -eq 0 ] -} - -@test "[TAR] install jvm-example plugin with a custom CONFIG_DIR" { - # Install the archive - install_archive - - # Checks that the archive is correctly installed - verify_archive_installation - - # Creates a temporary directory - TEMP_CONFIG_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'` - - # Move configuration files to the new configuration directory - run mv /tmp/elasticsearch/config/* $TEMP_CONFIG_DIR - [ "$status" -eq 0 ] - - run chown -R elasticsearch:elasticsearch "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - assert_file_exist "$TEMP_CONFIG_DIR/elasticsearch.yml" - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example with the CONF_DIR environment variable - run env "CONF_DIR=$TEMP_CONFIG_DIR" /tmp/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/bin/jvm-example/test" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist 
"/tmp/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /tmp/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_not_exist "/tmp/elasticsearch/plugins/jvm-example" - - # Delete the custom plugins directory - run rm -rf "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] -} - -@test "[TAR] install jvm-example plugin with a custom ES_JAVA_OPTS" { - # Install the archive - install_archive - - # Checks that the archive is correctly installed - verify_archive_installation - - # Creates a temporary directory - TEMP_CONFIG_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'` - - # Move configuration files to the new configuration directory - run mv /tmp/elasticsearch/config/* $TEMP_CONFIG_DIR - [ "$status" -eq 0 ] - - run chown -R elasticsearch:elasticsearch "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - assert_file_exist "$TEMP_CONFIG_DIR/elasticsearch.yml" - - # Export ES_JAVA_OPTS - export ES_JAVA_OPTS="-Des.path.conf=$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run /tmp/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/bin/jvm-example/test" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /tmp/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_not_exist "/tmp/elasticsearch/plugins/jvm-example" - - # Delete the custom plugins directory - run rm -rf "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] -} - -@test "[TAR] install jvm-example plugin to elasticsearch directory with a space" { - export ES_DIR="/tmp/elastic search" - - # Install the archive - install_archive - - # Checks that the archive is correctly installed - verify_archive_installation - - # Move the Elasticsearch installation to a directory with a space in it - rm -rf "$ES_DIR" - mv /tmp/elasticsearch "$ES_DIR" - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run "$ES_DIR/bin/plugin" install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "$ES_DIR/bin/jvm-example" - assert_file_exist "$ES_DIR/bin/jvm-example/test" - assert_file_exist "$ES_DIR/config/jvm-example" - assert_file_exist "$ES_DIR/config/jvm-example/example.yaml" - assert_file_exist "$ES_DIR/plugins/jvm-example" - assert_file_exist "$ES_DIR/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "$ES_DIR/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run 
"$ES_DIR/bin/plugin" remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "$ES_DIR/bin/jvm-example" - assert_file_exist "$ES_DIR/config/jvm-example" - assert_file_exist "$ES_DIR/config/jvm-example/example.yaml" - assert_file_not_exist "$ES_DIR/plugins/jvm-example" - - #Cleanup our temporary Elasticsearch installation - rm -rf "$ES_DIR" -} - -@test "[TAR] install jvm-example plugin from a directory with a space" { - export EXAMPLE_PLUGIN_ZIP_WITH_SPACE="/tmp/plugins with space/jvm-example.zip" - - # Install the archive - install_archive - - # Checks that the archive is correctly installed - verify_archive_installation - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Copy the jvm-example plugin to a directory with a space in it - rm -f "$EXAMPLE_PLUGIN_ZIP_WITH_SPACE" - mkdir -p "$(dirname "$EXAMPLE_PLUGIN_ZIP_WITH_SPACE")" - cp $EXAMPLE_PLUGIN_ZIP "$EXAMPLE_PLUGIN_ZIP_WITH_SPACE" - - # Install jvm-example - run /tmp/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP_WITH_SPACE" - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly installed - assert_file_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/bin/jvm-example/test" - assert_file_exist "/tmp/elasticsearch/config/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example/example.yaml" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/tmp/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /tmp/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/tmp/elasticsearch/bin/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example" - assert_file_exist "/tmp/elasticsearch/config/jvm-example/example.yaml" - assert_file_not_exist "/tmp/elasticsearch/plugins/jvm-example" - - #Cleanup our plugin directory with a space - rm -rf "$EXAMPLE_PLUGIN_ZIP_WITH_SPACE" -} diff --git a/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats b/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats new file mode 120000 index 00000000000..8f55b1eb78c --- /dev/null +++ b/qa/vagrant/src/test/resources/packaging/scripts/25_tar_plugins.bats @@ -0,0 +1 @@ +plugin_test_cases.bash \ No newline at end of file diff --git a/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats b/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats deleted file mode 100644 index 77ce7f0cecb..00000000000 --- a/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env bats - -# This file is used to test the installation and removal -# of plugins when Elasticsearch is installed as a DEB/RPM -# package. - -# WARNING: This testing file must be executed as root and can -# dramatically change your system. It removes the 'elasticsearch' -# user/group and also many directories. Do not execute this file -# unless you know exactly what you are doing. - -# The test case can be executed with the Bash Automated -# Testing System tool available at https://github.com/sstephenson/bats -# Thanks to Sam Stephenson! - -# Licensed to Elasticsearch under one or more contributor -# license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Load test utilities -load packaging_test_utils - -setup() { - # Cleans everything for every test execution - clean_before_test -} - -# Install a deb or rpm package -install_package() { - if is_rpm; then - run rpm -i elasticsearch*.rpm >&2 - [ "$status" -eq 0 ] - - elif is_dpkg; then - run dpkg -i elasticsearch*.deb >&2 - [ "$status" -eq 0 ] - fi -} - -################################## -# Install plugins with DEB/RPM package -################################## -@test "[PLUGINS] install jvm-example plugin" { - # Install the package - install_package - - # Checks that the package is correctly installed - verify_package_installation - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run /usr/share/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example/test" - assert_file_exist "/etc/elasticsearch/jvm-example" - assert_file_exist "/etc/elasticsearch/jvm-example/example.yaml" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /usr/share/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "/etc/elasticsearch/jvm-example" - assert_file_exist "/etc/elasticsearch/jvm-example/example.yaml" - assert_file_not_exist "/usr/share/elasticsearch/plugins/jvm-example" -} - -@test "[PLUGINS] install jvm-example plugin with a custom path.plugins" { - # Install the package - install_package - - # Checks that the package is correctly installed - verify_package_installation - - # Creates a temporary directory - TEMP_PLUGINS_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'` - - # Modify the path.plugins setting in configuration file - echo "path.plugins: $TEMP_PLUGINS_DIR" >> "/etc/elasticsearch/elasticsearch.yml" - - # Sets privileges - run chown -R root:elasticsearch "$TEMP_PLUGINS_DIR" - [ "$status" -eq 0 ] - - run chmod -R 750 "$TEMP_PLUGINS_DIR" - [ "$status" -eq 0 ] - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run /usr/share/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example/test" - 
assert_file_exist "/etc/elasticsearch/jvm-example" - assert_file_exist "/etc/elasticsearch/jvm-example/example.yaml" - assert_file_exist "$TEMP_PLUGINS_DIR/jvm-example" - assert_file_exist "$TEMP_PLUGINS_DIR/jvm-example/plugin-descriptor.properties" - assert_file_exist "$TEMP_PLUGINS_DIR/jvm-example/jvm-example-"*".jar" - - - # Remove the plugin - run /usr/share/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "/etc/elasticsearch/jvm-example" - assert_file_exist "/etc/elasticsearch/jvm-example/example.yaml" - assert_file_not_exist "$TEMP_PLUGINS_DIR/jvm-example" - - # Delete the custom plugins directory - run rm -rf "$TEMP_PLUGINS_DIR" - [ "$status" -eq 0 ] -} - -@test "[PLUGINS] install jvm-example plugin with a custom CONFIG_DIR" { - # Install the package - install_package - - # Checks that the package is correctly installed - verify_package_installation - - # Creates a temporary directory - TEMP_CONFIG_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'` - - # Modify the CONF_DIR variable in environment file - if is_rpm; then - echo "CONF_DIR=$TEMP_CONFIG_DIR" >> "/etc/sysconfig/elasticsearch" - elif is_dpkg; then - echo "CONF_DIR=$TEMP_CONFIG_DIR" >> "/etc/default/elasticsearch" - fi - - # Move configuration files to the new configuration directory - run mv /etc/elasticsearch/* $TEMP_CONFIG_DIR - [ "$status" -eq 0 ] - - assert_file_exist "$TEMP_CONFIG_DIR/elasticsearch.yml" - - # Sets privileges - run chown -R root:elasticsearch "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - run chmod -R 750 "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-exampel - run /usr/share/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example/test" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /usr/share/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_not_exist "/usr/share/elasticsearch/plugins/jvm-example" - - # Delete the custom plugins directory - run rm -rf "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] -} - -@test "[PLUGINS] install jvm-example plugin with a custom ES_JAVA_OPTS" { - # Install the package - install_package - - # Checks that the package is correctly installed - verify_package_installation - - # Creates a temporary directory - TEMP_CONFIG_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'tmp'` - - # Move configuration files to the new configuration directory - run mv /etc/elasticsearch/* $TEMP_CONFIG_DIR - [ "$status" -eq 0 ] - - assert_file_exist "$TEMP_CONFIG_DIR/elasticsearch.yml" - - # Sets privileges - run chown -R root:elasticsearch 
"$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - run chmod -R 750 "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - # Export ES_JAVA_OPTS - export ES_JAVA_OPTS="-Des.path.conf=$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] - - # Checks that plugin archive is available - [ -e "$EXAMPLE_PLUGIN_ZIP" ] - - # Install jvm-example - run /usr/share/elasticsearch/bin/plugin install jvm-example -u "file://$EXAMPLE_PLUGIN_ZIP" - [ "$status" -eq 0 ] - - # Checks that jvm-example is correctly installed - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "/usr/share/elasticsearch/bin/jvm-example/test" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example/plugin-descriptor.properties" - assert_file_exist "/usr/share/elasticsearch/plugins/jvm-example/jvm-example-"*".jar" - - # Remove the plugin - run /usr/share/elasticsearch/bin/plugin remove jvm-example - [ "$status" -eq 0 ] - - # Checks that the plugin is correctly removed - assert_file_not_exist "/usr/share/elasticsearch/bin/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example" - assert_file_exist "$TEMP_CONFIG_DIR/jvm-example/example.yaml" - assert_file_not_exist "/usr/share/elasticsearch/plugins/jvm-example" - - # Delete the custom plugins directory - run rm -rf "$TEMP_CONFIG_DIR" - [ "$status" -eq 0 ] -} diff --git a/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats b/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats new file mode 120000 index 00000000000..8f55b1eb78c --- /dev/null +++ b/qa/vagrant/src/test/resources/packaging/scripts/50_plugins.bats @@ -0,0 +1 @@ +plugin_test_cases.bash \ No newline at end of file diff --git a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash index cbb68389cc0..ec58dd0816d 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash @@ -25,10 +25,6 @@ # specific language governing permissions and limitations # under the License. - -# Variables used by tests -EXAMPLE_PLUGIN_ZIP=$(readlink -m jvm-example-*.zip) - # Checks if necessary commands are available to run the tests if [ ! -x /usr/bin/which ]; then @@ -151,16 +147,18 @@ assert_file_not_exist() { } assert_file() { - local file=$1 + local file="$1" local type=$2 local user=$3 local privileges=$4 - [ -n "$file" ] && [ -e "$file" ] + assert_file_exist "$file" if [ "$type" = "d" ]; then + echo "And be a directory...." [ -d "$file" ] else + echo "And be a regular file...." 
[ -f "$file" ] fi @@ -179,6 +177,17 @@ assert_output() { echo "$output" | grep -E "$1" } +# Install the rpm or deb package +install_package() { + if is_rpm; then + rpm -i elasticsearch*.rpm + elif is_dpkg; then + dpkg -i elasticsearch*.deb + else + skip "Only rpm or deb supported" + fi +} + # Checks that all directories & files are correctly installed # after a package (deb/rpm) install verify_package_installation() { @@ -234,59 +243,21 @@ verify_package_installation() { fi } - -# Install the tar.gz archive -install_archive() { - local eshome="/tmp" - if [ "x$1" != "x" ]; then - eshome="$1" - fi - - tar -xzvf elasticsearch*.tar.gz -C "$eshome" - - find "$eshome" -depth -type d -name 'elasticsearch*' -exec mv {} "$eshome/elasticsearch" \; - - # ES cannot run as root so create elasticsearch user & group if needed - if ! getent group "elasticsearch" > /dev/null 2>&1 ; then - if is_dpkg; then - addgroup --system "elasticsearch" - else - groupadd -r "elasticsearch" - fi - fi - if ! id "elasticsearch" > /dev/null 2>&1 ; then - if is_dpkg; then - adduser --quiet --system --no-create-home --ingroup "elasticsearch" --disabled-password --shell /bin/false "elasticsearch" - else - useradd --system -M --gid "elasticsearch" --shell /sbin/nologin --comment "elasticsearch user" "elasticsearch" - fi - fi - - chown -R elasticsearch:elasticsearch "$eshome/elasticsearch" -} - - # Checks that all directories & files are correctly installed # after a archive (tar.gz/zip) install verify_archive_installation() { - local eshome="/tmp/elasticsearch" - if [ "x$1" != "x" ]; then - eshome="$1" - fi - - assert_file "$eshome" d - assert_file "$eshome/bin" d - assert_file "$eshome/bin/elasticsearch" f - assert_file "$eshome/bin/elasticsearch.in.sh" f - assert_file "$eshome/bin/plugin" f - assert_file "$eshome/config" d - assert_file "$eshome/config/elasticsearch.yml" f - assert_file "$eshome/config/logging.yml" f - assert_file "$eshome/config" d - assert_file "$eshome/lib" d - assert_file "$eshome/NOTICE.txt" f - assert_file "$eshome/LICENSE.txt" f - assert_file "$eshome/README.textile" f + assert_file "$ESHOME" d + assert_file "$ESHOME/bin" d + assert_file "$ESHOME/bin/elasticsearch" f + assert_file "$ESHOME/bin/elasticsearch.in.sh" f + assert_file "$ESHOME/bin/plugin" f + assert_file "$ESCONFIG" d + assert_file "$ESCONFIG/elasticsearch.yml" f + assert_file "$ESCONFIG/logging.yml" f + assert_file "$ESHOME/lib" d + assert_file "$ESHOME/NOTICE.txt" f + assert_file "$ESHOME/LICENSE.txt" f + assert_file "$ESHOME/README.textile" f } # Deletes everything before running a test file @@ -478,3 +449,15 @@ run_elasticsearch_tests() { curl -s -XDELETE 'http://localhost:9200/_all' } + +# Move the config directory to another directory and properly chown it. 
+move_config() {
+    local oldConfig="$ESCONFIG"
+    export ESCONFIG="${1:-$(mktemp -d -t 'config.XXXX')}"
+    echo "Moving configuration directory from $oldConfig to $ESCONFIG"
+
+    # Move configuration files to the new configuration directory
+    mv "$oldConfig"/* "$ESCONFIG"
+    chown -R elasticsearch:elasticsearch "$ESCONFIG"
+    assert_file_exist "$ESCONFIG/elasticsearch.yml"
+}
diff --git a/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash b/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash
new file mode 100644
index 00000000000..ebd23eeba2d
--- /dev/null
+++ b/qa/vagrant/src/test/resources/packaging/scripts/plugin_test_cases.bash
@@ -0,0 +1,185 @@
+#!/usr/bin/env bats
+
+# This file is used to test the installation and removal
+# of plugins after Elasticsearch has been installed with tar.gz,
+# rpm, and deb.
+
+# WARNING: This testing file must be executed as root and can
+# dramatically change your system. It removes the 'elasticsearch'
+# user/group and also many directories. Do not execute this file
+# unless you know exactly what you are doing.
+
+# The test case can be executed with the Bash Automated
+# Testing System tool available at https://github.com/sstephenson/bats
+# Thanks to Sam Stephenson!
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+##################################
+# Common test cases for both tar and rpm/deb based plugin tests
+##################################
+# This file is symlinked to both 25_tar_plugins.bats and 50_plugins.bats so it's
+# executed twice - once to test plugins using the tar distribution and once to
+# test plugins using the rpm distribution or the deb distribution, whichever the
+# system uses.
+
+# Load test utilities
+load packaging_test_utils
+load plugins
+
+setup() {
+    # The rules on when we should clean and reinstall are complex - all the
+    # jvm-example tests need cleaning because they are rough on the filesystem.
+    # The first test and any test that finds itself without an ESHOME need to
+    # clean as well.... this is mostly going to happen on the first
+    # non-jvm-example plugin test _and_ on any first test if you comment out the
+    # other tests. Commenting out lots of test cases seems like a reasonably
+    # common workflow.
+    if [ $BATS_TEST_NUMBER == 1 ] ||
+            [[ $BATS_TEST_NAME =~ install_jvm.*example ]] ||
+            [ !
-d "$ESHOME" ]; then + echo "cleaning" >> /tmp/ss + clean_before_test + install + fi +} + +if [[ "$BATS_TEST_FILENAME" =~ 25_tar_plugins.bats$ ]]; then + load tar + GROUP='TAR PLUGINS' + install() { + install_archive + verify_archive_installation + } + export ESHOME=/tmp/elasticsearch + export_elasticsearch_paths +else + if is_rpm; then + GROUP='RPM PLUGINS' + elif is_dpkg; then + GROUP='DEB PLUGINS' + fi + export ESHOME="/usr/share/elasticsearch" + export ESPLUGINS="$ESHOME/plugins" + export ESCONFIG="/etc/elasticsearch" + install() { + install_package + verify_package_installation + } +fi + +@test "[$GROUP] install jvm-example plugin" { + install_jvm_example + remove_jvm_example +} + +@test "[$GROUP] install jvm-example plugin with a custom path.plugins" { + # Clean up after the last time this test was run + rm -rf /tmp/plugins.* + + local oldPlugins="$ESPLUGINS" + export ESPLUGINS=$(mktemp -d -t 'plugins.XXXX') + + # Modify the path.plugins setting in configuration file + echo "path.plugins: $ESPLUGINS" >> "$ESCONFIG/elasticsearch.yml" + chown -R elasticsearch:elasticsearch "$ESPLUGINS" + + install_jvm_example + remove_jvm_example +} + +@test "[$GROUP] install jvm-example plugin with a custom CONFIG_DIR" { + # Clean up after the last time we ran this test + rm -rf /tmp/config.* + + move_config + + CONF_DIR="$ESCONFIG" install_jvm_example + CONF_DIR="$ESCONFIG" remove_jvm_example +} + +@test "[$GROUP] install jvm-example plugin from a directory with a space" { + rm -rf "/tmp/plugins with space" + mkdir -p "/tmp/plugins with space" + local zip=$(ls jvm-example-*.zip) + cp $zip "/tmp/plugins with space" + + install_jvm_example "/tmp/plugins with space/$zip" + remove_jvm_example +} + +@test "[$GROUP] install jvm-example plugin to elasticsearch directory with a space" { + [ "$GROUP" == "TAR PLUGINS" ] || skip "Test case only supported by TAR PLUGINS" + + move_elasticsearch "/tmp/elastic search" + + install_jvm_example + remove_jvm_example +} + +@test "[$GROUP] install icu plugin" { + install_and_remove_special_plugin analysis icu icu4j-*.jar +} + +@test "[$GROUP] install kuromoji plugin" { + install_and_remove_special_plugin analysis kuromoji +} + +@test "[$GROUP] install phonetic plugin" { + install_and_remove_special_plugin analysis phonetic commons-codec-*.jar +} + +@test "[$GROUP] install smartcn plugin" { + install_and_remove_special_plugin analysis smartcn +} + +@test "[$GROUP] install stempel plugin" { + install_and_remove_special_plugin analysis stempel +} + +@test "[$GROUP] install aws plugin" { + install_and_remove_special_plugin cloud aws aws-java-sdk-core-*.jar +} + +@test "[$GROUP] install azure plugin" { + install_and_remove_special_plugin cloud azure azure-core-*.jar +} + +@test "[$GROUP] install gce plugin" { + install_and_remove_special_plugin cloud gce google-api-client-*.jar +} + +@test "[$GROUP] install delete by query" { + install_and_remove_special_plugin - delete-by-query +} + +@test "[$GROUP] install javascript plugin" { + install_and_remove_special_plugin lang javascript rhino-*.jar +} + +@test "[$GROUP] install python plugin" { + install_and_remove_special_plugin lang python jython-standalone-*.jar +} + +@test "[$GROUP] install murmur3 mapper" { + install_and_remove_special_plugin mapper murmur3 +} + +@test "[$GROUP] install size mapper" { + install_and_remove_special_plugin mapper size +} diff --git a/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash b/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash new file mode 100644 index 
00000000000..2ca6e7501ab
--- /dev/null
+++ b/qa/vagrant/src/test/resources/packaging/scripts/plugins.bash
@@ -0,0 +1,104 @@
+#!/bin/sh
+
+# This file contains some utilities to test the elasticsearch scripts,
+# the .deb/.rpm packages and the SysV/Systemd scripts.
+
+# WARNING: This testing file must be executed as root and can
+# dramatically change your system. It removes the 'elasticsearch'
+# user/group and also many directories. Do not execute this file
+# unless you know exactly what you are doing.
+
+# Licensed to Elasticsearch under one or more contributor
+# license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright
+# ownership. Elasticsearch licenses this file to you under
+# the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Install a plugin and run all the common post installation tests.
+install_plugin() {
+    local name=$1
+    local path="$2"
+
+    assert_file_exist "$path"
+
+    "$ESHOME/bin/plugin" install "file://$path"
+
+    assert_file_exist "$ESPLUGINS/$name"
+    assert_file_exist "$ESPLUGINS/$name/plugin-descriptor.properties"
+    assert_file_exist "$ESPLUGINS/$name/$name"*".jar"
+}
+
+# Remove a plugin and make sure its plugin directory is removed.
+remove_plugin() {
+    local name=$1
+
+    echo "Removing $name...."
+    "$ESHOME/bin/plugin" remove $name
+
+    assert_file_not_exist "$ESPLUGINS/$name"
+}
+
+# Install the jvm-example plugin which fully exercises the special case file
+# placements for non-site plugins.
+install_jvm_example() {
+    local relativePath=${1:-$(readlink -m jvm-example-*.zip)}
+    install_plugin jvm-example "$relativePath"
+
+    assert_file_exist "$ESHOME/bin/jvm-example"
+    assert_file_exist "$ESHOME/bin/jvm-example/test"
+    assert_file_exist "$ESCONFIG/jvm-example"
+    assert_file_exist "$ESCONFIG/jvm-example/example.yaml"
+
+    echo "Running jvm-example's bin script...."
+    "$ESHOME/bin/jvm-example/test" | grep test
+}
+
+# Remove the jvm-example plugin which fully exercises the special cases of
+# removing bin and not removing config.
+remove_jvm_example() {
+    remove_plugin jvm-example
+
+    assert_file_not_exist "$ESHOME/bin/jvm-example"
+    assert_file_exist "$ESCONFIG/jvm-example"
+    assert_file_exist "$ESCONFIG/jvm-example/example.yaml"
+}
+
+# Install and remove a plugin with a special prefix. For the most part prefixes
+# are just useful for grouping but the "analysis" prefix is special because all
+# analysis plugins come with a corresponding lucene-analyzers jar.
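+# (e.g. installing "analysis-icu" is expected to also ship a lucene-analyzers-icu-<version>.jar)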
+# $1 - the prefix +# $2 - the plugin name +# $@ - all remaining arguments are jars that must exist in the plugin's +# installation directory +install_and_remove_special_plugin() { + local prefix=$1 + shift + local name=$1 + shift + + if [ "$prefix" == "-" ]; then + local fullName="$name" + else + local fullName="$prefix-$name" + fi + + install_plugin $fullName "$(readlink -m $fullName-*.zip)" + if [ $prefix == 'analysis' ]; then + assert_file_exist "$(readlink -m $ESPLUGINS/$fullName/lucene-analyzers-$name-*.jar)" + fi + for file in "$@"; do + assert_file_exist "$(readlink -m $ESPLUGINS/$fullName/$file)" + done + remove_plugin $fullName +} diff --git a/qa/vagrant/src/test/resources/packaging/scripts/tar.bash b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash new file mode 100644 index 00000000000..7725edf4b94 --- /dev/null +++ b/qa/vagrant/src/test/resources/packaging/scripts/tar.bash @@ -0,0 +1,73 @@ +#!/bin/sh + +# This file contains some utilities to test the elasticsearch scripts, +# the .deb/.rpm packages and the SysV/Systemd scripts. + +# WARNING: This testing file must be executed as root and can +# dramatically change your system. It removes the 'elasticsearch' +# user/group and also many directories. Do not execute this file +# unless you know exactly what you are doing. + +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +# Install the tar.gz archive +install_archive() { + export ESHOME=${1:-/tmp/elasticsearch} + + echo "Unpacking tarball to $ESHOME" + rm -rf /tmp/untar + mkdir -p /tmp/untar + tar -xzf elasticsearch*.tar.gz -C /tmp/untar + + find /tmp/untar -depth -type d -name 'elasticsearch*' -exec mv {} "$ESHOME" \; > /dev/null + + # ES cannot run as root so create elasticsearch user & group if needed + if ! getent group "elasticsearch" > /dev/null 2>&1 ; then + if is_dpkg; then + addgroup --system "elasticsearch" + else + groupadd -r "elasticsearch" + fi + fi + if ! id "elasticsearch" > /dev/null 2>&1 ; then + if is_dpkg; then + adduser --quiet --system --no-create-home --ingroup "elasticsearch" --disabled-password --shell /bin/false "elasticsearch" + else + useradd --system -M --gid "elasticsearch" --shell /sbin/nologin --comment "elasticsearch user" "elasticsearch" + fi + fi + + chown -R elasticsearch:elasticsearch "$ESHOME" + export_elasticsearch_paths +} + +# Move the unzipped tarball to another location. +move_elasticsearch() { + local oldhome="$ESHOME" + export ESHOME="$1" + rm -rf "$ESHOME" + mv "$oldhome" "$ESHOME" + export_elasticsearch_paths +} + +# Export some useful paths. 
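+# ESPLUGINS and ESCONFIG are derived from ESHOME, so the assertions above keep working after move_elasticsearch.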
+export_elasticsearch_paths() { + export ESPLUGINS="$ESHOME/plugins" + export ESCONFIG="$ESHOME/config" +} From 90c2b3a384c036e7f9345b2586e69fda056387fd Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Tue, 1 Sep 2015 20:10:54 +0200 Subject: [PATCH 42/51] Add simple comparator tests Relates to #13249 --- .../gateway/PriorityComparatorTests.java | 64 ++++++++++++++++++- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java index a846bc2cc9b..88499bf96cd 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/PriorityComparatorTests.java @@ -23,12 +23,70 @@ import org.elasticsearch.cluster.routing.*; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESTestCase; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; +import java.util.*; public class PriorityComparatorTests extends ESTestCase { + public void testPreferNewIndices() { + RoutingNodes.UnassignedShards shards = new RoutingNodes.UnassignedShards((RoutingNodes) null); + List shardRoutings = Arrays.asList(TestShardRouting.newShardRouting("oldest", 0, null, null, null, + randomBoolean(), ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, null, + randomBoolean(), ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); + Collections.shuffle(shardRoutings, random()); + for (ShardRouting routing : shardRoutings) { + shards.add(routing); + } + shards.sort(new PriorityComparator() { + @Override + protected Settings getIndexSettings(String index) { + if ("oldest".equals(index)) { + return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 10) + .put(IndexMetaData.SETTING_PRIORITY, 1).build(); + } else if ("newest".equals(index)) { + return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 100) + .put(IndexMetaData.SETTING_PRIORITY, 1).build(); + } + return Settings.EMPTY; + } + }); + RoutingNodes.UnassignedShards.UnassignedIterator iterator = shards.iterator(); + ShardRouting next = iterator.next(); + assertEquals("newest", next.index()); + next = iterator.next(); + assertEquals("oldest", next.index()); + assertFalse(iterator.hasNext()); + } + + public void testPreferPriorityIndices() { + RoutingNodes.UnassignedShards shards = new RoutingNodes.UnassignedShards((RoutingNodes) null); + List shardRoutings = Arrays.asList(TestShardRouting.newShardRouting("oldest", 0, null, null, null, + randomBoolean(), ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, null, + randomBoolean(), ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar"))); + Collections.shuffle(shardRoutings, random()); + for (ShardRouting routing : shardRoutings) { + shards.add(routing); + } + shards.sort(new PriorityComparator() { + @Override + protected Settings getIndexSettings(String index) { + if ("oldest".equals(index)) { + return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 10) + .put(IndexMetaData.SETTING_PRIORITY, 100).build(); + } else if ("newest".equals(index)) { + return 
Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 100) + .put(IndexMetaData.SETTING_PRIORITY, 1).build(); + } + return Settings.EMPTY; + } + }); + RoutingNodes.UnassignedShards.UnassignedIterator iterator = shards.iterator(); + ShardRouting next = iterator.next(); + assertEquals("oldest", next.index()); + next = iterator.next(); + assertEquals("newest", next.index()); + assertFalse(iterator.hasNext()); + } + public void testPriorityComparatorSort() { RoutingNodes.UnassignedShards shards = new RoutingNodes.UnassignedShards((RoutingNodes) null); int numIndices = randomIntBetween(3, 99); From 99d2f0463e20f463c853d54d039c51e91da1afea Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Mon, 24 Aug 2015 12:26:06 -0400 Subject: [PATCH 43/51] [packaging] clean up more bats tests This cleans up deb, rpm, systemd, and sysvinit tests: 1. Move skip_not_rpm, skip_not_dpkg, etc to the setup() methods for faster runtime and cleaner code. 2. Removed lots of needless invocations of `run` 3. Created install_package for use in the systemd and sysvinit tests. 4. Removed lots of needless stderr to stdout redirects. Closes #13075 Related to #13074 --- .../packaging/scripts/30_deb_package.bats | 43 +++------ .../packaging/scripts/40_rpm_package.bats | 34 ++----- .../packaging/scripts/60_systemd.bats | 90 ++++++------------- .../packaging/scripts/70_sysv_initd.bats | 56 +++--------- .../scripts/packaging_test_utils.bash | 34 +++---- 5 files changed, 74 insertions(+), 183 deletions(-) diff --git a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats index aa7a370d80b..3367e62fcf8 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/30_deb_package.bats @@ -34,50 +34,39 @@ load packaging_test_utils # Cleans everything for the 1st execution setup() { - if [ "$BATS_TEST_NUMBER" -eq 1 ]; then - clean_before_test - fi + skip_not_dpkg } ################################## # Install DEB package ################################## @test "[DEB] dpkg command is available" { - skip_not_dpkg - run dpkg --version - [ "$status" -eq 0 ] + clean_before_test + dpkg --version } @test "[DEB] package is available" { - skip_not_dpkg count=$(find . 
-type f -name 'elastic*.deb' | wc -l) [ "$count" -eq 1 ] } @test "[DEB] package is not installed" { - skip_not_dpkg - run dpkg -s 'elasticsearch' >&2 + run dpkg -s 'elasticsearch' [ "$status" -eq 1 ] } @test "[DEB] install package" { - skip_not_dpkg - run dpkg -i elasticsearch*.deb >&2 - [ "$status" -eq 0 ] + dpkg -i elasticsearch*.deb } @test "[DEB] package is installed" { - skip_not_dpkg - run dpkg -s 'elasticsearch' >&2 - [ "$status" -eq 0 ] + dpkg -s 'elasticsearch' } ################################## # Check that the package is correctly installed ################################## @test "[DEB] verify package installation" { - skip_not_dpkg - verify_package_installation } @@ -85,8 +74,6 @@ setup() { # Check that Elasticsearch is working ################################## @test "[DEB] test elasticsearch" { - skip_not_dpkg - start_elasticsearch_service run_elasticsearch_tests @@ -96,21 +83,16 @@ setup() { # Uninstall DEB package ################################## @test "[DEB] remove package" { - skip_not_dpkg - run dpkg -r 'elasticsearch' >&2 - [ "$status" -eq 0 ] + dpkg -r 'elasticsearch' } @test "[DEB] package has been removed" { - skip_not_dpkg - run dpkg -s 'elasticsearch' >&2 + run dpkg -s 'elasticsearch' [ "$status" -eq 0 ] echo "$output" | grep -i "status" | grep -i "deinstall ok" } @test "[DEB] verify package removal" { - skip_not_dpkg - # The removal must stop the service count=$(ps | grep Elasticsearch | wc -l) [ "$count" -eq 0 ] @@ -146,14 +128,10 @@ setup() { } @test "[DEB] purge package" { - skip_not_dpkg - run dpkg --purge 'elasticsearch' >&2 - [ "$status" -eq 0 ] + dpkg --purge 'elasticsearch' } @test "[DEB] verify package purge" { - skip_not_dpkg - # all remaining files are deleted by the purge assert_file_not_exist "/etc/elasticsearch" assert_file_not_exist "/etc/elasticsearch/elasticsearch.yml" @@ -171,7 +149,6 @@ setup() { } @test "[DEB] package has been completly removed" { - skip_not_dpkg - run dpkg -s 'elasticsearch' >&2 + run dpkg -s 'elasticsearch' [ "$status" -eq 1 ] } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats b/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats index 6d8aff66410..cbcdd794c76 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/40_rpm_package.bats @@ -33,50 +33,39 @@ load packaging_test_utils # Cleans everything for the 1st execution setup() { - if [ "$BATS_TEST_NUMBER" -eq 1 ]; then - clean_before_test - fi + skip_not_rpm } ################################## # Install RPM package ################################## @test "[RPM] rpm command is available" { - skip_not_rpm - run rpm --version - [ "$status" -eq 0 ] + clean_before_test + rpm --version } @test "[RPM] package is available" { - skip_not_rpm count=$(find . 
-type f -name 'elastic*.rpm' | wc -l) [ "$count" -eq 1 ] } @test "[RPM] package is not installed" { - skip_not_rpm - run rpm -qe 'elasticsearch' >&2 + run rpm -qe 'elasticsearch' [ "$status" -eq 1 ] } @test "[RPM] install package" { - skip_not_rpm - run rpm -i elasticsearch*.rpm >&2 - [ "$status" -eq 0 ] + rpm -i elasticsearch*.rpm } @test "[RPM] package is installed" { - skip_not_rpm - run rpm -qe 'elasticsearch' >&2 - [ "$status" -eq 0 ] + rpm -qe 'elasticsearch' } ################################## # Check that the package is correctly installed ################################## @test "[RPM] verify package installation" { - skip_not_rpm - verify_package_installation } @@ -84,8 +73,6 @@ setup() { # Check that Elasticsearch is working ################################## @test "[RPM] test elasticsearch" { - skip_not_rpm - start_elasticsearch_service run_elasticsearch_tests @@ -95,20 +82,15 @@ setup() { # Uninstall RPM package ################################## @test "[RPM] remove package" { - skip_not_rpm - run rpm -e 'elasticsearch' >&2 - [ "$status" -eq 0 ] + rpm -e 'elasticsearch' } @test "[RPM] package has been removed" { - skip_not_rpm - run rpm -qe 'elasticsearch' >&2 + run rpm -qe 'elasticsearch' [ "$status" -eq 1 ] } @test "[RPM] verify package removal" { - skip_not_rpm - # The removal must stop the service count=$(ps | grep Elasticsearch | wc -l) [ "$count" -eq 0 ] diff --git a/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats index 011b063a161..e8e9817c670 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats @@ -33,42 +33,27 @@ load packaging_test_utils # Cleans everything for the 1st execution setup() { - if [ "$BATS_TEST_NUMBER" -eq 1 ]; then - clean_before_test - fi - - - # Installs a package before test - if is_dpkg; then - dpkg -i elasticsearch*.deb >&2 || true - fi - if is_rpm; then - rpm -i elasticsearch*.rpm >&2 || true - fi + skip_not_systemd + skip_not_dpkg_or_rpm } -@test "[SYSTEMD] daemon reload" { - skip_not_systemd +@test "[SYSTEMD] install elasticsearch" { + clean_before_test + install_package +} - run systemctl daemon-reload - [ "$status" -eq 0 ] +@test "[SYSTEMD] daemon reload after install" { + systemctl daemon-reload } @test "[SYSTEMD] enable" { - skip_not_systemd + systemctl enable elasticsearch.service - run systemctl enable elasticsearch.service - [ "$status" -eq 0 ] - - run systemctl is-enabled elasticsearch.service - [ "$status" -eq 0 ] + systemctl is-enabled elasticsearch.service } @test "[SYSTEMD] start" { - skip_not_systemd - - run systemctl start elasticsearch.service - [ "$status" -eq 0 ] + systemctl start elasticsearch.service wait_for_elasticsearch_status @@ -76,72 +61,53 @@ setup() { } @test "[SYSTEMD] start (running)" { - skip_not_systemd - - run systemctl start elasticsearch.service - [ "$status" -eq 0 ] + systemctl start elasticsearch.service } @test "[SYSTEMD] is active (running)" { - skip_not_systemd - run systemctl is-active elasticsearch.service [ "$status" -eq 0 ] [ "$output" = "active" ] } @test "[SYSTEMD] status (running)" { - skip_not_systemd - - run systemctl status elasticsearch.service - [ "$status" -eq 0 ] + systemctl status elasticsearch.service } ################################## # Check that Elasticsearch is working ################################## @test "[SYSTEMD] test elasticsearch" { - skip_not_systemd - run_elasticsearch_tests } @test "[SYSTEMD] 
restart" { - skip_not_systemd - - run systemctl restart elasticsearch.service - [ "$status" -eq 0 ] + systemctl restart elasticsearch.service wait_for_elasticsearch_status - run service elasticsearch status - [ "$status" -eq 0 ] + service elasticsearch status } @test "[SYSTEMD] stop (running)" { - skip_not_systemd - - run systemctl stop elasticsearch.service - [ "$status" -eq 0 ] + systemctl stop elasticsearch.service run systemctl status elasticsearch.service + [ "$status" -eq 0 ] echo "$output" | grep "Active:" | grep "inactive" } @test "[SYSTEMD] stop (stopped)" { - skip_not_systemd - - run systemctl stop elasticsearch.service - [ "$status" -eq 0 ] + systemctl stop elasticsearch.service run systemctl status elasticsearch.service + [ "$status" -eq 0 ] echo "$output" | grep "Active:" | grep "inactive" } @test "[SYSTEMD] status (stopped)" { - skip_not_systemd - run systemctl status elasticsearch.service + [ "$status" -eq 0 ] echo "$output" | grep "Active:" | grep "inactive" } @@ -150,21 +116,15 @@ setup() { # but it should not block ES from starting # see https://github.com/elastic/elasticsearch/issues/11594 @test "[SYSTEMD] delete PID_DIR and restart" { - skip_not_systemd + rm -rf /var/run/elasticsearch - run rm -rf /var/run/elasticsearch - [ "$status" -eq 0 ] + systemd-tmpfiles --create - run systemd-tmpfiles --create - [ "$status" -eq 0 ] - - run systemctl start elasticsearch.service - [ "$status" -eq 0 ] + systemctl start elasticsearch.service wait_for_elasticsearch_status assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - run systemctl stop elasticsearch.service - [ "$status" -eq 0 ] -} \ No newline at end of file + systemctl stop elasticsearch.service +} diff --git a/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats b/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats index 97d1cce918f..5bf43163ab1 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/70_sysv_initd.bats @@ -33,24 +33,17 @@ load packaging_test_utils # Cleans everything for the 1st execution setup() { - if [ "$BATS_TEST_NUMBER" -eq 1 ]; then - clean_before_test - fi + skip_not_sysvinit + skip_not_dpkg_or_rpm +} - # Installs a package before test - if is_dpkg; then - dpkg -i elasticsearch*.deb >&2 || true - fi - if is_rpm; then - rpm -i elasticsearch*.rpm >&2 || true - fi +@test "[INIT.D] install elasticsearch" { + clean_before_test + install_package } @test "[INIT.D] start" { - skip_not_sysvinit - - run service elasticsearch start - [ "$status" -eq 0 ] + service elasticsearch start wait_for_elasticsearch_status @@ -58,44 +51,29 @@ setup() { } @test "[INIT.D] status (running)" { - skip_not_sysvinit - - run service elasticsearch status - [ "$status" -eq 0 ] + service elasticsearch status } ################################## # Check that Elasticsearch is working ################################## @test "[INIT.D] test elasticsearch" { - skip_not_sysvinit - run_elasticsearch_tests } @test "[INIT.D] restart" { - skip_not_sysvinit - - run service elasticsearch restart - [ "$status" -eq 0 ] + service elasticsearch restart wait_for_elasticsearch_status - run service elasticsearch status - [ "$status" -eq 0 ] + service elasticsearch status } @test "[INIT.D] stop (running)" { - skip_not_sysvinit - - run service elasticsearch stop - [ "$status" -eq 0 ] - + service elasticsearch stop } @test "[INIT.D] status (stopped)" { - skip_not_sysvinit - run service elasticsearch status # precise returns 4, trusty 3 [ 
"$status" -eq 3 ] || [ "$status" -eq 4 ] @@ -106,19 +84,13 @@ setup() { # but it should not block ES from starting # see https://github.com/elastic/elasticsearch/issues/11594 @test "[INIT.D] delete PID_DIR and restart" { - skip_not_sysvinit + rm -rf /var/run/elasticsearch - run rm -rf /var/run/elasticsearch - [ "$status" -eq 0 ] - - - run service elasticsearch start - [ "$status" -eq 0 ] + service elasticsearch start wait_for_elasticsearch_status assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - run service elasticsearch stop - [ "$status" -eq 0 ] + service elasticsearch stop } diff --git a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash index ec58dd0816d..9eff5894c17 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash +++ b/qa/vagrant/src/test/resources/packaging/scripts/packaging_test_utils.bash @@ -79,16 +79,16 @@ is_rpm() { # Skip test if the 'dpkg' command is not supported skip_not_dpkg() { - if [ ! -x "`which dpkg 2>/dev/null`" ]; then - skip "dpkg is not supported" - fi + is_dpkg || skip "dpkg is not supported" } # Skip test if the 'rpm' command is not supported skip_not_rpm() { - if [ ! -x "`which rpm 2>/dev/null`" ]; then - skip "rpm is not supported" - fi + is_rpm || skip "rpm is not supported" +} + +skip_not_dpkg_or_rpm() { + is_dpkg || is_rpm || skip "only dpkg or rpm systems are supported" } # Returns 0 if the system supports Systemd @@ -177,17 +177,6 @@ assert_output() { echo "$output" | grep -E "$1" } -# Install the rpm or deb package -install_package() { - if is_rpm; then - rpm -i elasticsearch*.rpm - elif is_dpkg; then - dpkg -i elasticsearch*.deb - else - skip "Only rpm or deb supported" - fi -} - # Checks that all directories & files are correctly installed # after a package (deb/rpm) install verify_package_installation() { @@ -243,6 +232,17 @@ verify_package_installation() { fi } +# Install the rpm or deb package +install_package() { + if is_rpm; then + rpm -i elasticsearch*.rpm + elif is_dpkg; then + dpkg -i elasticsearch*.deb + else + skip "Only rpm or deb supported" + fi +} + # Checks that all directories & files are correctly installed # after a archive (tar.gz/zip) install verify_archive_installation() { From 38147da43e3d3c2bf8c3905222925f0178883a49 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 1 Sep 2015 15:24:04 -0400 Subject: [PATCH 44/51] [tests] Fix exit code check for systemctl --- .../src/test/resources/packaging/scripts/60_systemd.bats | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats index e8e9817c670..8df4f4a980b 100644 --- a/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats +++ b/qa/vagrant/src/test/resources/packaging/scripts/60_systemd.bats @@ -93,7 +93,7 @@ setup() { systemctl stop elasticsearch.service run systemctl status elasticsearch.service - [ "$status" -eq 0 ] + [ "$status" -eq 3 ] || "Expected exit code 3 meaning stopped" echo "$output" | grep "Active:" | grep "inactive" } @@ -101,13 +101,13 @@ setup() { systemctl stop elasticsearch.service run systemctl status elasticsearch.service - [ "$status" -eq 0 ] + [ "$status" -eq 3 ] || "Expected exit code 3 meaning stopped" echo "$output" | grep "Active:" | grep "inactive" } @test "[SYSTEMD] status (stopped)" { run systemctl status elasticsearch.service - [ "$status" -eq 0 ] + [ 
"$status" -eq 3 ] || "Expected exit code 3 meaning stopped" echo "$output" | grep "Active:" | grep "inactive" } From e5a02b1d96473141f689ed8b13b2d4b3497c3826 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 1 Sep 2015 20:56:23 -0400 Subject: [PATCH 45/51] Remove unused goal to allow for integ test coverage analysis --- pom.xml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/pom.xml b/pom.xml index 2af046be847..1379d3b65c4 100644 --- a/pom.xml +++ b/pom.xml @@ -1105,12 +1105,6 @@ org.eclipse.jdt.ui.text.custom_code_templates=report - - default-check - - check - - From 517f00b048cb4446665b3090fa12c7ec670bf6ab Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 1 Sep 2015 20:57:08 -0400 Subject: [PATCH 46/51] doc how to get code coverage for unit,integ,combined --- TESTING.asciidoc | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 4329c88b59e..108c6ba9765 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -527,13 +527,27 @@ sudo bats $BATS/*.bats == Coverage analysis -To run tests instrumented with jacoco and produce a coverage report in -`target/site/jacoco/`: +Tests can be run instrumented with jacoco to produce a coverage report in +`target/site/jacoco/`. + +Unit test coverage: --------------------------------------------------------------------------- mvn -Dtests.coverage test jacoco:report --------------------------------------------------------------------------- +Integration test coverage: + +--------------------------------------------------------------------------- +mvn -Dtests.coverage -Dskip.unit.tests verify jacoco:report +--------------------------------------------------------------------------- + +Combined (Unit+Integration) coverage: + +--------------------------------------------------------------------------- +mvn -Dtests.coverage verify jacoco:report +--------------------------------------------------------------------------- + == Debugging from an IDE If you want to run elasticsearch from your IDE, you should execute ./run.sh From abec2679f02724ac600653595bee86d26a8f3a66 Mon Sep 17 00:00:00 2001 From: Robert Muir Date: Tue, 1 Sep 2015 22:14:49 -0400 Subject: [PATCH 47/51] Improve jacoco coverage Upgrade jacoco version and allow it to run with security manager enabled. 
---
 .../org/elasticsearch/bootstrap/BootstrapForTesting.java | 7 +++++++
 pom.xml                                                  | 7 +++++--
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
index 83ae87580bb..a7b9043f350 100644
--- a/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
+++ b/core/src/test/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
@@ -106,6 +106,13 @@ public class BootstrapForTesting {
             if (Strings.hasLength(System.getProperty("tests.config"))) {
                 perms.add(new FilePermission(System.getProperty("tests.config"), "read,readlink"));
             }
+            // jacoco coverage output file
+            if (Boolean.getBoolean("tests.coverage")) {
+                Path coverageDir = PathUtils.get(System.getProperty("tests.coverage.dir"));
+                perms.add(new FilePermission(coverageDir.resolve("jacoco.exec").toString(), "read,write"));
+                // in case we get fancy and use the -integration goals later:
+                perms.add(new FilePermission(coverageDir.resolve("jacoco-it.exec").toString(), "read,write"));
+            }
             Policy.setPolicy(new ESPolicy(perms));
             System.setSecurityManager(new TestSecurityManager());
             Security.selfTest();
diff --git a/pom.xml b/pom.xml
index 1379d3b65c4..c25d62f1fc8 100644
--- a/pom.xml
+++ b/pom.xml
@@ -47,7 +47,7 @@
         2.5.3
         1.6.2
         1.2.17
-        0.7.2.201409121644
+        0.7.5.201505241946
         s3://download.elasticsearch.org/elasticsearch/staging/
@@ -646,6 +646,8 @@
                             ${tests.showSuccess}
                             ${tests.thirdparty}
                             ${tests.config}
+                            ${tests.coverage}
+                            ${project.build.directory}
                             ${tests.client.ratio}
                             ${tests.enable_mock_modules}
                             ${tests.assertion.disabled}
@@ -1415,7 +1417,8 @@ org.eclipse.jdt.ui.text.custom_code_templates=
-                    false
+
+                    true

From 89ac6a83f134365dcc3b7bdff5a3f14ddab6c2c5 Mon Sep 17 00:00:00 2001
From: Britta Weber
Date: Tue, 1 Sep 2015 13:22:06 +0200
Subject: [PATCH 48/51] Allow reads on shards that are in POST_RECOVERY

Currently, we do not allow reads on shards which are in POST_RECOVERY, which
unfortunately can cause search failures on shards which just recovered if there
are no replicas (#9421). The reason why we did not allow reads on shards in
POST_RECOVERY is that, after relocating, a shard might miss a refresh if the
node that executed the refresh is behind with cluster state processing. If that
happens, a user might execute index/refresh/search but still not find the
document that was indexed.

We changed how refresh works in #13068 to make sure that shards cannot miss a
refresh this way, by sending refresh requests the same way that we send write
requests.

This commit changes IndexShard to allow reads on shards in POST_RECOVERY as well.
In addition it adds two tests:
- a test for issue #9421 (after relocation, shards might temporarily not be
  searchable if still in POST_RECOVERY)
- a test for the visibility issue with relocation and refresh if reads are
  allowed when the shard is in POST_RECOVERY

closes #9421
---
 .../elasticsearch/index/shard/IndexShard.java |   7 +-
 .../DiscoveryWithServiceDisruptionsIT.java    | 295 +++++++++++++++++-
 2 files changed, 297 insertions(+), 5 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index 10217983f40..b2df1011b4b 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -111,6 +111,7 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.nio.channels.ClosedByInterruptException;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.CopyOnWriteArrayList;
@@ -191,6 +192,8 @@ public class IndexShard extends AbstractIndexShardComponent {

     private final IndexShardOperationCounter indexShardOperationCounter;

+    private EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY);
+
     @Inject
     public IndexShard(ShardId shardId, IndexSettingsService indexSettingsService, IndicesLifecycle indicesLifecycle, Store store, StoreRecoveryService storeRecoveryService,
                       ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService,
@@ -953,8 +956,8 @@ public class IndexShard extends AbstractIndexShardComponent {

     public void readAllowed() throws IllegalIndexShardStateException {
         IndexShardState state = this.state; // one time volatile read
-        if (state != IndexShardState.STARTED && state != IndexShardState.RELOCATED) {
-            throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when started/relocated");
+        if (readAllowedStates.contains(state) == false) {
+            throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when shard state is one of " + readAllowedStates.toString());
         }
     }

diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java
index 0f89f0e4383..8f00bf4073f 100644
--- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java
+++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java
@@ -23,21 +23,29 @@ import com.google.common.base.Predicate;
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
-import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
+import org.elasticsearch.action.admin.indices.flush.FlushResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.count.CountResponse;
 import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.routing.DjbHashFunction;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.zen.ZenDiscovery;
@@ -48,6 +56,10 @@ import org.elasticsearch.discovery.zen.ping.ZenPing;
 import org.elasticsearch.discovery.zen.ping.ZenPingService;
 import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
 import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.recovery.RecoverySource;
+import org.elasticsearch.indices.store.IndicesStoreIntegrationIT;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.InternalTestCluster;
@@ -55,7 +67,10 @@ import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration;
 import org.elasticsearch.test.disruption.*;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.transport.MockTransportService;
-import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportService;
 import org.junit.Before;
 import org.junit.Test;
@@ -812,7 +827,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
     }

-    /** Test cluster join with issues in cluster state publishing * */
+    /**
+     * Test cluster join with issues in cluster state publishing
+     */
     @Test
     public void testClusterJoinDespiteOfPublishingIssues() throws Exception {
         List<String> nodes = startCluster(2, 1);
@@ -919,6 +936,277 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase {
         ensureStableCluster(3);
     }

+    /*
+     * Tests a visibility issue if a shard is in POST_RECOVERY
+     *
+     * When a user indexes a document, then refreshes and then executes a search, and all are successful with no
+     * timeouts etc., then the document must be visible for the search.
+     *
+     * When a primary is relocating from node_1 to node_2, there can be a short time where both old and new primary
+     * are started and accept indexing and read requests. However, the new primary might not be visible to nodes
+     * that lag behind one cluster state. If such a node then sends a refresh to the index, this refresh request
+     * must reach the new primary on node_2 too. Otherwise a different node that searches on the new primary might not
+     * find the indexed document although a refresh was executed before.
+     *
+     * In detail:
+     * Cluster state 0:
+     * node_1: [index][0] STARTED (ShardRoutingState)
+     * node_2: no shard
+     *
+     * 0. primary ([index][0]) relocates from node_1 to node_2
+     * Cluster state 1:
+     * node_1: [index][0] RELOCATING (ShardRoutingState), (STARTED from IndexShardState perspective on node_1)
+     * node_2: [index][0] INITIALIZING (ShardRoutingState), (IndexShardState on node_2 is RECOVERING)
+     *
+     * 1. node_2 is done recovering, moves its shard to IndexShardState.POST_RECOVERY and sends a message to master
+     *    that the shard is ShardRoutingState.STARTED
+     * Cluster state is still the same but the IndexShardState on node_2 has changed and it now accepts writes and reads:
+     * node_1: [index][0] RELOCATING (ShardRoutingState), (STARTED from IndexShardState perspective on node_1)
+     * node_2: [index][0] INITIALIZING (ShardRoutingState), (IndexShardState on node_2 is POST_RECOVERY)
+     *
+     * 2. any node receives an index request which is then executed on node_1 and node_2
+     *
+     * 3. node_3 sends a refresh but it is a little behind with cluster state processing and still on cluster state 0.
+     *    If refresh were a broadcast operation it would send it to node_1 only because it does not know node_2 has a shard too
+     *
+     * 4. node_3 catches up with the cluster state and acks it to master, which can now process the shard started message
+     *    that node_2 sent before, and updates the cluster state to:
+     * Cluster state 2:
+     * node_1: [index][0] no shard
+     * node_2: [index][0] STARTED (ShardRoutingState), (IndexShardState on node_2 is still POST_RECOVERY)
+     *
+     * master sends this to all nodes.
+     *
+     * 5. node_4 and node_3 process cluster state 2, but node_1 and node_2 have not yet
+     *
+     * If node_4 now searches for the document that was indexed before, it will search at node_2 because it is on
+     * cluster state 2. It should be able to retrieve it with a search because the refresh from before was
+     * successful.
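+     * (This is why reads must be allowed while a shard is still in POST_RECOVERY: the copy that
+     * acknowledged the refresh must also be allowed to serve the search.)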
+ */ + @Test + public void testReadOnPostRecoveryShards() throws Exception { + List clusterStateBlocks = new ArrayList<>(); + try { + configureUnicastCluster(5, null, 1); + // we could probably write a test without a dedicated master node but it is easier if we use one + Future masterNodeFuture = internalCluster().startMasterOnlyNodeAsync(); + // node_1 will have the shard in the beginning + Future node1Future = internalCluster().startDataOnlyNodeAsync(); + final String masterNode = masterNodeFuture.get(); + final String node_1 = node1Future.get(); + logger.info("--> creating index [test] with one shard and zero replica"); + assertAcked(prepareCreate("test").setSettings( + Settings.builder().put(indexSettings()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexShard.INDEX_REFRESH_INTERVAL, -1)) + .addMapping("doc", jsonBuilder().startObject().startObject("doc") + .startObject("properties").startObject("text").field("type", "string").endObject().endObject() + .endObject().endObject()) + ); + ensureGreen("test"); + logger.info("--> starting three more data nodes"); + List nodeNamesFuture = internalCluster().startDataOnlyNodesAsync(3).get(); + final String node_2 = nodeNamesFuture.get(0); + final String node_3 = nodeNamesFuture.get(1); + final String node_4 = nodeNamesFuture.get(2); + logger.info("--> running cluster_health"); + ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth() + .setWaitForNodes("5") + .setWaitForRelocatingShards(0) + .get(); + assertThat(clusterHealth.isTimedOut(), equalTo(false)); + + logger.info("--> move shard from node_1 to node_2, and wait for relocation to finish"); + + // block cluster state updates on node_3 so that it only sees the shard on node_1 + BlockClusterStateProcessing disruptionNode3 = new BlockClusterStateProcessing(node_3, getRandom()); + clusterStateBlocks.add(disruptionNode3); + internalCluster().setDisruptionScheme(disruptionNode3); + disruptionNode3.startDisrupting(); + // register a Tracer that notifies begin and end of a relocation + MockTransportService transportServiceNode2 = (MockTransportService) internalCluster().getInstance(TransportService.class, node_2); + CountDownLatch beginRelocationLatchNode2 = new CountDownLatch(1); + CountDownLatch endRelocationLatchNode2 = new CountDownLatch(1); + transportServiceNode2.addTracer(new StartRecoveryToShardStaredTracer(logger, beginRelocationLatchNode2, endRelocationLatchNode2)); + + // block cluster state updates on node_1 and node_2 so that we end up with two primaries + BlockClusterStateProcessing disruptionNode2 = new BlockClusterStateProcessing(node_2, getRandom()); + clusterStateBlocks.add(disruptionNode2); + disruptionNode2.applyToCluster(internalCluster()); + BlockClusterStateProcessing disruptionNode1 = new BlockClusterStateProcessing(node_1, getRandom()); + clusterStateBlocks.add(disruptionNode1); + disruptionNode1.applyToCluster(internalCluster()); + + logger.info("--> move shard from node_1 to node_2"); + // don't block on the relocation. 
cluster state updates are blocked on node_3 and the relocation would time out + Future rerouteFuture = internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2)).setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS)).execute(); + + logger.info("--> wait for relocation to start"); + // wait for relocation to start + beginRelocationLatchNode2.await(); + // start to block cluster state updates on node_1 and node_2 so that we end up with two primaries + // one STARTED on node_1 and one in POST_RECOVERY on node_2 + disruptionNode1.startDisrupting(); + disruptionNode2.startDisrupting(); + endRelocationLatchNode2.await(); + final Client node3Client = internalCluster().client(node_3); + final Client node2Client = internalCluster().client(node_2); + final Client node1Client = internalCluster().client(node_1); + final Client node4Client = internalCluster().client(node_4); + logger.info("--> index doc"); + logLocalClusterStates(node1Client, node2Client, node3Client, node4Client); + assertTrue(node3Client.prepareIndex("test", "doc").setSource("{\"text\":\"a\"}").get().isCreated()); + // sometimes refresh and sometimes flush + int refreshOrFlushType = randomIntBetween(1, 2); + switch (refreshOrFlushType) { + case 1: { + logger.info("--> refresh from node_3"); + RefreshResponse refreshResponse = node3Client.admin().indices().prepareRefresh().get(); + assertThat(refreshResponse.getFailedShards(), equalTo(0)); + // total shards would normally be num replicas + 1, but it is higher here because the + // relocating shard counts twice as successful + assertThat(refreshResponse.getTotalShards(), equalTo(2)); + assertThat(refreshResponse.getSuccessfulShards(), equalTo(2)); + break; + } + case 2: { + logger.info("--> flush from node_3"); + FlushResponse flushResponse = node3Client.admin().indices().prepareFlush().get(); + assertThat(flushResponse.getFailedShards(), equalTo(0)); + // total shards would normally be num replicas + 1, but it is higher here because the + // relocating shard counts twice as successful + assertThat(flushResponse.getTotalShards(), equalTo(2)); + assertThat(flushResponse.getSuccessfulShards(), equalTo(2)); + break; + } + default: + fail("this is a test bug, number should be between 1 and 2"); + } + // now stop disrupting so that node_3 can ack last cluster state to master and master can continue + // to publish the next cluster state + logger.info("--> stop disrupting node_3"); + disruptionNode3.stopDisrupting(); + rerouteFuture.get(); + logger.info("--> wait for node_4 to get new cluster state"); + // wait until node_4 actually has the new cluster state in which node_1 has no shard + assertBusy(new Runnable() { + @Override + public void run() { + ClusterState clusterState = node4Client.admin().cluster().prepareState().setLocal(true).get().getState(); + // get the node id from the name. TODO: Is there a better way to do this?
+ String nodeId = null; + for (RoutingNode node : clusterState.getRoutingNodes()) { + if (node.node().name().equals(node_1)) { + nodeId = node.nodeId(); + } + } + assertNotNull(nodeId); + // check that node_1 does not have the shard in local cluster state + assertFalse(clusterState.getRoutingNodes().routingNodeIter(nodeId).hasNext()); + } + }); + + logger.info("--> run count from node_4"); + logLocalClusterStates(node1Client, node2Client, node3Client, node4Client); + CountResponse countResponse = node4Client.prepareCount("test").setPreference("local").get(); + assertThat(countResponse.getCount(), equalTo(1l)); + logger.info("--> stop disrupting node_1 and node_2"); + disruptionNode2.stopDisrupting(); + disruptionNode1.stopDisrupting(); + // wait for relocation to finish + logger.info("--> wait for relocation to finish"); + clusterHealth = client().admin().cluster().prepareHealth() + .setWaitForRelocatingShards(0) + .get(); + assertThat(clusterHealth.isTimedOut(), equalTo(false)); + } catch (AssertionError e) { + for (BlockClusterStateProcessing blockClusterStateProcessing : clusterStateBlocks) { + blockClusterStateProcessing.stopDisrupting(); + } + throw e; + } + } + + /** + * This Tracer can be used to signal the start of a recovery and the shard started event after the translog was copied + */ + public static class StartRecoveryToShardStaredTracer extends MockTransportService.Tracer { + private final ESLogger logger; + private final CountDownLatch beginRelocationLatch; + private final CountDownLatch sentShardStartedLatch; + + public StartRecoveryToShardStaredTracer(ESLogger logger, CountDownLatch beginRelocationLatch, CountDownLatch sentShardStartedLatch) { + this.logger = logger; + this.beginRelocationLatch = beginRelocationLatch; + this.sentShardStartedLatch = sentShardStartedLatch; + } + + @Override + public void requestSent(DiscoveryNode node, long requestId, String action, TransportRequestOptions options) { + if (action.equals(RecoverySource.Actions.START_RECOVERY)) { + logger.info("sent: {}, relocation starts", action); + beginRelocationLatch.countDown(); + } + if (action.equals(ShardStateAction.SHARD_STARTED_ACTION_NAME)) { + logger.info("sent: {}, shard started", action); + sentShardStartedLatch.countDown(); + } + } + } + + private void logLocalClusterStates(Client... clients) { + int counter = 1; + for (Client client : clients) { + ClusterState clusterState = client.admin().cluster().prepareState().setLocal(true).get().getState(); + logger.info("--> cluster state on node_{} {}", counter, clusterState.prettyPrint()); + counter++; + } + } + + /** + * This test creates a scenario where a primary shard (0 replicas) relocates and is in POST_RECOVERY on the target + * node but is already deleted on the source node. Search requests should still work.
+ */ + @Test + public void searchWithRelocationAndSlowClusterStateProcessing() throws Exception { + configureUnicastCluster(3, null, 1); + Future<String> masterNodeFuture = internalCluster().startMasterOnlyNodeAsync(); + Future<String> node_1Future = internalCluster().startDataOnlyNodeAsync(); + + final String node_1 = node_1Future.get(); + final String masterNode = masterNodeFuture.get(); + logger.info("--> creating index [test] with one shard and no replica"); + assertAcked(prepareCreate("test").setSettings( + Settings.builder().put(indexSettings()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)) + ); + ensureGreen("test"); + + Future<String> node_2Future = internalCluster().startDataOnlyNodeAsync(); + final String node_2 = node_2Future.get(); + List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + indexRequestBuilderList.add(client().prepareIndex().setIndex("test").setType("doc").setSource("{\"int_field\":1}")); + } + indexRandom(true, indexRequestBuilderList); + SingleNodeDisruption disruption = new BlockClusterStateProcessing(node_2, getRandom()); + + internalCluster().setDisruptionScheme(disruption); + MockTransportService transportServiceNode2 = (MockTransportService) internalCluster().getInstance(TransportService.class, node_2); + CountDownLatch beginRelocationLatch = new CountDownLatch(1); + CountDownLatch endRelocationLatch = new CountDownLatch(1); + transportServiceNode2.addTracer(new IndicesStoreIntegrationIT.ReclocationStartEndTracer(logger, beginRelocationLatch, endRelocationLatch)); + internalCluster().client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(new ShardId("test", 0), node_1, node_2)).get(); + // wait for relocation to start + beginRelocationLatch.await(); + disruption.startDisrupting(); + // wait for relocation to finish + endRelocationLatch.await(); + // now search for the documents and see if we get a reply + assertThat(client().prepareCount().get().getCount(), equalTo(100l)); + } + @Test public void testIndexImportedFromDataOnlyNodesIfMasterLostDataFolder() throws Exception { // test for https://github.com/elastic/elasticsearch/issues/8823 @@ -932,6 +1220,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ensureGreen(); internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() { + @Override public boolean clearData(String nodeName) { return true; } From 34ee4c2d6691464f479d093ac795abac1b26745a Mon Sep 17 00:00:00 2001 From: David Pilato Date: Tue, 1 Sep 2015 18:54:52 +0200 Subject: [PATCH 49/51] [build] remove shaded elasticsearch version The shaded version of elasticsearch was built at the very beginning to avoid dependency conflicts in a specific case where: * People use elasticsearch from Java * People need to embed the elasticsearch jar within their own application (as it is today the only way to get a `TransportClient`) * People also embed in their application another (most of the time older) version of a dependency that elasticsearch uses, such as Guava, Joda, Jackson... This conflict issue can be solved within the projects themselves by either upgrading the dependency version and using the one provided by elasticsearch, or by shading the elasticsearch project and relocating some conflicting packages. Example ------- As an example, let's say you want to use `Joda 2.1` within your project but elasticsearch `2.0.0-beta1` provides `Joda 2.8`. Let's say you also want to run all that with the shield plugin.
Create a new maven project or module with:

```xml
<groupId>fr.pilato.elasticsearch.test</groupId>
<artifactId>es-shaded</artifactId>
<version>1.0-SNAPSHOT</version>

<properties>
    <elasticsearch.version>2.0.0-beta1</elasticsearch.version>
</properties>

<dependencies>
    <dependency>
        <groupId>org.elasticsearch</groupId>
        <artifactId>elasticsearch</artifactId>
        <version>${elasticsearch.version}</version>
    </dependency>
    <dependency>
        <groupId>org.elasticsearch.plugin</groupId>
        <artifactId>shield</artifactId>
        <version>${elasticsearch.version}</version>
    </dependency>
</dependencies>
```

And now shade and relocate all packages which conflict with your own application:

```xml
<plugin>
    <groupId>org.apache.maven.plugins</groupId>
    <artifactId>maven-shade-plugin</artifactId>
    <version>2.4.1</version>
    <executions>
        <execution>
            <phase>package</phase>
            <goals>
                <goal>shade</goal>
            </goals>
            <configuration>
                <relocations>
                    <relocation>
                        <pattern>org.joda</pattern>
                        <shadedPattern>fr.pilato.thirdparty.joda</shadedPattern>
                    </relocation>
                </relocations>
            </configuration>
        </execution>
    </executions>
</plugin>
```

You can now create a shaded version of elasticsearch + shield by running `mvn clean install`. In your project, you can now depend on:

```xml
<dependency>
    <groupId>fr.pilato.elasticsearch.test</groupId>
    <artifactId>es-shaded</artifactId>
    <version>1.0-SNAPSHOT</version>
</dependency>
<dependency>
    <groupId>joda-time</groupId>
    <artifactId>joda-time</artifactId>
    <version>2.1</version>
</dependency>
```

Then build your TransportClient as usual:

```java
TransportClient client = TransportClient.builder()
        .settings(Settings.builder()
                .put("path.home", ".")
                .put("shield.user", "username:password")
                .put("plugin.types", "org.elasticsearch.shield.ShieldPlugin")
        )
        .build();
client.addTransportAddress(new InetSocketTransportAddress(new InetSocketAddress("localhost", 9300)));

// Index some data
client.prepareIndex("test", "doc", "1").setSource("foo", "bar").setRefresh(true).get();
SearchResponse searchResponse = client.prepareSearch("test").get();
```

If you want to use your own version of Joda, then import for example `org.joda.time.DateTime`. If you want access to the shaded version (not recommended though), import `fr.pilato.thirdparty.joda.time.DateTime`.

You can run a simple test to make sure that both classes can live together within the same JVM:

```java
CodeSource codeSource = new org.joda.time.DateTime().getClass().getProtectionDomain().getCodeSource();
System.out.println("unshaded = " + codeSource);

codeSource = new fr.pilato.thirdparty.joda.time.DateTime().getClass().getProtectionDomain().getCodeSource();
System.out.println("shaded = " + codeSource);
```

It will print:

```
unshaded = (file:/path/to/joda-time-2.1.jar )
shaded = (file:/path/to/es-shaded-1.0-SNAPSHOT.jar )
```

This PR also removes the fully-loaded module. By the way, the project can now build with Maven 3.3.3 so we can relax our maven policy a bit.
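For projects that depended on the removed `org.elasticsearch.distribution.shaded` artifact, the migration path is a dependency on the plain jar instead; a minimal sketch (the version shown is this branch's snapshot and is an assumption for your build):

```xml
<!-- Replace the shaded artifact with the plain elasticsearch jar. -->
<dependency>
    <groupId>org.elasticsearch</groupId>
    <artifactId>elasticsearch</artifactId>
    <version>2.1.0-SNAPSHOT</version>
</dependency>
```

Any conflicting transitive dependencies then have to be handled in the consuming project, for example with the shading recipe above.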
--- core/pom.xml | 3 - .../main/resources/ant/integration-tests.xml | 32 ---- .../resources/forbidden/core-signatures.txt | 5 +- .../third-party-shaded-signatures.txt | 33 ---- ...natures.txt => third-party-signatures.txt} | 0 distribution/deb/pom.xml | 11 +- distribution/fully-loaded/pom.xml | 73 -------- distribution/pom.xml | 57 +++++- distribution/rpm/pom.xml | 10 - distribution/shaded/pom.xml | 173 ------------------ distribution/tar/pom.xml | 9 - distribution/zip/pom.xml | 9 - docs/java-api/docs/index_.asciidoc | 20 +- pom.xml | 10 +- qa/pom.xml | 1 - qa/smoke-test-shaded/pom.xml | 39 ---- .../elasticsearch/shaded/test/ShadedIT.java | 77 -------- 17 files changed, 63 insertions(+), 499 deletions(-) delete mode 100644 dev-tools/src/main/resources/forbidden/third-party-shaded-signatures.txt rename dev-tools/src/main/resources/forbidden/{third-party-unshaded-signatures.txt => third-party-signatures.txt} (100%) delete mode 100644 distribution/fully-loaded/pom.xml delete mode 100644 distribution/shaded/pom.xml delete mode 100644 qa/smoke-test-shaded/pom.xml delete mode 100644 qa/smoke-test-shaded/src/test/java/org/elasticsearch/shaded/test/ShadedIT.java diff --git a/core/pom.xml b/core/pom.xml index c9f8656eacb..4b55f93aa19 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -105,8 +105,6 @@ - - com.google.guava guava @@ -165,7 +163,6 @@ commons-cli commons-cli - org.codehaus.groovy diff --git a/dev-tools/src/main/resources/ant/integration-tests.xml b/dev-tools/src/main/resources/ant/integration-tests.xml index 68d531cd6af..9c8df5d4c8f 100644 --- a/dev-tools/src/main/resources/ant/integration-tests.xml +++ b/dev-tools/src/main/resources/ant/integration-tests.xml @@ -396,36 +396,4 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/dev-tools/src/main/resources/forbidden/core-signatures.txt b/dev-tools/src/main/resources/forbidden/core-signatures.txt index 7876306bb8f..f050ec4cf37 100644 --- a/dev-tools/src/main/resources/forbidden/core-signatures.txt +++ b/dev-tools/src/main/resources/forbidden/core-signatures.txt @@ -14,8 +14,7 @@ # either express or implied. See the License for the specific # language governing permissions and limitations under the License. -# For shaded dependencies, please put signatures in third-party-shaded.txt -# and third-party-unshaded.txt instead of here. +# For third-party dependencies, please put signatures in third-party.txt instead of here. @defaultMessage spawns threads with vague names; use a custom thread factory and name threads so that you can tell (by its name) which executor it is associated with @@ -47,7 +46,7 @@ org.apache.lucene.search.NumericRangeFilter#newFloatRange(java.lang.String,java. org.apache.lucene.search.NumericRangeFilter#newIntRange(java.lang.String,java.lang.Integer,java.lang.Integer,boolean,boolean) org.apache.lucene.search.NumericRangeFilter#newLongRange(java.lang.String,java.lang.Long,java.lang.Long,boolean,boolean) -@defaultMessage Only use wait / notify when really needed try to use concurrency primitives, latches or callbacks instead. +@defaultMessage Only use wait / notify when really needed try to use concurrency primitives, latches or callbacks instead. 
java.lang.Object#wait() java.lang.Object#wait(long) java.lang.Object#wait(long,int) diff --git a/dev-tools/src/main/resources/forbidden/third-party-shaded-signatures.txt b/dev-tools/src/main/resources/forbidden/third-party-shaded-signatures.txt deleted file mode 100644 index db1cd6f83be..00000000000 --- a/dev-tools/src/main/resources/forbidden/third-party-shaded-signatures.txt +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on -# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -# either express or implied. See the License for the specific -# language governing permissions and limitations under the License. - -@defaultMessage Use Long.compare instead we are on Java7 -org.elasticsearch.common.primitives.Longs#compare(long,long) - -@defaultMessage unsafe encoders/decoders have problems in the lzf compress library. Use variants of encode/decode functions which take Encoder/Decoder. -org.elasticsearch.common.compress.lzf.impl.UnsafeChunkDecoder#() -org.elasticsearch.common.compress.lzf.util.ChunkDecoderFactory#optimalInstance() - -@defaultMessage Constructing a DateTime without a time zone is dangerous -org.elasticsearch.joda.time.DateTime#() -org.elasticsearch.joda.time.DateTime#(long) -org.elasticsearch.joda.time.DateTime#(int, int, int, int, int) -org.elasticsearch.joda.time.DateTime#(int, int, int, int, int, int) -org.elasticsearch.joda.time.DateTime#(int, int, int, int, int, int, int) -org.elasticsearch.joda.time.DateTime#now() -org.elasticsearch.joda.time.DateTimeZone#getDefault() - -org.elasticsearch.common.collect.Iterators#emptyIterator() @ Use Collections.emptyIterator instead diff --git a/dev-tools/src/main/resources/forbidden/third-party-unshaded-signatures.txt b/dev-tools/src/main/resources/forbidden/third-party-signatures.txt similarity index 100% rename from dev-tools/src/main/resources/forbidden/third-party-unshaded-signatures.txt rename to dev-tools/src/main/resources/forbidden/third-party-signatures.txt diff --git a/distribution/deb/pom.xml b/distribution/deb/pom.xml index a86fad9513d..aae6f6f4595 100644 --- a/distribution/deb/pom.xml +++ b/distribution/deb/pom.xml @@ -24,15 +24,6 @@ dpkg-sig - - - org.elasticsearch.distribution.fully-loaded - elasticsearch - ${elasticsearch.version} - pom - - - @@ -172,7 +163,7 @@ ${project.build.directory}/../target/lib - ${project.build.finalName}-shaded.jar,${project.build.finalName}-sources.jar,${project.build.finalName}-tests.jar,${project.build.finalName}-test-sources.jar,slf4j-api-*.jar + ${project.build.finalName}-sources.jar,${project.build.finalName}-tests.jar,${project.build.finalName}-test-sources.jar,slf4j-api-*.jar directory perm diff --git a/distribution/fully-loaded/pom.xml b/distribution/fully-loaded/pom.xml deleted file mode 100644 index 92772e9c866..00000000000 --- a/distribution/fully-loaded/pom.xml +++ /dev/null @@ -1,73 +0,0 @@ - - - 4.0.0 - - org.elasticsearch.distribution - distributions - 2.1.0-SNAPSHOT - - - org.elasticsearch.distribution.fully-loaded - 
elasticsearch - Distribution: with all optional dependencies - pom - - - - org.elasticsearch - elasticsearch - - - - org.apache.lucene - lucene-expressions - - - - com.spatial4j - spatial4j - - - - com.vividsolutions - jts - - - - - com.github.spullara.mustache.java - compiler - - - - org.codehaus.groovy - groovy-all - indy - - - - log4j - log4j - - - - log4j - apache-log4j-extras - - - - - - net.java.dev.jna - jna - - - - diff --git a/distribution/pom.xml b/distribution/pom.xml index 4a22d12f458..94cd8ad75e7 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -69,6 +69,61 @@ httpclient test + + + + org.elasticsearch + elasticsearch + + + + org.apache.lucene + lucene-expressions + + + + com.spatial4j + spatial4j + + + + com.vividsolutions + jts + + + + + com.github.spullara.mustache.java + compiler + + + + org.codehaus.groovy + groovy-all + indy + + + + log4j + log4j + + + + log4j + apache-log4j-extras + + + + + + net.java.dev.jna + jna + @@ -170,8 +225,6 @@ - fully-loaded - shaded tar zip deb diff --git a/distribution/rpm/pom.xml b/distribution/rpm/pom.xml index 488ed97ac04..f0e22b64963 100644 --- a/distribution/rpm/pom.xml +++ b/distribution/rpm/pom.xml @@ -15,15 +15,6 @@ rpm The RPM distribution of Elasticsearch - - - org.elasticsearch.distribution.fully-loaded - elasticsearch - ${elasticsearch.version} - pom - - - true ${project.build.directory}/releases/ @@ -187,7 +178,6 @@ target/lib/ - ${project.build.finalName}-shaded.jar ${project.build.finalName}-sources.jar ${project.build.finalName}-tests.jar ${project.build.finalName}-test-sources.jar diff --git a/distribution/shaded/pom.xml b/distribution/shaded/pom.xml deleted file mode 100644 index 6a4b54f7b18..00000000000 --- a/distribution/shaded/pom.xml +++ /dev/null @@ -1,173 +0,0 @@ - - - 4.0.0 - - org.elasticsearch.distribution - distributions - 2.1.0-SNAPSHOT - - - org.elasticsearch.distribution.shaded - elasticsearch - Distribution: Shaded JAR - - - - org.elasticsearch - elasticsearch - ${elasticsearch.version} - - - - - - - org.apache.maven.plugins - maven-jar-plugin - - - false - true - - - - org.apache.maven.plugins - maven-shade-plugin - - - - - - org.apache.maven.plugins - maven-antrun-plugin - - - check-for-jar-hell - integration-test - - run - - - - - - - - - - - - org.apache.maven.plugins - maven-shade-plugin - - - package - - shade - - - - - false - false - true - true - ${project.build.directory}/dependency-reduced-pom.xml - - - org.apache.lucene:* - com.spatial4j:* - - - - - - - true - - - - - - com.google.common - org.elasticsearch.common - - - com.google.thirdparty - org.elasticsearch.common.thirdparty - - - com.carrotsearch.hppc - org.elasticsearch.common.hppc - - - org.HdrHistogram - org.elasticsearch.common.HdrHistogram - - - org.yaml - org.elasticsearch.common.yaml - - - com.twitter.jsr166e - org.elasticsearch.common.util.concurrent.jsr166e - - - com.fasterxml.jackson - org.elasticsearch.common.jackson - - - org.joda.time - org.elasticsearch.common.joda.time - - - org.joda.convert - org.elasticsearch.common.joda.convert - - - org.jboss.netty - org.elasticsearch.common.netty - - - com.ning.compress - org.elasticsearch.common.compress - - - com.github.mustachejava - org.elasticsearch.common.mustache - - - com.tdunning.math.stats - org.elasticsearch.common.stats - - - org.apache.commons.lang - org.elasticsearch.common.lang - - - org.apache.commons.cli - org.elasticsearch.common.cli.commons - - - - - *:* - - META-INF/license/** - META-INF/* - META-INF/maven/** - LICENSE - NOTICE - /*.txt - build.properties - 
- - - - - - - - diff --git a/distribution/tar/pom.xml b/distribution/tar/pom.xml index 33181b281ab..744d7c924e7 100644 --- a/distribution/tar/pom.xml +++ b/distribution/tar/pom.xml @@ -19,15 +19,6 @@ The TAR distribution of Elasticsearch - - - org.elasticsearch.distribution.fully-loaded - elasticsearch - ${elasticsearch.version} - pom - - - ${project.basedir}/../src/main/packaging/packaging.properties diff --git a/distribution/zip/pom.xml b/distribution/zip/pom.xml index 750944f60dc..6bb38fbb579 100644 --- a/distribution/zip/pom.xml +++ b/distribution/zip/pom.xml @@ -19,15 +19,6 @@ The ZIP distribution of Elasticsearch - - - org.elasticsearch.distribution.fully-loaded - elasticsearch - ${elasticsearch.version} - pom - - - ${project.basedir}/../src/main/packaging/packaging.properties diff --git a/docs/java-api/docs/index_.asciidoc b/docs/java-api/docs/index_.asciidoc index 152d76e4a8c..2b29f15fabd 100644 --- a/docs/java-api/docs/index_.asciidoc +++ b/docs/java-api/docs/index_.asciidoc @@ -60,24 +60,8 @@ json.put("message","trying out Elasticsearch"); [[java-docs-index-generate-beans]] ===== Serialize your beans -Elasticsearch already uses Jackson but shades it under -`org.elasticsearch.common.jackson` package. + - So, you can add your own Jackson version in your `pom.xml` file or in -your classpath. See http://wiki.fasterxml.com/JacksonDownload[Jackson -Download Page]. - -For example: - -[source,xml] --------------------------------------------------- - - com.fasterxml.jackson.core - jackson-databind - 2.1.3 - --------------------------------------------------- - -Then, you can start serializing your beans to JSON: +Elasticsearch already uses http://wiki.fasterxml.com/JacksonHome[Jackson]. +So you can use it to serialize your beans to JSON: [source,java] -------------------------------------------------- diff --git a/pom.xml b/pom.xml index c25d62f1fc8..0cc34ed578f 100644 --- a/pom.xml +++ b/pom.xml @@ -52,7 +52,6 @@ ${project.build.directory}/dev-tools - unshaded ${elasticsearch.tools.directory}/license-check/elasticsearch_license_header.txt ${elasticsearch.tools.directory}/license-check/license_header_definition.xml ${elasticsearch.tools.directory}/ant/integration-tests.xml @@ -336,7 +335,6 @@ ${lucene.maven.version} - com.google.guava guava @@ -416,8 +414,6 @@ 1.3.1 - - org.codehaus.groovy groovy-all @@ -551,7 +547,7 @@ - [3.1.0,3.3.0) + [3.1.0,) @@ -846,7 +842,7 @@ ${elasticsearch.tools.directory}/forbidden/core-signatures.txt ${elasticsearch.tools.directory}/forbidden/all-signatures.txt - ${elasticsearch.tools.directory}/forbidden/third-party-${elasticsearch.thirdparty.config}-signatures.txt + ${elasticsearch.tools.directory}/forbidden/third-party-signatures.txt ${forbidden.signatures} **.SuppressForbidden @@ -893,7 +889,7 @@ org.apache.maven.plugins maven-resources-plugin 2.7 - diff --git a/qa/pom.xml b/qa/pom.xml index d918de924ae..f8b1f38f6bd 100644 --- a/qa/pom.xml +++ b/qa/pom.xml @@ -146,7 +146,6 @@ smoke-test-plugins - smoke-test-shaded smoke-test-multinode diff --git a/qa/smoke-test-shaded/pom.xml b/qa/smoke-test-shaded/pom.xml deleted file mode 100644 index c1eb2e72e36..00000000000 --- a/qa/smoke-test-shaded/pom.xml +++ /dev/null @@ -1,39 +0,0 @@ - - - 4.0.0 - - - org.elasticsearch.qa - elasticsearch-qa - 2.1.0-SNAPSHOT - - - smoke-test-shaded - QA: Smoke Test Shaded Jar - Runs a simple - - - shaded - true - - - - - org.elasticsearch.distribution.shaded - elasticsearch - ${elasticsearch.version} - - - org.hamcrest - hamcrest-all - test - - - org.apache.lucene - 
lucene-test-framework - test - - - diff --git a/qa/smoke-test-shaded/src/test/java/org/elasticsearch/shaded/test/ShadedIT.java b/qa/smoke-test-shaded/src/test/java/org/elasticsearch/shaded/test/ShadedIT.java deleted file mode 100644 index 9e8f022e715..00000000000 --- a/qa/smoke-test-shaded/src/test/java/org/elasticsearch/shaded/test/ShadedIT.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.shaded.test; - -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.Node; -import org.elasticsearch.node.NodeBuilder; -import org.junit.Test; - -import java.nio.file.Path; - -/** - */ -public class ShadedIT extends LuceneTestCase { - - public void testStartShadedNode() { - ESLoggerFactory.getRootLogger().setLevel("ERROR"); - Path data = createTempDir(); - Settings settings = Settings.builder() - .put("path.home", data.toAbsolutePath().toString()) - .put("node.mode", "local") - .put("http.enabled", "false") - .build(); - NodeBuilder builder = NodeBuilder.nodeBuilder().data(true).settings(settings).loadConfigSettings(false).local(true); - try (Node node = builder.node()) { - Client client = node.client(); - client.admin().indices().prepareCreate("test").get(); - client.prepareIndex("test", "foo").setSource("{ \"field\" : \"value\" }").get(); - client.admin().indices().prepareRefresh().get(); - SearchResponse response = client.prepareSearch("test").get(); - assertEquals(response.getHits().getTotalHits(), 1l); - } - - } - - @Test - public void testLoadShadedClasses() throws ClassNotFoundException { - Class.forName("org.elasticsearch.common.cache.LoadingCache"); - Class.forName("org.elasticsearch.common.joda.time.DateTime"); - Class.forName("org.elasticsearch.common.util.concurrent.jsr166e.LongAdder"); - } - - @Test(expected = ClassNotFoundException.class) - public void testGuavaIsNotOnTheCP() throws ClassNotFoundException { - Class.forName("com.google.common.cache.LoadingCache"); - } - - @Test(expected = ClassNotFoundException.class) - public void testJodaIsNotOnTheCP() throws ClassNotFoundException { - Class.forName("org.joda.time.DateTime"); - } - - @Test(expected = ClassNotFoundException.class) - public void testjsr166eIsNotOnTheCP() throws ClassNotFoundException { - Class.forName("com.twitter.jsr166e.LongAdder"); - } -} From 1d9905a798e350f4a732790cb8731df8ab016059 Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 2 Sep 2015 12:13:15 +0100 Subject: [PATCH 50/51] [DOCS] Added note about valid return types for scripts in the scripted_metric aggregation --- 
.../metrics/scripted-metric-aggregation.asciidoc | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index 6db8c82a9e8..639fabb62fd 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -79,7 +79,16 @@ The above example can also be specified using file scripts as follows: <1> script parameters for init, map and combine scripts must be specified in a global `params` object so that it can be shared between the scripts -For more details on specifying scripts see <>. +For more details on specifying scripts see <>. + +==== Allowed return types + +Whilst any valid script object can be used within a single script, the scripts must return or store in the `_agg` object only the following types: + +* primitive types +* String +* Map (containing only keys and values of the types listed here) +* Array (containing elements of only the types listed here) ==== Scope of scripts From 821021f0e478ca44d828e62c139ea19f0fd7290a Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Tue, 18 Aug 2015 14:55:35 +0100 Subject: [PATCH 51/51] Scripting: Propagate Headers and Context through to ScriptService At the moment if an index script is used in a request, the spawned request to get the indexed script from the `.scripts` index does not get the headers and context copied to it from the original request. This change makes the calls to the `ScriptService` pass in a `HasContextAndHeaders` object that can provide the headers and context. For the `search()` method the context and headers are retrieved from `SearchContext.current()`.
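As a rough sketch of the new calling convention (the enclosing action class is hypothetical; the argument shape follows the diff below): a caller that has a request implementing `HasContextAndHeaders` now hands it to the `ScriptService` so its headers and context can be copied onto the spawned `.scripts` lookup:

```java
// Hypothetical call site inside a transport action. The request implements
// HasContextAndHeaders, so passing it as the extra argument lets ScriptService
// copy its headers and context onto the spawned indexed-script GET request.
ExecutableScript executable = scriptService.executable(
        request.template(),              // the template/script to compile
        ScriptContext.Standard.SEARCH,   // the context the script executes in
        request);                        // HasContextAndHeaders to propagate
BytesReference processedTemplate = (BytesReference) executable.run();
```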
Closes #12891 --- .../TransportRenderSearchTemplateAction.java | 2 +- .../percolate/TransportPercolateAction.java | 2 +- .../type/TransportSearchCountAction.java | 3 +- ...TransportSearchDfsQueryAndFetchAction.java | 3 +- ...ransportSearchDfsQueryThenFetchAction.java | 6 +- .../TransportSearchQueryAndFetchAction.java | 3 +- .../TransportSearchQueryThenFetchAction.java | 4 +- .../type/TransportSearchScanAction.java | 4 +- ...nsportSearchScrollQueryAndFetchAction.java | 9 +- ...sportSearchScrollQueryThenFetchAction.java | 9 +- .../type/TransportSearchScrollScanAction.java | 3 +- .../suggest/TransportSuggestAction.java | 2 +- .../action/update/UpdateHelper.java | 2 +- .../DelegatingHasContextAndHeaders.java | 112 ++++++++++++ .../index/mapper/DocumentMapper.java | 5 +- .../index/query/TemplateQueryParser.java | 5 +- .../percolator/PercolateContext.java | 13 +- .../percolator/PercolatorService.java | 52 +++--- .../elasticsearch/script/ScriptService.java | 24 ++- .../elasticsearch/search/SearchService.java | 31 +++- .../aggregations/InternalAggregation.java | 7 +- .../SignificantTermsParametersParser.java | 4 +- .../bucket/significant/heuristics/GND.java | 4 +- .../significant/heuristics/JLHScore.java | 4 +- .../heuristics/NXYSignificanceHeuristic.java | 4 +- .../heuristics/PercentageScore.java | 8 +- .../heuristics/ScriptHeuristic.java | 22 ++- .../SignificanceHeuristicParser.java | 4 +- .../scripted/InternalScriptedMetric.java | 2 +- .../scripted/ScriptedMetricAggregator.java | 6 +- .../BucketScriptPipelineAggregator.java | 2 +- .../BucketSelectorPipelineAggregator.java | 2 +- .../controller/SearchPhaseController.java | 10 +- .../search/internal/DefaultSearchContext.java | 83 +-------- .../internal/FilteredSearchContext.java | 83 +-------- .../search/internal/SearchContext.java | 7 +- .../search/suggest/SuggestContextParser.java | 8 +- .../search/suggest/SuggestParseElement.java | 9 +- .../completion/CompletionSuggestParser.java | 12 +- .../suggest/phrase/PhraseSuggestParser.java | 21 ++- .../phrase/PhraseSuggestionBuilder.java | 51 +++--- .../suggest/term/TermSuggestParser.java | 4 +- .../index/query/TemplateQueryParserTest.java | 1 - .../script/CustomScriptContextIT.java | 27 +-- .../script/NativeScriptTests.java | 9 +- .../script/ScriptServiceTests.java | 106 +++++++----- .../SignificantTermsSignificanceScoreIT.java | 14 +- .../search/suggest/CustomSuggester.java | 4 +- .../elasticsearch/test/TestSearchContext.java | 4 +- .../ContextAndHeaderTransportIT.java | 160 ++++++++++++++++++ 50 files changed, 617 insertions(+), 359 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/common/DelegatingHasContextAndHeaders.java diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/TransportRenderSearchTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/TransportRenderSearchTemplateAction.java index d469e29ca96..ab3090a5a81 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/TransportRenderSearchTemplateAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/validate/template/TransportRenderSearchTemplateAction.java @@ -55,7 +55,7 @@ public class TransportRenderSearchTemplateAction extends HandledTransportAction< @Override protected void doRun() throws Exception { - ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH); + ExecutableScript executable = scriptService.executable(request.template(), 
ScriptContext.Standard.SEARCH, request); BytesReference processedTemplate = (BytesReference) executable.run(); RenderSearchTemplateResponse response = new RenderSearchTemplateResponse(); response.source(processedTemplate); diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java index 2194975161a..a9015d24129 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java @@ -146,7 +146,7 @@ public class TransportPercolateAction extends TransportBroadcastAction) AtomicArray.empty()); + final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults, + (AtomicArray) AtomicArray.empty(), request); String scrollId = null; if (request.scroll() != null) { scrollId = buildScrollId(request.searchType(), firstResults, null); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java index 8868379b3bf..7244a1ff58a 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java @@ -134,7 +134,8 @@ public class TransportSearchDfsQueryAndFetchAction extends TransportSearchTypeAc @Override public void doRun() throws IOException { sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults); - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, + queryFetchResults, request); String scrollId = null; if (request.scroll() != null) { scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java index de3032eb887..20bb205fef5 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search.type; import com.carrotsearch.hppc.IntArrayList; + import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -39,8 +40,8 @@ import org.elasticsearch.search.action.SearchServiceTransportAction; import org.elasticsearch.search.controller.SearchPhaseController; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.ShardFetchSearchRequest; import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchTransportRequest; import org.elasticsearch.search.query.QuerySearchRequest; @@ -210,7 +211,8 @@ public class 
TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeA threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, + fetchResults, request); String scrollId = null; if (request.scroll() != null) { scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java index c37fdce7633..3c4f5419f00 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java @@ -81,7 +81,8 @@ public class TransportSearchQueryAndFetchAction extends TransportSearchTypeActio public void doRun() throws IOException { boolean useScroll = request.scroll() != null; sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults); - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, firstResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, + firstResults, request); String scrollId = null; if (request.scroll() != null) { scrollId = buildScrollId(request.searchType(), firstResults, null); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java index edd5cf63d22..c23e5b70c15 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search.type; import com.carrotsearch.hppc.IntArrayList; + import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -145,7 +146,8 @@ public class TransportSearchQueryThenFetchAction extends TransportSearchTypeActi threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable(listener) { @Override public void doRun() throws IOException { - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, fetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults, + fetchResults, request); String scrollId = null; if (request.scroll() != null) { scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java index c5ea86763f4..cf2b4ee8df0 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search.type; import 
com.google.common.collect.ImmutableMap; + import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -73,7 +74,8 @@ public class TransportSearchScanAction extends TransportSearchTypeAction { @Override protected void moveToSecondPhase() throws Exception { - final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults, (AtomicArray) AtomicArray.empty()); + final InternalSearchResponse internalResponse = searchPhaseController.merge(SearchPhaseController.EMPTY_DOCS, firstResults, + (AtomicArray) AtomicArray.empty(), request); String scrollId = null; if (request.scroll() != null) { scrollId = buildScrollId(request.searchType(), firstResults, ImmutableMap.of("total_hits", Long.toString(internalResponse.hits().totalHits()))); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryAndFetchAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryAndFetchAction.java index bb2c82d8831..cd4238ccdea 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryAndFetchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryAndFetchAction.java @@ -21,7 +21,11 @@ package org.elasticsearch.action.search.type; import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.*; +import org.elasticsearch.action.search.ReduceSearchPhaseException; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -188,7 +192,8 @@ public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent private void innerFinishHim() throws Exception { ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults); - final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, queryFetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults, + queryFetchResults, request); String scrollId = null; if (request.scroll() != null) { scrollId = request.scrollId(); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java index 9c7742615c4..85b06ea7860 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollQueryThenFetchAction.java @@ -20,9 +20,14 @@ package org.elasticsearch.action.search.type; import com.carrotsearch.hppc.IntArrayList; + import org.apache.lucene.search.ScoreDoc; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.*; +import org.elasticsearch.action.search.ReduceSearchPhaseException; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.SearchResponse; 
+import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -239,7 +244,7 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent } private void innerFinishHim() { - InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults); + InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults, request); String scrollId = null; if (request.scroll() != null) { scrollId = request.scrollId(); diff --git a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollScanAction.java b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollScanAction.java index 16ab26f7d46..2bc516b2bbe 100644 --- a/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollScanAction.java +++ b/core/src/main/java/org/elasticsearch/action/search/type/TransportSearchScrollScanAction.java @@ -212,7 +212,8 @@ public class TransportSearchScrollScanAction extends AbstractComponent { docs.add(scoreDoc); } } - final InternalSearchResponse internalResponse = searchPhaseController.merge(docs.toArray(new ScoreDoc[0]), queryFetchResults, queryFetchResults); + final InternalSearchResponse internalResponse = searchPhaseController.merge(docs.toArray(new ScoreDoc[0]), queryFetchResults, + queryFetchResults, request); ((InternalSearchHits) internalResponse.hits()).totalHits = Long.parseLong(this.scrollId.getAttributes().get("total_hits")); diff --git a/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java b/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java index c584c8856a5..43141f3e501 100644 --- a/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java +++ b/core/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java @@ -143,7 +143,7 @@ public class TransportSuggestAction extends TransportBroadcastAction executeScript(UpdateRequest request, Map ctx) { try { if (scriptService != null) { - ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE); + ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request); script.setNextVar("ctx", ctx); script.run(); // we need to unwrap the ctx... diff --git a/core/src/main/java/org/elasticsearch/common/DelegatingHasContextAndHeaders.java b/core/src/main/java/org/elasticsearch/common/DelegatingHasContextAndHeaders.java new file mode 100644 index 00000000000..38764db9eae --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/DelegatingHasContextAndHeaders.java @@ -0,0 +1,112 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common; + +import com.carrotsearch.hppc.ObjectObjectAssociativeContainer; + +import org.elasticsearch.common.collect.ImmutableOpenMap; + +import java.util.Set; + +public class DelegatingHasContextAndHeaders implements HasContextAndHeaders { + + private HasContextAndHeaders delegate; + + public DelegatingHasContextAndHeaders(HasContextAndHeaders delegate) { + this.delegate = delegate; + } + + @Override + public void putHeader(String key, V value) { + delegate.putHeader(key, value); + } + + @Override + public void copyContextAndHeadersFrom(HasContextAndHeaders other) { + delegate.copyContextAndHeadersFrom(other); + } + + @Override + public V getHeader(String key) { + return delegate.getHeader(key); + } + + @Override + public boolean hasHeader(String key) { + return delegate.hasHeader(key); + } + + @Override + public V putInContext(Object key, Object value) { + return delegate.putInContext(key, value); + } + + @Override + public Set getHeaders() { + return delegate.getHeaders(); + } + + @Override + public void copyHeadersFrom(HasHeaders from) { + delegate.copyHeadersFrom(from); + } + + @Override + public void putAllInContext(ObjectObjectAssociativeContainer map) { + delegate.putAllInContext(map); + } + + @Override + public V getFromContext(Object key) { + return delegate.getFromContext(key); + } + + @Override + public V getFromContext(Object key, V defaultValue) { + return delegate.getFromContext(key, defaultValue); + } + + @Override + public boolean hasInContext(Object key) { + return delegate.hasInContext(key); + } + + @Override + public int contextSize() { + return delegate.contextSize(); + } + + @Override + public boolean isContextEmpty() { + return delegate.isContextEmpty(); + } + + @Override + public ImmutableOpenMap getContext() { + return delegate.getContext(); + } + + @Override + public void copyContextFrom(HasContext other) { + delegate.copyContextFrom(other); + } + + +} diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 80f2603cf32..ff90dd7af98 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -23,6 +23,7 @@ import com.google.common.base.Function; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; @@ -199,7 +200,7 @@ public class DocumentMapper implements ToXContent { List newFieldMappers = new ArrayList<>(); for (MetadataFieldMapper metadataMapper : this.mapping.metadataMappers) { if (metadataMapper instanceof FieldMapper) { - newFieldMappers.add((FieldMapper) metadataMapper); + newFieldMappers.add(metadataMapper); } } MapperUtils.collect(this.mapping.root, newObjectMappers, newFieldMappers); @@ -452,7 +453,7 @@ public class DocumentMapper implements ToXContent { public Map 
transformSourceAsMap(Map sourceAsMap) { try { // We use the ctx variable and the _source name to be consistent with the update api. - ExecutableScript executable = scriptService.executable(script, ScriptContext.Standard.MAPPING); + ExecutableScript executable = scriptService.executable(script, ScriptContext.Standard.MAPPING, null); Map ctx = new HashMap<>(1); ctx.put("_source", sourceAsMap); executable.setNextVar("ctx", ctx); diff --git a/core/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java index 9b289f30cf1..1b5210d56dd 100644 --- a/core/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java +++ b/core/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java @@ -29,6 +29,7 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.Template; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.HashMap; @@ -68,7 +69,7 @@ public class TemplateQueryParser implements QueryParser { * Parses the template query replacing template parameters with provided * values. Handles both submitting the template as part of the request as * well as referencing only the template name. - * + * * @param parseContext * parse context containing the templated query. */ @@ -77,7 +78,7 @@ public class TemplateQueryParser implements QueryParser { public Query parse(QueryParseContext parseContext) throws IOException { XContentParser parser = parseContext.parser(); Template template = parse(parser, parseContext.parseFieldMatcher()); - ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH); + ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH, SearchContext.current()); BytesReference querySource = (BytesReference) executable.run(); diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java index 5d11bf56bd0..c54cce7d361 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java @@ -32,7 +32,10 @@ import org.apache.lucene.util.Counter; import org.elasticsearch.action.percolate.PercolateShardRequest; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.common.*; +import org.elasticsearch.common.HasContext; +import org.elasticsearch.common.HasContextAndHeaders; +import org.elasticsearch.common.HasHeaders; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.text.StringText; @@ -75,7 +78,11 @@ import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.scan.ScanContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; -import java.util.*; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentMap; /** @@ -121,7 +128,7 @@ public class PercolateContext extends SearchContext { public PercolateContext(PercolateShardRequest request, SearchShardTarget searchShardTarget, IndexShard 
indexShard, IndexService indexService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ScriptService scriptService, Query aliasFilter, ParseFieldMatcher parseFieldMatcher) { - super(parseFieldMatcher); + super(parseFieldMatcher, request); this.indexShard = indexShard; this.indexService = indexService; this.fieldDataService = indexService.fieldData(); diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java index 6096f29262f..73ca113165a 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -19,6 +19,7 @@ package org.elasticsearch.percolator; import com.carrotsearch.hppc.IntObjectHashMap; + import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.index.memory.ExtendedMemoryIndex; @@ -40,6 +41,7 @@ import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -63,9 +65,11 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; +import org.elasticsearch.index.mapper.*; import org.elasticsearch.index.mapper.DocumentMapperForType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.internal.UidFieldMapper; @@ -73,7 +77,10 @@ import org.elasticsearch.index.percolator.stats.ShardPercolateService; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.percolator.QueryCollector.*; +import org.elasticsearch.percolator.QueryCollector.Count; +import org.elasticsearch.percolator.QueryCollector.Match; +import org.elasticsearch.percolator.QueryCollector.MatchAndScore; +import org.elasticsearch.percolator.QueryCollector.MatchAndSort; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.SearchShardTarget; @@ -95,7 +102,9 @@ import java.util.Map; import static org.elasticsearch.common.util.CollectionUtils.eagerTransform; import static org.elasticsearch.index.mapper.SourceToParse.source; -import static org.elasticsearch.percolator.QueryCollector.*; +import static org.elasticsearch.percolator.QueryCollector.count; +import static org.elasticsearch.percolator.QueryCollector.match; +import static org.elasticsearch.percolator.QueryCollector.matchAndScore; public class PercolatorService extends AbstractComponent { @@ -162,9 +171,9 @@ public class PercolatorService extends AbstractComponent { } - public ReduceResult reduce(byte percolatorTypeId, List shardResults) { + public ReduceResult reduce(byte percolatorTypeId, List shardResults, HasContextAndHeaders 
headersContext) { PercolatorType percolatorType = percolatorTypes.get(percolatorTypeId); - return percolatorType.reduce(shardResults); + return percolatorType.reduce(shardResults, headersContext); } public PercolateShardResponse percolate(PercolateShardRequest request) { @@ -423,7 +432,7 @@ public class PercolatorService extends AbstractComponent { // 0x00 is reserved for empty type. byte id(); - ReduceResult reduce(List shardResults); + ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext); PercolateShardResponse doPercolate(PercolateShardRequest request, PercolateContext context, boolean isNested); @@ -437,14 +446,14 @@ public class PercolatorService extends AbstractComponent { } @Override - public ReduceResult reduce(List shardResults) { + public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { long finalCount = 0; for (PercolateShardResponse shardResponse : shardResults) { finalCount += shardResponse.count(); } assert !shardResults.isEmpty(); - InternalAggregations reducedAggregations = reduceAggregations(shardResults); + InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext); return new ReduceResult(finalCount, reducedAggregations); } @@ -481,8 +490,8 @@ public class PercolatorService extends AbstractComponent { } @Override - public ReduceResult reduce(List shardResults) { - return countPercolator.reduce(shardResults); + public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { + return countPercolator.reduce(shardResults, headersContext); } @Override @@ -511,7 +520,7 @@ public class PercolatorService extends AbstractComponent { } @Override - public ReduceResult reduce(List shardResults) { + public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { long foundMatches = 0; int numMatches = 0; for (PercolateShardResponse response : shardResults) { @@ -537,7 +546,7 @@ public class PercolatorService extends AbstractComponent { } assert !shardResults.isEmpty(); - InternalAggregations reducedAggregations = reduceAggregations(shardResults); + InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext); return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedAggregations); } @@ -589,8 +598,8 @@ public class PercolatorService extends AbstractComponent { } @Override - public ReduceResult reduce(List shardResults) { - return matchPercolator.reduce(shardResults); + public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { + return matchPercolator.reduce(shardResults, headersContext); } @Override @@ -622,8 +631,8 @@ public class PercolatorService extends AbstractComponent { } @Override - public ReduceResult reduce(List shardResults) { - return matchPercolator.reduce(shardResults); + public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { + return matchPercolator.reduce(shardResults, headersContext); } @Override @@ -656,7 +665,7 @@ public class PercolatorService extends AbstractComponent { } @Override - public ReduceResult reduce(List shardResults) { + public ReduceResult reduce(List shardResults, HasContextAndHeaders headersContext) { long foundMatches = 0; int nonEmptyResponses = 0; int firstNonEmptyIndex = 0; @@ -735,7 +744,7 @@ public class PercolatorService extends AbstractComponent { } assert !shardResults.isEmpty(); - InternalAggregations reducedAggregations = reduceAggregations(shardResults); + 
InternalAggregations reducedAggregations = reduceAggregations(shardResults, headersContext); return new ReduceResult(foundMatches, finalMatches.toArray(new PercolateResponse.Match[finalMatches.size()]), reducedAggregations); } @@ -843,7 +852,7 @@ public class PercolatorService extends AbstractComponent { } } - private InternalAggregations reduceAggregations(List shardResults) { + private InternalAggregations reduceAggregations(List shardResults, HasContextAndHeaders headersContext) { if (shardResults.get(0).aggregations() == null) { return null; } @@ -852,14 +861,15 @@ public class PercolatorService extends AbstractComponent { for (PercolateShardResponse shardResult : shardResults) { aggregationsList.add(shardResult.aggregations()); } - InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService)); + InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService, + headersContext)); if (aggregations != null) { List pipelineAggregators = shardResults.get(0).pipelineAggregators(); if (pipelineAggregators != null) { List newAggs = new ArrayList<>(eagerTransform(aggregations.asList(), PipelineAggregator.AGGREGATION_TRANFORM_FUNCTION)); for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) { - InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new ReduceContext(bigArrays, - scriptService)); + InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new ReduceContext( + bigArrays, scriptService, headersContext)); newAggs.add(newAgg); } aggregations = new InternalAggregations(newAggs); diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index c63cf47f4ae..3c356a4120d 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -25,6 +25,7 @@ import com.google.common.cache.CacheBuilder; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; import com.google.common.collect.ImmutableMap; + import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.delete.DeleteRequest; @@ -37,6 +38,7 @@ import org.elasticsearch.action.indexedscripts.delete.DeleteIndexedScriptRequest import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptRequest; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.Strings; @@ -114,21 +116,25 @@ public class ScriptService extends AbstractComponent implements Closeable { * @deprecated Use {@link org.elasticsearch.script.Script.ScriptField} instead. This should be removed in * 2.0 */ + @Deprecated public static final ParseField SCRIPT_LANG = new ParseField("lang","script_lang"); /** * @deprecated Use {@link ScriptType#getParseField()} instead. This should * be removed in 2.0 */ + @Deprecated public static final ParseField SCRIPT_FILE = new ParseField("script_file"); /** * @deprecated Use {@link ScriptType#getParseField()} instead. 
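
The ReduceContext constructed in reduceAggregations(...) above now receives the caller's HasContextAndHeaders, so code running while shard results are merged can still read the originating request's context and headers. Below is a minimal sketch of the delegation pattern, with simplified stand-in interfaces; the real HasContextAndHeaders and DelegatingHasContextAndHeaders surfaces are wider than shown, and the real ReduceContext also carries BigArrays and the ScriptService.

    import java.util.HashMap;
    import java.util.Map;

    // Stand-in for HasContextAndHeaders, reduced to header storage only.
    interface HasHeadersSketch {
        void putHeader(String key, Object value);
        Object getHeader(String key);
    }

    // Plain holder, analogous to what a request object provides.
    class HeaderHolderSketch implements HasHeadersSketch {
        private final Map<String, Object> headers = new HashMap<>();
        @Override public void putHeader(String key, Object value) { headers.put(key, value); }
        @Override public Object getHeader(String key) { return headers.get(key); }
    }

    // Stand-in for DelegatingHasContextAndHeaders: forwards every call to the
    // wrapped object, so a subclass inherits the request's headers for free.
    class DelegatingHeadersSketch implements HasHeadersSketch {
        private final HasHeadersSketch delegate;
        DelegatingHeadersSketch(HasHeadersSketch delegate) { this.delegate = delegate; }
        @Override public void putHeader(String key, Object value) { delegate.putHeader(key, value); }
        @Override public Object getHeader(String key) { return delegate.getHeader(key); }
    }

    // ReduceContext extends the delegating base in this patch, so anything
    // holding the reduce context can read the originating request's headers.
    class ReduceContextSketch extends DelegatingHeadersSketch {
        ReduceContextSketch(HasHeadersSketch requestHeaders) { super(requestHeaders); }
    }
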
This should * be removed in 2.0 */ + @Deprecated public static final ParseField SCRIPT_ID = new ParseField("script_id"); /** * @deprecated Use {@link ScriptType#getParseField()} instead. This should * be removed in 2.0 */ + @Deprecated public static final ParseField SCRIPT_INLINE = new ParseField("script"); @Inject @@ -220,7 +226,7 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script. */ - public CompiledScript compile(Script script, ScriptContext scriptContext) { + public CompiledScript compile(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext) { if (script == null) { throw new IllegalArgumentException("The parameter script (Script) must not be null."); } @@ -248,14 +254,14 @@ public class ScriptService extends AbstractComponent implements Closeable { " operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are not supported"); } - return compileInternal(script); + return compileInternal(script, headersContext); } /** * Compiles a script straight-away, or returns the previously compiled and cached script, * without checking if it can be executed based on settings. */ - public CompiledScript compileInternal(Script script) { + public CompiledScript compileInternal(Script script, HasContextAndHeaders context) { if (script == null) { throw new IllegalArgumentException("The parameter script (Script) must not be null."); } @@ -292,7 +298,7 @@ public class ScriptService extends AbstractComponent implements Closeable { //the script has been updated in the index since the last look up. final IndexedScript indexedScript = new IndexedScript(lang, name); name = indexedScript.id; - code = getScriptFromIndex(indexedScript.lang, indexedScript.id); + code = getScriptFromIndex(indexedScript.lang, indexedScript.id, context); } String cacheKey = getCacheKey(scriptEngineService, type == ScriptType.INLINE ? 
null : name, code); @@ -333,13 +339,13 @@ public class ScriptService extends AbstractComponent implements Closeable { return scriptLang; } - String getScriptFromIndex(String scriptLang, String id) { + String getScriptFromIndex(String scriptLang, String id, HasContextAndHeaders context) { if (client == null) { throw new IllegalArgumentException("Got an indexed script with no Client registered."); } scriptLang = validateScriptLanguage(scriptLang); GetRequest getRequest = new GetRequest(SCRIPT_INDEX, scriptLang, id); - getRequest.copyContextAndHeadersFrom(SearchContext.current()); + getRequest.copyContextAndHeadersFrom(context); GetResponse responseFields = client.get(getRequest).actionGet(); if (responseFields.isExists()) { return getScriptFromResponse(responseFields); @@ -432,8 +438,8 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Compiles (or retrieves from cache) and executes the provided script */ - public ExecutableScript executable(Script script, ScriptContext scriptContext) { - return executable(compile(script, scriptContext), script.getParams()); + public ExecutableScript executable(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext) { + return executable(compile(script, scriptContext, headersContext), script.getParams()); } /** @@ -447,7 +453,7 @@ public class ScriptService extends AbstractComponent implements Closeable { * Compiles (or retrieves from cache) and executes the provided search script */ public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext) { - CompiledScript compiledScript = compile(script, scriptContext); + CompiledScript compiledScript = compile(script, scriptContext, SearchContext.current()); return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript, lookup, script.getParams()); } diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 15eb3c0e8dc..4288e1d098a 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.ObjectSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.google.common.collect.ImmutableMap; + import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -82,10 +83,23 @@ import org.elasticsearch.script.Template; import org.elasticsearch.script.mustache.MustacheScriptEngineService; import org.elasticsearch.search.dfs.DfsPhase; import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.fetch.*; -import org.elasticsearch.search.internal.*; +import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult; +import org.elasticsearch.search.fetch.ShardFetchRequest; +import org.elasticsearch.search.internal.DefaultSearchContext; +import org.elasticsearch.search.internal.InternalScrollSearchRequest; +import org.elasticsearch.search.internal.ScrollContext; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext.Lifetime; -import org.elasticsearch.search.query.*; +import 
org.elasticsearch.search.internal.ShardSearchLocalRequest; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.search.query.QueryPhase; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.search.query.ScrollQuerySearchResult; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.threadpool.ThreadPool; @@ -736,7 +750,7 @@ public class SearchService extends AbstractLifecycleComponent { BytesReference processedQuery; if (request.template() != null) { - ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH); + ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, searchContext); processedQuery = (BytesReference) executable.run(); } else { if (!hasLength(request.templateSource())) { @@ -753,7 +767,7 @@ public class SearchService extends AbstractLifecycleComponent { //Try to double parse for nested template id/file parser = null; try { - ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH); + ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH, searchContext); processedQuery = (BytesReference) executable.run(); parser = XContentFactory.xContent(processedQuery).createParser(processedQuery); } catch (ElasticsearchParseException epe) { @@ -761,7 +775,7 @@ public class SearchService extends AbstractLifecycleComponent { //for backwards compatibility and keep going template = new Template(template.getScript(), ScriptService.ScriptType.FILE, MustacheScriptEngineService.NAME, null, template.getParams()); - ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH); + ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH, searchContext); processedQuery = (BytesReference) executable.run(); } if (parser != null) { @@ -771,7 +785,8 @@ public class SearchService extends AbstractLifecycleComponent { //An inner template referring to a filename or id template = new Template(innerTemplate.getScript(), innerTemplate.getType(), MustacheScriptEngineService.NAME, null, template.getParams()); - ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH); + ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH, + searchContext); processedQuery = (BytesReference) executable.run(); } } catch (ScriptParseException e) { @@ -779,7 +794,7 @@ public class SearchService extends AbstractLifecycleComponent { } } } else { - ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH); + ExecutableScript executable = this.scriptService.executable(template, ScriptContext.Standard.SEARCH, searchContext); processedQuery = (BytesReference) executable.run(); } } catch (IOException e) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 286f23ebe83..1c67a941daf 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -18,6 +18,8 @@ */ 
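
The ScriptService hunks above are the heart of this patch: compile(...), compileInternal(...) and executable(...) now take the caller's HasContextAndHeaders, and getScriptFromIndex(...) copies context and headers from that argument onto its GetRequest instead of reaching for the SearchContext.current() thread-local, which is absent on non-search paths. A sketch of the lookup under the new contract; the SCRIPT_INDEX value and the missing-script handling here are assumptions for illustration:

    import org.elasticsearch.action.get.GetRequest;
    import org.elasticsearch.action.get.GetResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.HasContextAndHeaders;

    class IndexedScriptLookupSketch {
        // Assumed value of ScriptService.SCRIPT_INDEX, shown here for self-containment.
        private static final String SCRIPT_INDEX = ".scripts";

        // The indexed-script lookup issues its own GetRequest, which must now
        // inherit the calling request's context and headers explicitly.
        String getScriptFromIndexSketch(Client client, String scriptLang, String id,
                                        HasContextAndHeaders context) {
            GetRequest getRequest = new GetRequest(SCRIPT_INDEX, scriptLang, id);
            getRequest.copyContextAndHeadersFrom(context); // was: SearchContext.current()
            GetResponse response = client.get(getRequest).actionGet();
            if (response.isExists()) {
                return response.getSourceAsString(); // real code: getScriptFromResponse(response)
            }
            return null; // real code raises an error for missing scripts (illustrative)
        }
    }
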
 package org.elasticsearch.search.aggregations;
 
+import org.elasticsearch.common.DelegatingHasContextAndHeaders;
+import org.elasticsearch.common.HasContextAndHeaders;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -90,12 +92,13 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St
         }
     }
 
-    public static class ReduceContext {
+    public static class ReduceContext extends DelegatingHasContextAndHeaders {
 
         private final BigArrays bigArrays;
         private ScriptService scriptService;
 
-        public ReduceContext(BigArrays bigArrays, ScriptService scriptService) {
+        public ReduceContext(BigArrays bigArrays, ScriptService scriptService, HasContextAndHeaders headersContext) {
+            super(headersContext);
             this.bigArrays = bigArrays;
             this.scriptService = scriptService;
         }
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParametersParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParametersParser.java
index ba37c162245..9768a8617da 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParametersParser.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsParametersParser.java
@@ -60,11 +60,11 @@ public class SignificantTermsParametersParser extends AbstractTermsParametersPar
 
     @Override
     public void parseSpecial(String aggregationName, XContentParser parser, SearchContext context, XContentParser.Token token, String currentFieldName) throws IOException {
-        
+
         if (token == XContentParser.Token.START_OBJECT) {
             SignificanceHeuristicParser significanceHeuristicParser = significanceHeuristicParserMapper.get(currentFieldName);
             if (significanceHeuristicParser != null) {
-                significanceHeuristic = significanceHeuristicParser.parse(parser, context.parseFieldMatcher());
+                significanceHeuristic = significanceHeuristicParser.parse(parser, context.parseFieldMatcher(), context);
             } else if (context.parseFieldMatcher().match(currentFieldName, BACKGROUND_FILTER)) {
                 filter = context.queryParserService().parseInnerFilter(parser).query();
             } else {
diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java
index 0ac3b1dba1d..00d12a8e0b4 100644
--- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java
+++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/GND.java
@@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.QueryParsingException;
+import org.elasticsearch.search.internal.SearchContext;
 
 import java.io.IOException;
 
@@ -115,7 +116,8 @@ public class GND extends NXYSignificanceHeuristic {
         }
 
         @Override
-        public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryParsingException {
+        public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context)
+                throws IOException, QueryParsingException {
             String givenName = parser.currentName();
             boolean backgroundIsSuperset = true;
             XContentParser.Token
token = parser.nextToken(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java index 78f15733291..d5bfc5cb4dd 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -108,7 +109,8 @@ public class JLHScore extends SignificanceHeuristic { public static class JLHScoreParser implements SignificanceHeuristicParser { @Override - public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryParsingException { + public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) + throws IOException, QueryParsingException { // move to the closing bracket if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) { throw new ElasticsearchParseException("failed to parse [jhl] significance heuristic. expected an empty object, but found [{}] instead", parser.currentToken()); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java index cc684c83ef4..4d866616bd3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -138,7 +139,8 @@ public abstract class NXYSignificanceHeuristic extends SignificanceHeuristic { public static abstract class NXYParser implements SignificanceHeuristicParser { @Override - public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryParsingException { + public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) + throws IOException, QueryParsingException { String givenName = parser.currentName(); boolean includeNegatives = false; boolean backgroundIsSuperset = true; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java index 1587a8f5fdf..d613ef2f180 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/PercentageScore.java @@ -28,6 +28,7 @@ import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -57,7 +58,7 @@ public class PercentageScore extends SignificanceHeuristic { /** * Indicates the significance of a term in a sample by determining what percentage - * of all occurrences of a term are found in the sample. + * of all occurrences of a term are found in the sample. */ @Override public double getScore(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize) { @@ -65,7 +66,7 @@ public class PercentageScore extends SignificanceHeuristic { if (supersetFreq == 0) { // avoid a divide by zero issue return 0; - } + } return (double) subsetFreq / (double) supersetFreq; } @@ -77,7 +78,8 @@ public class PercentageScore extends SignificanceHeuristic { public static class PercentageScoreParser implements SignificanceHeuristicParser { @Override - public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryParsingException { + public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) + throws IOException, QueryParsingException { // move to the closing bracket if (!parser.nextToken().equals(XContentParser.Token.END_OBJECT)) { throw new ElasticsearchParseException("failed to parse [percentage] significance heuristic. expected an empty object, but got [{}] instead", parser.currentToken()); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java index 1be9df21a58..c20399ea237 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java @@ -24,17 +24,21 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParsingException; -import org.elasticsearch.script.*; +import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.Script; import org.elasticsearch.script.Script.ScriptField; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptParameterParser; import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.Map; @@ -81,8 +85,9 @@ public class ScriptHeuristic extends SignificanceHeuristic { } + @Override public void initialize(InternalAggregation.ReduceContext context) { - searchScript = 
context.scriptService().executable(script, ScriptContext.Standard.AGGS); + searchScript = context.scriptService().executable(script, ScriptContext.Standard.AGGS, context); searchScript.setNextVar("_subset_freq", subsetDfHolder); searchScript.setNextVar("_subset_size", subsetSizeHolder); searchScript.setNextVar("_superset_freq", supersetDfHolder); @@ -129,7 +134,8 @@ public class ScriptHeuristic extends SignificanceHeuristic { } @Override - public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryParsingException { + public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) + throws IOException, QueryParsingException { String heuristicName = parser.currentName(); Script script = null; XContentParser.Token token; @@ -169,7 +175,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { } ExecutableScript searchScript; try { - searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS); + searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS, context); } catch (Exception e) { throw new ElasticsearchParseException("failed to parse [{}] significance heuristic. the script [{}] could not be loaded", e, script, heuristicName); } @@ -204,21 +210,23 @@ public class ScriptHeuristic extends SignificanceHeuristic { public final class LongAccessor extends Number { public long value; + @Override public int intValue() { return (int)value; } + @Override public long longValue() { return value; } @Override public float floatValue() { - return (float)value; + return value; } @Override public double doubleValue() { - return (double)value; + return value; } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java index a6489220f51..cd6f7802dab 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristicParser.java @@ -23,12 +23,14 @@ package org.elasticsearch.search.aggregations.bucket.significant.heuristics; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; public interface SignificanceHeuristicParser { - SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryParsingException; + SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context) throws IOException, + QueryParsingException; String[] getNames(); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index f4d726aa49f..d39a0335ac3 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -91,7 +91,7 @@ public class InternalScriptedMetric extends 
InternalMetricsAggregation implement vars.putAll(firstAggregation.reduceScript.getParams()); } CompiledScript compiledScript = reduceContext.scriptService().compile(firstAggregation.reduceScript, - ScriptContext.Standard.AGGS); + ScriptContext.Standard.AGGS, reduceContext); ExecutableScript script = reduceContext.scriptService().executable(compiledScript, vars); aggregation = script.run(); } else { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index a24dd4d3a7e..2c1caaa5241 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -58,11 +58,11 @@ public class ScriptedMetricAggregator extends MetricsAggregator { this.params = params; ScriptService scriptService = context.searchContext().scriptService(); if (initScript != null) { - scriptService.executable(initScript, ScriptContext.Standard.AGGS).run(); + scriptService.executable(initScript, ScriptContext.Standard.AGGS, context.searchContext()).run(); } this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS); if (combineScript != null) { - this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS); + this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS, context.searchContext()); } else { this.combineScript = null; } @@ -159,7 +159,7 @@ public class ScriptedMetricAggregator extends MetricsAggregator { return null; } } - + @SuppressWarnings({ "unchecked" }) private static T deepCopyParams(T original, SearchContext context) { T clone; diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java index 06559645821..ff324d849c1 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java @@ -104,7 +104,7 @@ public class BucketScriptPipelineAggregator extends PipelineAggregator { InternalMultiBucketAggregation originalAgg = (InternalMultiBucketAggregation) aggregation; List buckets = originalAgg.getBuckets(); - CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS); + CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext); List newBuckets = new ArrayList<>(); for (Bucket bucket : buckets) { Map vars = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java index 154a729d046..9e7654fd8e6 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java @@ -98,7 +98,7 @@ public class BucketSelectorPipelineAggregator extends 
PipelineAggregator { InternalMultiBucketAggregation originalAgg = (InternalMultiBucketAggregation) aggregation; List buckets = originalAgg.getBuckets(); - CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS); + CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext); List newBuckets = new ArrayList<>(); for (Bucket bucket : buckets) { Map vars = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java index 74e263220e4..5836611e2ba 100644 --- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java @@ -31,6 +31,7 @@ import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldDocs; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.collect.HppcMaps; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -296,7 +297,8 @@ public class SearchPhaseController extends AbstractComponent { } } - public InternalSearchResponse merge(ScoreDoc[] sortedDocs, AtomicArray queryResultsArr, AtomicArray fetchResultsArr) { + public InternalSearchResponse merge(ScoreDoc[] sortedDocs, AtomicArray queryResultsArr, + AtomicArray fetchResultsArr, HasContextAndHeaders headersContext) { List> queryResults = queryResultsArr.asList(); List> fetchResults = fetchResultsArr.asList(); @@ -404,7 +406,7 @@ public class SearchPhaseController extends AbstractComponent { for (AtomicArray.Entry entry : queryResults) { aggregationsList.add((InternalAggregations) entry.value.queryResult().aggregations()); } - aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService)); + aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService, headersContext)); } } @@ -413,8 +415,8 @@ public class SearchPhaseController extends AbstractComponent { if (pipelineAggregators != null) { List newAggs = new ArrayList<>(eagerTransform(aggregations.asList(), PipelineAggregator.AGGREGATION_TRANFORM_FUNCTION)); for (SiblingPipelineAggregator pipelineAggregator : pipelineAggregators) { - InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new ReduceContext(bigArrays, - scriptService)); + InternalAggregation newAgg = pipelineAggregator.doReduce(new InternalAggregations(newAggs), new ReduceContext( + bigArrays, scriptService, headersContext)); newAggs.add(newAgg); } aggregations = new InternalAggregations(newAggs); diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index 057d2098f53..2d24d26bae4 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.internal; -import com.carrotsearch.hppc.ObjectObjectAssociativeContainer; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Collector; 
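
SearchPhaseController.merge(...) gains the same trailing parameter. Its call sites are outside this hunk; a plausible caller shape, assuming (as elsewhere in the patch) that the original request object doubles as the headers context:

    // Hypothetical caller fragment (not part of this hunk): the SearchRequest
    // already travels with its context and headers, so it can serve as the new
    // headersContext argument; ReduceContext then sees those headers during merge.
    InternalSearchResponse merged = searchPhaseController.merge(
            sortedDocs, queryResultsArr, fetchResultsArr, request);
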
@@ -30,12 +29,8 @@ import org.apache.lucene.search.Sort; import org.apache.lucene.util.Counter; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.common.HasContext; -import org.elasticsearch.common.HasContextAndHeaders; -import org.elasticsearch.common.HasHeaders; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseFieldMatcher; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.function.BoostScoreFunction; @@ -78,7 +73,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; /** * @@ -146,7 +140,7 @@ public class DefaultSearchContext extends SearchContext { BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher, TimeValue timeout ) { - super(parseFieldMatcher); + super(parseFieldMatcher, request); this.id = id; this.request = request; this.searchType = request.searchType(); @@ -724,81 +718,6 @@ public class DefaultSearchContext extends SearchContext { return innerHitsContext; } - @Override - public V putInContext(Object key, Object value) { - return request.putInContext(key, value); - } - - @Override - public void putAllInContext(ObjectObjectAssociativeContainer map) { - request.putAllInContext(map); - } - - @Override - public V getFromContext(Object key) { - return request.getFromContext(key); - } - - @Override - public V getFromContext(Object key, V defaultValue) { - return request.getFromContext(key, defaultValue); - } - - @Override - public boolean hasInContext(Object key) { - return request.hasInContext(key); - } - - @Override - public int contextSize() { - return request.contextSize(); - } - - @Override - public boolean isContextEmpty() { - return request.isContextEmpty(); - } - - @Override - public ImmutableOpenMap getContext() { - return request.getContext(); - } - - @Override - public void copyContextFrom(HasContext other) { - request.copyContextFrom(other); - } - - @Override - public void putHeader(String key, V value) { - request.putHeader(key, value); - } - - @Override - public V getHeader(String key) { - return request.getHeader(key); - } - - @Override - public boolean hasHeader(String key) { - return request.hasHeader(key); - } - - @Override - public Set getHeaders() { - return request.getHeaders(); - } - - @Override - public void copyHeadersFrom(HasHeaders from) { - request.copyHeadersFrom(from); - } - - @Override - public void copyContextAndHeadersFrom(HasContextAndHeaders other) { - request.copyContextAndHeadersFrom(other); - } - @Override public Map, Collector> queryCollectors() { return queryCollectors; diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 2f79d03234e..a4b9c4d852b 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -19,16 +19,13 @@ package org.elasticsearch.search.internal; -import com.carrotsearch.hppc.ObjectObjectAssociativeContainer; - import org.apache.lucene.search.Collector; import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.util.Counter; import 
org.elasticsearch.action.search.SearchType; import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.common.*; -import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; @@ -59,7 +56,6 @@ import org.elasticsearch.search.suggest.SuggestionSearchContext; import java.util.List; import java.util.Map; -import java.util.Set; public abstract class FilteredSearchContext extends SearchContext { @@ -67,7 +63,7 @@ public abstract class FilteredSearchContext extends SearchContext { public FilteredSearchContext(SearchContext in) { //inner_hits in percolator ends up with null inner search context - super(in == null ? ParseFieldMatcher.EMPTY : in.parseFieldMatcher()); + super(in == null ? ParseFieldMatcher.EMPTY : in.parseFieldMatcher(), in); this.in = in; } @@ -526,81 +522,6 @@ public abstract class FilteredSearchContext extends SearchContext { return in.timeEstimateCounter(); } - @Override - public V putInContext(Object key, Object value) { - return in.putInContext(key, value); - } - - @Override - public void putAllInContext(ObjectObjectAssociativeContainer map) { - in.putAllInContext(map); - } - - @Override - public V getFromContext(Object key) { - return in.getFromContext(key); - } - - @Override - public V getFromContext(Object key, V defaultValue) { - return in.getFromContext(key, defaultValue); - } - - @Override - public boolean hasInContext(Object key) { - return in.hasInContext(key); - } - - @Override - public int contextSize() { - return in.contextSize(); - } - - @Override - public boolean isContextEmpty() { - return in.isContextEmpty(); - } - - @Override - public ImmutableOpenMap getContext() { - return in.getContext(); - } - - @Override - public void copyContextFrom(HasContext other) { - in.copyContextFrom(other); - } - - @Override - public void putHeader(String key, V value) { - in.putHeader(key, value); - } - - @Override - public V getHeader(String key) { - return in.getHeader(key); - } - - @Override - public boolean hasHeader(String key) { - return in.hasHeader(key); - } - - @Override - public Set getHeaders() { - return in.getHeaders(); - } - - @Override - public void copyHeadersFrom(HasHeaders from) { - in.copyHeadersFrom(from); - } - - @Override - public void copyContextAndHeadersFrom(HasContextAndHeaders other) { - in.copyContextAndHeadersFrom(other); - } - @Override public SubPhaseContext getFetchSubPhaseContext(FetchSubPhase.ContextFactory contextFactory) { return in.getFetchSubPhaseContext(contextFactory); diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java index a7e45d18fb6..6b6c4225861 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -21,12 +21,14 @@ package org.elasticsearch.search.internal; import com.google.common.collect.Iterables; import com.google.common.collect.Multimap; import com.google.common.collect.MultimapBuilder; + import org.apache.lucene.search.Collector; import org.apache.lucene.search.Query; import org.apache.lucene.search.Sort; import org.apache.lucene.util.Counter; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.cache.recycler.PageCacheRecycler; +import 
org.elasticsearch.common.DelegatingHasContextAndHeaders;
 import org.elasticsearch.common.HasContextAndHeaders;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseFieldMatcher;
@@ -67,7 +69,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicBoolean;
 
-public abstract class SearchContext implements Releasable, HasContextAndHeaders {
+public abstract class SearchContext extends DelegatingHasContextAndHeaders implements Releasable {
 
     private static ThreadLocal<SearchContext> current = new ThreadLocal<>();
     public final static int DEFAULT_TERMINATE_AFTER = 0;
@@ -91,7 +93,8 @@ public abstract class SearchContext implements Releasable, HasContextAndHeaders
 
     protected final ParseFieldMatcher parseFieldMatcher;
 
-    protected SearchContext(ParseFieldMatcher parseFieldMatcher) {
+    protected SearchContext(ParseFieldMatcher parseFieldMatcher, HasContextAndHeaders contextHeaders) {
+        super(contextHeaders);
         this.parseFieldMatcher = parseFieldMatcher;
     }
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java
index 98e450d1265..040ee4e85ea 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestContextParser.java
@@ -18,13 +18,15 @@
  */
 package org.elasticsearch.search.suggest;
 
-import java.io.IOException;
-
+import org.elasticsearch.common.HasContextAndHeaders;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.query.IndexQueryParserService;
 
+import java.io.IOException;
+
 public interface SuggestContextParser {
 
-    public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, IndexQueryParserService queryParserService) throws IOException;
+    public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService,
+            IndexQueryParserService queryParserService, HasContextAndHeaders headersContext) throws IOException;
 
 }
\ No newline at end of file
diff --git a/core/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java b/core/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java
index 637ed3d6c48..d2c2bc5d937 100644
--- a/core/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java
+++ b/core/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.search.suggest;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.HasContextAndHeaders;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.mapper.MapperService;
@@ -45,11 +46,13 @@ public final class SuggestParseElement implements SearchParseElement {
 
     @Override
     public void parse(XContentParser parser, SearchContext context) throws Exception {
-        SuggestionSearchContext suggestionSearchContext = parseInternal(parser, context.mapperService(), context.queryParserService(), context.shardTarget().index(), context.shardTarget().shardId());
+        SuggestionSearchContext suggestionSearchContext = parseInternal(parser, context.mapperService(), context.queryParserService(),
+                context.shardTarget().index(), context.shardTarget().shardId(), context);
         context.suggest(suggestionSearchContext);
     }
 
-    public SuggestionSearchContext
parseInternal(XContentParser parser, MapperService mapperService, IndexQueryParserService queryParserService, String index, int shardId) throws IOException { + public SuggestionSearchContext parseInternal(XContentParser parser, MapperService mapperService, + IndexQueryParserService queryParserService, String index, int shardId, HasContextAndHeaders headersContext) throws IOException { SuggestionSearchContext suggestionSearchContext = new SuggestionSearchContext(); BytesRef globalText = null; @@ -88,7 +91,7 @@ public final class SuggestParseElement implements SearchParseElement { throw new IllegalArgumentException("Suggester[" + fieldName + "] not supported"); } final SuggestContextParser contextParser = suggesters.get(fieldName).getContextParser(); - suggestionContext = contextParser.parse(parser, mapperService, queryParserService); + suggestionContext = contextParser.parse(parser, mapperService, queryParserService, headersContext); } } if (suggestionContext != null) { diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java index 9802db38130..8470633fdc5 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.search.suggest.completion; +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.Fuzziness; @@ -49,13 +50,14 @@ public class CompletionSuggestParser implements SuggestContextParser { } @Override - public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, IndexQueryParserService queryParserService) throws IOException { + public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, + IndexQueryParserService queryParserService, HasContextAndHeaders headersContext) throws IOException { XContentParser.Token token; String fieldName = null; CompletionSuggestionContext suggestion = new CompletionSuggestionContext(completionSuggester); - + XContentParser contextParser = null; - + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); @@ -90,7 +92,7 @@ public class CompletionSuggestParser implements SuggestContextParser { // Copy the current structure. 
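
The long runs of deleted @Override forwarders in DefaultSearchContext and FilteredSearchContext earlier in this patch are the payoff of the new base class: SearchContext now extends DelegatingHasContextAndHeaders, so each subclass names its delegate once in the constructor, and the suggest parsers above receive the same headers context explicitly. A compressed before/after, reusing the stand-in types from the ReduceContext sketch earlier:

    // Before: each SearchContext subclass hand-wrote a forwarding override per
    // method, e.g.
    //   @Override public Object getHeader(String key) { return request.getHeader(key); }
    // repeated for every context/header accessor.

    // After: the base class owns the delegation; a subclass names its delegate once.
    abstract class SearchContextSketch extends DelegatingHeadersSketch {
        protected SearchContextSketch(HasHeadersSketch contextHeaders) {
            super(contextHeaders); // one delegation point replaces the per-subclass forwarders
        }
    }

    class DefaultSearchContextSketch extends SearchContextSketch {
        DefaultSearchContextSketch(HasHeadersSketch shardRequest) {
            super(shardRequest); // the shard search request carries the caller's headers
        }
    }
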
We will parse, once the mapping is provided XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()); builder.copyCurrentStructure(parser); - BytesReference bytes = builder.bytes(); + BytesReference bytes = builder.bytes(); contextParser = parser.contentType().xContent().createParser(bytes); } else { throw new IllegalArgumentException("suggester [completion] doesn't support field [" + fieldName + "]"); @@ -99,7 +101,7 @@ public class CompletionSuggestParser implements SuggestContextParser { throw new IllegalArgumentException("suggester[completion] doesn't support field [" + fieldName + "]"); } } - + suggestion.fieldType((CompletionFieldMapper.CompletionFieldType) mapperService.smartNameFieldType(suggestion.getField())); CompletionFieldMapper.CompletionFieldType fieldType = suggestion.fieldType(); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java index 0f6a8096973..13149e20e4d 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java @@ -22,6 +22,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.Terms; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.HasContextAndHeaders; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; @@ -48,12 +49,13 @@ public final class PhraseSuggestParser implements SuggestContextParser { } @Override - public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, IndexQueryParserService queryParserService) throws IOException { + public SuggestionSearchContext.SuggestionContext parse(XContentParser parser, MapperService mapperService, + IndexQueryParserService queryParserService, HasContextAndHeaders headersContext) throws IOException { PhraseSuggestionContext suggestion = new PhraseSuggestionContext(suggester); suggestion.setQueryParserService(queryParserService); XContentParser.Token token; String fieldName = null; - boolean gramSizeSet = false; + boolean gramSizeSet = false; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); @@ -140,7 +142,8 @@ public final class PhraseSuggestParser implements SuggestContextParser { throw new IllegalArgumentException("suggester[phrase][collate] query already set, doesn't support additional [" + fieldName + "]"); } Template template = Template.parse(parser, queryParserService.parseFieldMatcher()); - CompiledScript compiledScript = suggester.scriptService().compile(template, ScriptContext.Standard.SEARCH); + CompiledScript compiledScript = suggester.scriptService().compile(template, ScriptContext.Standard.SEARCH, + headersContext); suggestion.setCollateQueryScript(compiledScript); } else if ("params".equals(fieldName)) { suggestion.setCollateScriptParams(parser.map()); @@ -162,7 +165,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { throw new IllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]"); } } - + if (suggestion.getField() == null) { throw new IllegalArgumentException("The required field option is missing"); } @@ -178,11 
+181,11 @@ public final class PhraseSuggestParser implements SuggestContextParser { suggestion.setAnalyzer(fieldType.searchAnalyzer()); } } - + if (suggestion.model() == null) { suggestion.setModel(StupidBackoffScorer.FACTORY); } - + if (!gramSizeSet || suggestion.generators().isEmpty()) { final ShingleTokenFilterFactory.Factory shingleFilterFactory = SuggestUtils.getShingleFilterFactory(suggestion.getAnalyzer()); if (!gramSizeSet) { @@ -204,9 +207,9 @@ public final class PhraseSuggestParser implements SuggestContextParser { suggestion.addGenerator(generator); } } - - - + + + return suggestion; } diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index ac990deffb7..d5e942b52e1 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -20,11 +20,16 @@ package org.elasticsearch.search.suggest.phrase; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.script.Template; import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder; import java.io.IOException; -import java.util.*; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.Map.Entry; +import java.util.Set; /** * Defines the actual suggest command for phrase suggestions ( phrase). @@ -41,7 +46,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder collateParams; private Boolean collatePrune; @@ -67,7 +72,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder>=1 as an absolut number of query terms. - * + * * The default is set to 1.0 which corresponds to that only * corrections with at most 1 missspelled term are returned. */ @@ -131,13 +136,13 @@ public final class PhraseSuggestionBuilder extends SuggestionBuildertrue the phrase suggester will fail if the analyzer only * produces ngrams. the default it true. */ public PhraseSuggestionBuilder forceUnigrams(boolean forceUnigrams) { - this.forceUnigrams = forceUnigrams; + this.forceUnigrams = forceUnigrams; return this; } @@ -149,7 +154,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilderadditive - * smoothing model. + * smoothing model. *

         logger.info("--> setup two test files one with extension and another without");
         Path testFileNoExt = scriptsFilePath.resolve("test_no_ext");
@@ -135,7 +139,7 @@ public class ScriptServiceTests extends ESTestCase {
         logger.info("--> verify that file with extension was correctly processed");
         CompiledScript compiledScript = scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null),
-                ScriptContext.Standard.SEARCH);
+                ScriptContext.Standard.SEARCH, contextAndHeaders);
         assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file"));
 
         logger.info("--> delete both files");
@@ -145,7 +149,8 @@ public class ScriptServiceTests extends ESTestCase {
         logger.info("--> verify that file with extension was correctly removed");
         try {
-            scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH);
+            scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH,
+                    contextAndHeaders);
             fail("the script test_script should no longer exist");
         } catch (IllegalArgumentException ex) {
             assertThat(ex.getMessage(), containsString("Unable to find on disk file script [test_script] using lang [test]"));
@@ -154,49 +159,56 @@
 
     @Test
     public void testScriptsSameNameDifferentLanguage() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         createFileScripts("groovy", "expression");
         CompiledScript groovyScript = scriptService.compile(
-                new Script("file_script", ScriptType.FILE, GroovyScriptEngineService.NAME, null), randomFrom(scriptContexts));
+                new Script("file_script", ScriptType.FILE, GroovyScriptEngineService.NAME, null), randomFrom(scriptContexts),
+                contextAndHeaders);
         assertThat(groovyScript.lang(), equalTo(GroovyScriptEngineService.NAME));
         CompiledScript expressionScript = scriptService.compile(new Script("file_script", ScriptType.FILE, ExpressionScriptEngineService.NAME,
-                null), randomFrom(new ScriptContext[] {ScriptContext.Standard.AGGS, ScriptContext.Standard.SEARCH}));
+                null), randomFrom(new ScriptContext[] { ScriptContext.Standard.AGGS,
+                        ScriptContext.Standard.SEARCH }), contextAndHeaders);
         assertThat(expressionScript.lang(), equalTo(ExpressionScriptEngineService.NAME));
     }
 
     @Test
     public void testInlineScriptCompiledOnceCache() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         CompiledScript compiledScript1 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null),
-                randomFrom(scriptContexts));
+                randomFrom(scriptContexts), contextAndHeaders);
         CompiledScript compiledScript2 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null),
-                randomFrom(scriptContexts));
+                randomFrom(scriptContexts), contextAndHeaders);
         assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled()));
     }
 
     @Test
     public void testInlineScriptCompiledOnceMultipleLangAcronyms() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         CompiledScript compiledScript1 = scriptService.compile(new Script("script", ScriptType.INLINE, "test", null),
-                randomFrom(scriptContexts));
+                randomFrom(scriptContexts), contextAndHeaders);
         CompiledScript compiledScript2 = scriptService.compile(new Script("script", ScriptType.INLINE, "test2", null),
-                randomFrom(scriptContexts));
+                randomFrom(scriptContexts), contextAndHeaders);
         assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled()));
     }
 
     @Test
     public void testFileScriptCompiledOnceMultipleLangAcronyms() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         createFileScripts("test");
         CompiledScript compiledScript1 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null),
-                randomFrom(scriptContexts));
+                randomFrom(scriptContexts), contextAndHeaders);
         CompiledScript compiledScript2 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test2", null),
-                randomFrom(scriptContexts));
+                randomFrom(scriptContexts), contextAndHeaders);
         assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled()));
     }
 
     @Test
     public void testDefaultBehaviourFineGrainedSettings() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         Settings.Builder builder = Settings.builder();
         //rarely inject the default settings, which have no effect
         if (rarely()) {
@@ -213,29 +225,30 @@ public class ScriptServiceTests extends ESTestCase {
         for (ScriptContext scriptContext : scriptContexts) {
             //groovy is not sandboxed, only file scripts are enabled by default
-            assertCompileRejected(GroovyScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext);
-            assertCompileRejected(GroovyScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext);
-            assertCompileAccepted(GroovyScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext);
+            assertCompileRejected(GroovyScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext, contextAndHeaders);
+            assertCompileRejected(GroovyScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext, contextAndHeaders);
+            assertCompileAccepted(GroovyScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext, contextAndHeaders);
             //expression engine is sandboxed, all scripts are enabled by default
             if (!scriptContext.getKey().equals(ScriptContext.Standard.MAPPING.getKey()) &&
                     !scriptContext.getKey().equals(ScriptContext.Standard.UPDATE.getKey())) {
-                assertCompileAccepted(ExpressionScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext);
-                assertCompileAccepted(ExpressionScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext);
-                assertCompileAccepted(ExpressionScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext);
+                assertCompileAccepted(ExpressionScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext, contextAndHeaders);
+                assertCompileAccepted(ExpressionScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext, contextAndHeaders);
+                assertCompileAccepted(ExpressionScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext, contextAndHeaders);
             }
             //mustache engine is sandboxed, all scripts are enabled by default
-            assertCompileAccepted(MustacheScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext);
-            assertCompileAccepted(MustacheScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext);
-            assertCompileAccepted(MustacheScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext);
+            assertCompileAccepted(MustacheScriptEngineService.NAME, "script", ScriptType.INLINE, scriptContext, contextAndHeaders);
+            assertCompileAccepted(MustacheScriptEngineService.NAME, "script", ScriptType.INDEXED, scriptContext, contextAndHeaders);
+            assertCompileAccepted(MustacheScriptEngineService.NAME, "file_script", ScriptType.FILE, scriptContext, contextAndHeaders);
             //custom engine is sandboxed, all scripts are enabled by default
-            assertCompileAccepted("test", "script", ScriptType.INLINE, scriptContext);
-            assertCompileAccepted("test", "script", ScriptType.INDEXED, scriptContext);
-            assertCompileAccepted("test", "file_script", ScriptType.FILE, scriptContext);
+            assertCompileAccepted("test", "script", ScriptType.INLINE, scriptContext, contextAndHeaders);
+            assertCompileAccepted("test", "script", ScriptType.INDEXED, scriptContext, contextAndHeaders);
+            assertCompileAccepted("test", "file_script", ScriptType.FILE, scriptContext, contextAndHeaders);
         }
     }
 
     @Test
     public void testFineGrainedSettings() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         //collect the fine-grained settings to set for this run
         int numScriptSettings = randomIntBetween(0, ScriptType.values().length);
         Map<ScriptType, ScriptMode> scriptSourceSettings = new HashMap<>();
@@ -345,16 +358,16 @@ public class ScriptServiceTests extends ESTestCase {
                     for (String lang : scriptEngineService.types()) {
                         switch (scriptMode) {
                             case ON:
-                                assertCompileAccepted(lang, script, scriptType, scriptContext);
+                                assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders);
                                 break;
                             case OFF:
-                                assertCompileRejected(lang, script, scriptType, scriptContext);
+                                assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders);
                                 break;
                             case SANDBOX:
                                 if (scriptEngineService.sandboxed()) {
-                                    assertCompileAccepted(lang, script, scriptType, scriptContext);
+                                    assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders);
                                 } else {
-                                    assertCompileRejected(lang, script, scriptType, scriptContext);
+                                    assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders);
                                 }
                                 break;
                         }
@@ -366,6 +379,7 @@ public class ScriptServiceTests extends ESTestCase {
 
     @Test
     public void testCompileNonRegisteredContext() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         String pluginName;
         String unknownContext;
@@ -378,7 +392,7 @@ public class ScriptServiceTests extends ESTestCase {
             for (String type : scriptEngineService.types()) {
                 try {
                     scriptService.compile(new Script("test", randomFrom(ScriptType.values()), type, null), new ScriptContext.Plugin(
-                            pluginName, unknownContext));
+                            pluginName, unknownContext), contextAndHeaders);
                     fail("script compilation should have been rejected");
                 } catch(IllegalArgumentException e) {
                     assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported"));
@@ -389,15 +403,17 @@ public class ScriptServiceTests extends ESTestCase {
 
     @Test
     public void testCompileCountedInCompilationStats() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
-        scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
+        scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
         assertEquals(1L, scriptService.stats().getCompilations());
     }
 
     @Test
     public void testExecutableCountedInCompilationStats() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
-        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
+        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
         assertEquals(1L, scriptService.stats().getCompilations());
     }
 
@@ -410,46 +426,52 @@ public class ScriptServiceTests extends ESTestCase {
 
     @Test
     public void testMultipleCompilationsCountedInCompilationStats() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         int numberOfCompilations = randomIntBetween(1, 1024);
         for (int i = 0; i < numberOfCompilations; i++) {
-            scriptService.compile(new Script(i + " + " + i, ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
+            scriptService
+                    .compile(new Script(i + " + " + i, ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
         }
         assertEquals(numberOfCompilations, scriptService.stats().getCompilations());
     }
 
     @Test
     public void testCompilationStatsOnCacheHit() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         Settings.Builder builder = Settings.builder();
         builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING, 1);
         buildScriptService(builder.build());
-        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
-        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
+        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
+        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
         assertEquals(1L, scriptService.stats().getCompilations());
     }
 
     @Test
     public void testFileScriptCountedInCompilationStats() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
         createFileScripts("test");
-        scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), randomFrom(scriptContexts));
+        scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
         assertEquals(1L, scriptService.stats().getCompilations());
     }
 
     @Test
     public void testIndexedScriptCountedInCompilationStats() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         buildScriptService(Settings.EMPTY);
-        scriptService.compile(new Script("script", ScriptType.INDEXED, "test", null), randomFrom(scriptContexts));
+        scriptService.compile(new Script("script", ScriptType.INDEXED, "test", null), randomFrom(scriptContexts), contextAndHeaders);
        assertEquals(1L, scriptService.stats().getCompilations());
     }
 
     @Test
     public void testCacheEvictionCountedInCacheEvictionsStats() throws IOException {
+        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
         Settings.Builder builder = Settings.builder();
         builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING, 1);
         buildScriptService(builder.build());
-        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
-        scriptService.executable(new Script("2+2", ScriptType.INLINE, "test", null), randomFrom(scriptContexts));
+        scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
+        scriptService.executable(new Script("2+2", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders);
         assertEquals(2L, scriptService.stats().getCompilations());
         assertEquals(1L, scriptService.stats().getCacheEvictions());
     }
@@ -462,17 +484,19 @@ public class ScriptServiceTests extends ESTestCase {
         resourceWatcherService.notifyNow();
     }
 
-    private void assertCompileRejected(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) {
+    private void assertCompileRejected(String lang, String script, ScriptType scriptType, ScriptContext scriptContext,
+            HasContextAndHeaders contextAndHeaders) {
         try {
-            scriptService.compile(new Script(script, scriptType, lang, null), scriptContext);
+            scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders);
             fail("compile should have been rejected for lang [" + lang + "], script_type [" + scriptType + "], scripted_op [" + scriptContext + "]");
         } catch(ScriptException e) {
             //all good
         }
     }
 
-    private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) {
-        assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext), notNullValue());
+    private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext,
+            HasContextAndHeaders contextAndHeaders) {
+        assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders), notNullValue());
     }
 
     public static class TestEngineService implements ScriptEngineService {
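The pattern repeated throughout ScriptServiceTests above is the point of the change: ScriptService.compile and ScriptService.executable now take an explicit HasContextAndHeaders argument instead of relying on shared, hidden state. A minimal sketch of the new call shape, assuming ContextAndHeaderHolder lives alongside HasContextAndHeaders in org.elasticsearch.common as these tests suggest (the header key and value are illustrative):

import org.elasticsearch.common.ContextAndHeaderHolder;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptService.ScriptType;

class CompileWithHeadersSketch {
    // scriptService wiring is elided; this only shows the new signature.
    static CompiledScript compile(ScriptService scriptService) {
        ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder();
        // putHeader comes from HasContextAndHeaders; the key/value are made up.
        contextAndHeaders.putHeader("my_header", "my_value");
        return scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null),
                ScriptContext.Standard.SEARCH, contextAndHeaders);
    }
}

Passing the holder explicitly makes the header flow visible at every call site, which is what the integration tests further down assert end to end.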
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
index 526c39a3b28..27bfab05326 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java
@@ -56,6 +56,7 @@ import org.elasticsearch.search.aggregations.bucket.significant.heuristics.Signi
 import org.elasticsearch.search.aggregations.bucket.terms.StringTerms;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsBuilder;
+import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.junit.Test;
 
@@ -235,7 +236,8 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
     public static class SimpleHeuristicParser implements SignificanceHeuristicParser {
 
         @Override
-        public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher) throws IOException, QueryParsingException {
+        public SignificanceHeuristic parse(XContentParser parser, ParseFieldMatcher parseFieldMatcher, SearchContext context)
+                throws IOException, QueryParsingException {
             parser.nextToken();
             return new SimpleHeuristic();
         }
@@ -291,7 +293,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
 
         assertThat(responseBuilder.string(), equalTo(result));
     }
-    
+
     @Test
     public void testDeletesIssue7951() throws Exception {
         String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}";
@@ -311,10 +313,10 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
         indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "4")
                 .setSource(TEXT_FIELD, cat2v2, CLASS_FIELD, "2"));
         indexRandom(true, false, indexRequestBuilderList);
-        
+
         // Now create some holes in the index with selective deletes caused by updates.
         // This is the scenario that caused this issue https://github.com/elasticsearch/elasticsearch/issues/7951
-        // Scoring algorithms throw exceptions if term docFreqs exceed the reported size of the index 
+        // Scoring algorithms throw exceptions if term docFreqs exceed the reported size of the index
         // from which they are taken so need to make sure this doesn't happen.
         String[] text = cat1v1;
         indexRequestBuilderList.clear();
@@ -323,7 +325,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
             indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "1").setSource(TEXT_FIELD, text, CLASS_FIELD, "1"));
         }
         indexRandom(true, false, indexRequestBuilderList);
-        
+
         SearchResponse response1 = client().prepareSearch(INDEX_NAME).setTypes(DOC_TYPE)
                 .addAggregation(new TermsBuilder("class")
                         .field(CLASS_FIELD)
@@ -333,7 +335,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
                         .minDocCount(1)))
                 .execute()
                 .actionGet();
-    }    
+    }
 
     @Test
     public void testBackgroundVsSeparateSet() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java
index e3dfe3b96d3..245a561c339 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java
@@ -20,6 +20,7 @@ package org.elasticsearch.search.suggest;
 
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.util.CharsRefBuilder;
+import org.elasticsearch.common.HasContextAndHeaders;
 import org.elasticsearch.common.text.StringText;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.mapper.MapperService;
@@ -59,7 +60,8 @@ public class CustomSuggester extends Suggester<CustomSuggester.CustomSuggestionsContext> {
             Map<String, Object> options = parser.map();
             CustomSuggestionsContext suggestionContext = new CustomSuggestionsContext(CustomSuggester.this, options);
             suggestionContext.setField((String) options.get("field"));
diff --git a/core/src/test/java/org/elasticsearch/test/TestSearchContext.java b/core/src/test/java/org/elasticsearch/test/TestSearchContext.java
index e6a37dcf02a..48725f4c3e4 100644
--- a/core/src/test/java/org/elasticsearch/test/TestSearchContext.java
+++ b/core/src/test/java/org/elasticsearch/test/TestSearchContext.java
@@ -94,7 +94,7 @@ public class TestSearchContext extends SearchContext {
     private final Map subPhaseContexts = new HashMap<>();
 
     public TestSearchContext(ThreadPool threadPool,PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, IndexService indexService) {
-        super(ParseFieldMatcher.STRICT);
+        super(ParseFieldMatcher.STRICT, null);
         this.pageCacheRecycler = pageCacheRecycler;
         this.bigArrays = bigArrays.withCircuitBreaking();
         this.indexService = indexService;
@@ -105,7 +105,7 @@ public class TestSearchContext extends SearchContext {
     }
 
     public TestSearchContext() {
-        super(ParseFieldMatcher.STRICT);
+        super(ParseFieldMatcher.STRICT, null);
         this.pageCacheRecycler = null;
         this.bigArrays = null;
         this.indexService = null;
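The three test fixtures above follow the same thread: CustomSuggester's context parser now receives a HasContextAndHeaders (hence the new import), and SearchContext's constructor gained a matching parameter (the new null argument in TestSearchContext). A rough sketch of the pattern, with illustrative names rather than the patch's exact signatures:

import java.io.IOException;
import java.util.Map;

import org.elasticsearch.common.HasContextAndHeaders;
import org.elasticsearch.common.xcontent.XContentParser;

// Whatever used to be read from implicit shared state is now an explicit
// HasContextAndHeaders argument that the caller threads through. The class
// and method here are illustrative, not a signature from the patch.
class HeaderAwareParserSketch {
    Map<String, Object> parse(XContentParser parser, HasContextAndHeaders headersContext) throws IOException {
        // Headers travel with the request, so a parser can consult them directly.
        Object traceHeader = headersContext.getHeader("my_trace_header"); // hypothetical header name
        return parser.map();
    }
}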
diff --git a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java
index 7257a08ce9f..8aff7ea5280 100644
--- a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java
+++ b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java
@@ -27,13 +27,16 @@ import org.elasticsearch.action.ActionModule;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest;
 import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse;
 import org.elasticsearch.action.percolate.PercolateResponse;
 import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.support.ActionFilter;
 import org.elasticsearch.action.termvectors.MultiTermVectorsRequest;
@@ -44,6 +47,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.inject.Module;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.http.HttpServerTransport;
 import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.GeoShapeQueryBuilder;
@@ -52,9 +56,16 @@ import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.index.query.TermsLookupQueryBuilder;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.rest.RestController;
+import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.script.Template;
 import org.elasticsearch.script.groovy.GroovyScriptEngineService;
 import org.elasticsearch.script.mustache.MustacheScriptEngineService;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders;
+import org.elasticsearch.search.suggest.Suggest;
+import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
@@ -64,6 +75,7 @@ import org.junit.Before;
 import org.junit.Test;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -77,11 +89,14 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.node.Node.HTTP_ENABLED;
 import static org.elasticsearch.rest.RestStatus.OK;
+import static org.elasticsearch.search.suggest.SuggestBuilders.phraseSuggestion;
 import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestionSize;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasStatus;
+import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
@@ -273,6 +288,59 @@ public class ContextAndHeaderTransportIT extends ESIntegTestCase {
         assertRequestsContainHeader(PutIndexedScriptRequest.class);
     }
 
+    @Test
+    public void testThatIndexedScriptGetRequestInTemplateQueryContainsContextAndHeaders() throws Exception {
+        PutIndexedScriptResponse scriptResponse = transportClient()
+                .preparePutIndexedScript(
+                        MustacheScriptEngineService.NAME,
+                        "my_script",
+                        jsonBuilder().startObject().field("script", "{ \"query\": { \"match\": { \"name\": \"Star Wars\" }}}").endObject()
+                                .string()).get();
+        assertThat(scriptResponse.isCreated(), is(true));
+
+        transportClient().prepareIndex(queryIndex, "type", "1")
+                .setSource(jsonBuilder().startObject().field("name", "Star Wars - The new republic").endObject()).get();
+        transportClient().admin().indices().prepareRefresh(queryIndex).get();
+
+        SearchResponse searchResponse = transportClient()
+                .prepareSearch(queryIndex)
+                .setQuery(
+                        QueryBuilders.templateQuery(new Template("my_script", ScriptType.INDEXED,
+                                MustacheScriptEngineService.NAME, null, null))).get();
+        assertNoFailures(searchResponse);
+        assertHitCount(searchResponse, 1);
+
+        assertGetRequestsContainHeaders(".scripts");
+        assertRequestsContainHeader(PutIndexedScriptRequest.class);
+    }
+
+    @Test
+    public void testThatIndexedScriptGetRequestInReducePhaseContainsContextAndHeaders() throws Exception {
+        PutIndexedScriptResponse scriptResponse = transportClient().preparePutIndexedScript(GroovyScriptEngineService.NAME, "my_script",
+                jsonBuilder().startObject().field("script", "_value0 * 10").endObject().string()).get();
+        assertThat(scriptResponse.isCreated(), is(true));
+
+        transportClient().prepareIndex(queryIndex, "type", "1")
+                .setSource(jsonBuilder().startObject().field("s_field", "foo").field("l_field", 10).endObject()).get();
+        transportClient().admin().indices().prepareRefresh(queryIndex).get();
+
+        SearchResponse searchResponse = transportClient()
+                .prepareSearch(queryIndex)
+                .addAggregation(
+                        AggregationBuilders
+                                .terms("terms")
+                                .field("s_field")
+                                .subAggregation(AggregationBuilders.max("max").field("l_field"))
+                                .subAggregation(
+                                        PipelineAggregatorBuilders.bucketScript("scripted").setBucketsPaths("max").script(
+                                                new Script("my_script", ScriptType.INDEXED, GroovyScriptEngineService.NAME, null)))).get();
+        assertNoFailures(searchResponse);
+        assertHitCount(searchResponse, 1);
+
+        assertGetRequestsContainHeaders(".scripts");
+        assertRequestsContainHeader(PutIndexedScriptRequest.class);
+    }
+
     @Test
     public void testThatSearchTemplatesWithIndexedTemplatesGetRequestContainsContextAndHeaders() throws Exception {
         PutIndexedScriptResponse scriptResponse = transportClient().preparePutIndexedScript(MustacheScriptEngineService.NAME, "the_template",
@@ -302,6 +370,98 @@ public class ContextAndHeaderTransportIT extends ESIntegTestCase {
         assertRequestsContainHeader(PutIndexedScriptRequest.class);
     }
 
+    @Test
+    public void testThatIndexedScriptGetRequestInPhraseSuggestContainsContextAndHeaders() throws Exception {
+        CreateIndexRequestBuilder builder = transportClient().admin().indices().prepareCreate("test").setSettings(settingsBuilder()
+                .put(indexSettings())
+                .put(SETTING_NUMBER_OF_SHARDS, 1) // A single shard will help to keep the tests repeatable.
+                .put("index.analysis.analyzer.text.tokenizer", "standard")
+                .putArray("index.analysis.analyzer.text.filter", "lowercase", "my_shingle")
+                .put("index.analysis.filter.my_shingle.type", "shingle")
+                .put("index.analysis.filter.my_shingle.output_unigrams", true)
+                .put("index.analysis.filter.my_shingle.min_shingle_size", 2)
+                .put("index.analysis.filter.my_shingle.max_shingle_size", 3));
+
+        XContentBuilder mapping = XContentFactory.jsonBuilder()
+                .startObject()
+                .startObject("type1")
+                .startObject("properties")
+                .startObject("title")
+                .field("type", "string")
+                .field("analyzer", "text")
+                .endObject()
+                .endObject()
+                .endObject()
+                .endObject();
+        assertAcked(builder.addMapping("type1", mapping));
+        ensureGreen();
+
+        List<String> titles = new ArrayList<>();
+
+        titles.add("United States House of Representatives Elections in Washington 2006");
+        titles.add("United States House of Representatives Elections in Washington 2005");
+        titles.add("State");
+        titles.add("Houses of Parliament");
+        titles.add("Representative Government");
+        titles.add("Election");
+
+        List<IndexRequestBuilder> builders = new ArrayList<>();
+        for (String title: titles) {
+            transportClient().prepareIndex("test", "type1").setSource("title", title).get();
+        }
+        transportClient().admin().indices().prepareRefresh("test").get();
+
+        String filterStringAsFilter = XContentFactory.jsonBuilder()
+                .startObject()
+                .startObject("query")
+                .startObject("match_phrase")
+                .field("title", "{{suggestion}}")
+                .endObject()
+                .endObject()
+                .endObject()
+                .string();
+
+        PutIndexedScriptResponse scriptResponse = transportClient()
+                .preparePutIndexedScript(
+                        MustacheScriptEngineService.NAME,
+                        "my_script",
+                        jsonBuilder().startObject().field("script", filterStringAsFilter).endObject()
+                                .string()).get();
+        assertThat(scriptResponse.isCreated(), is(true));
+
+        PhraseSuggestionBuilder suggest = phraseSuggestion("title")
+                .field("title")
+                .addCandidateGenerator(PhraseSuggestionBuilder.candidateGenerator("title")
+                        .suggestMode("always")
+                        .maxTermFreq(.99f)
+                        .size(10)
+                        .maxInspections(200)
+                )
+                .confidence(0f)
+                .maxErrors(2f)
+                .shardSize(30000)
+                .size(10);
+
+        PhraseSuggestionBuilder filteredFilterSuggest = suggest.collateQuery(new Template("my_script", ScriptType.INDEXED,
+                MustacheScriptEngineService.NAME, null, null));
+
+        SearchRequestBuilder searchRequestBuilder = transportClient().prepareSearch("test").setSize(0);
+        String suggestText = "united states house of representatives elections in washington 2006";
+        if (suggestText != null) {
+            searchRequestBuilder.setSuggestText(suggestText);
+        }
+        searchRequestBuilder.addSuggestion(filteredFilterSuggest);
+        SearchResponse actionGet = searchRequestBuilder.execute().actionGet();
+        assertThat(Arrays.toString(actionGet.getShardFailures()), actionGet.getFailedShards(), equalTo(0));
+        Suggest searchSuggest = actionGet.getSuggest();
+
+        assertSuggestionSize(searchSuggest, 0, 2, "title");
+
+        assertGetRequestsContainHeaders(".scripts");
+        assertRequestsContainHeader(PutIndexedScriptRequest.class);
+    }
+
+
     @Test
     public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws Exception {
         String releventHeaderName = "relevant_" + randomHeaderKey;