Merge pull request #13881 from s1monw/astalavista_baby

Remove shard-level injector
Simon Willnauer, 2015-10-05 14:26:38 +02:00, commit e8d74bb9d9
93 changed files with 830 additions and 1035 deletions
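Most of the diff is a mechanical rename on IndexService: the old shard(int) / shardSafe(int) lookups (which callers sometimes followed by reaching into the shard-level injector) become getShardOrNull(int) / getShard(int). A short sketch of the calling pattern that repeats in the transport actions below, assuming only the node-level IndicesService those actions already hold:

    // sketch of the renamed shard lookups used throughout this commit
    IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
    IndexShard shard = indexService.getShard(shardId.id());              // was shardSafe(): throws ShardNotFoundException if missing
    IndexShard shardOrNull = indexService.getShardOrNull(shardId.id());  // was shard(): returns null if missing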

View File

@@ -274,7 +274,7 @@
 <include>org/elasticsearch/common/cli/CliToolTestCase$*.class</include>
 <include>org/elasticsearch/cluster/MockInternalClusterInfoService.class</include>
 <include>org/elasticsearch/cluster/MockInternalClusterInfoService$*.class</include>
-<include>org/elasticsearch/index/shard/MockEngineFactoryPlugin.class</include>
+<include>org/elasticsearch/index/MockEngineFactoryPlugin.class</include>
 <include>org/elasticsearch/search/MockSearchService.class</include>
 <include>org/elasticsearch/search/MockSearchService$*.class</include>
 <include>org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.class</include>

View File

@@ -31,7 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.index.cache.query.QueryCacheStats;
 import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.index.fielddata.FieldDataStats;
-import org.elasticsearch.index.percolator.stats.PercolateStats;
+import org.elasticsearch.index.percolator.PercolateStats;
 import org.elasticsearch.index.shard.DocsStats;
 import org.elasticsearch.index.store.StoreStats;
 import org.elasticsearch.search.suggest.completion.CompletionStats;

View File

@@ -83,7 +83,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
     protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) {
         IndexService service = indicesService.indexService(shardRouting.getIndex());
         if (service != null) {
-            IndexShard shard = service.shard(shardRouting.id());
+            IndexShard shard = service.getShardOrNull(shardRouting.id());
             boolean clearedAtLeastOne = false;
             if (request.queryCache()) {
                 clearedAtLeastOne = true;

View File

@@ -62,7 +62,7 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
     @Override
     protected Tuple<ActionWriteResponse, ShardFlushRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).getShard(shardRequest.shardId.id());
         indexShard.flush(shardRequest.request.getRequest());
         logger.trace("{} flush request executed on primary", indexShard.shardId());
         return new Tuple<>(new ActionWriteResponse(), shardRequest.request);
@@ -70,7 +70,7 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
     @Override
     protected void shardOperationOnReplica(ShardId shardId, ShardFlushRequest request) {
-        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
+        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
         indexShard.flush(request.getRequest());
         logger.trace("{} flush request executed on replica", indexShard.shardId());
     }

View File

@@ -75,7 +75,7 @@ public class TransportOptimizeAction extends TransportBroadcastByNodeAction<Opti
     @Override
     protected EmptyResult shardOperation(OptimizeRequest request, ShardRouting shardRouting) throws IOException {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()).shardSafe(shardRouting.shardId().id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()).getShard(shardRouting.shardId().id());
         indexShard.optimize(request);
         return EmptyResult.INSTANCE;
     }

View File

@@ -100,7 +100,7 @@ public class TransportRecoveryAction extends TransportBroadcastByNodeAction<Reco
     @Override
     protected RecoveryState shardOperation(RecoveryRequest request, ShardRouting shardRouting) {
         IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardRouting.shardId().id());
+        IndexShard indexShard = indexService.getShard(shardRouting.shardId().id());
         return indexShard.recoveryState();
     }

View File

@@ -63,7 +63,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
     @Override
     protected Tuple<ActionWriteResponse, ReplicationRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).getShard(shardRequest.shardId.id());
         indexShard.refresh("api");
         logger.trace("{} refresh request executed on primary", indexShard.shardId());
         return new Tuple<>(new ActionWriteResponse(), shardRequest.request);
@@ -71,7 +71,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
     @Override
     protected void shardOperationOnReplica(ShardId shardId, ReplicationRequest request) {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).shardSafe(shardId.id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
         indexShard.refresh("api");
         logger.trace("{} refresh request executed on replica", indexShard.shardId());
     }

View File

@@ -94,7 +94,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi
     @Override
     protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting shardRouting) {
         IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardRouting.id());
-        return new ShardSegments(indexShard.routingEntry(), indexShard.engine().segments(request.verbose()));
+        IndexShard indexShard = indexService.getShard(shardRouting.id());
+        return new ShardSegments(indexShard.routingEntry(), indexShard.segments(request.verbose()));
     }
 }

View File

@@ -34,7 +34,7 @@ import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.get.GetStats;
 import org.elasticsearch.index.indexing.IndexingStats;
 import org.elasticsearch.index.merge.MergeStats;
-import org.elasticsearch.index.percolator.stats.PercolateStats;
+import org.elasticsearch.index.percolator.PercolateStats;
 import org.elasticsearch.index.recovery.RecoveryStats;
 import org.elasticsearch.index.refresh.RefreshStats;
 import org.elasticsearch.index.search.stats.SearchStats;
@@ -167,7 +167,7 @@ public class CommonStats implements Streamable, ToXContent {
                 segments = indexShard.segmentStats();
                 break;
             case Percolate:
-                percolate = indexShard.shardPercolateService().stats();
+                percolate = indexShard.percolateStats();
                 break;
             case Translog:
                 translog = indexShard.translogStats();

View File

@@ -95,7 +95,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
     @Override
     protected ShardStats shardOperation(IndicesStatsRequest request, ShardRouting shardRouting) {
         IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardRouting.shardId().id());
+        IndexShard indexShard = indexService.getShard(shardRouting.shardId().id());
         // if we don't have the routing entry yet, we need it stats wise, we treat it as if the shard is not ready yet
         if (indexShard.routingEntry() == null) {
             throw new ShardNotFoundException(indexShard.shardId());

View File

@@ -96,8 +96,8 @@ public class TransportUpgradeStatusAction extends TransportBroadcastByNodeAction
     @Override
     protected ShardUpgradeStatus shardOperation(UpgradeStatusRequest request, ShardRouting shardRouting) {
         IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardRouting.shardId().id());
-        List<Segment> segments = indexShard.engine().segments(false);
+        IndexShard indexShard = indexService.getShard(shardRouting.shardId().id());
+        List<Segment> segments = indexShard.segments(false);
         long total_bytes = 0;
         long to_upgrade_bytes = 0;
         long to_upgrade_bytes_ancient = 0;

View File

@@ -119,7 +119,7 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
     @Override
     protected ShardUpgradeResult shardOperation(UpgradeRequest request, ShardRouting shardRouting) throws IOException {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()).shardSafe(shardRouting.shardId().id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()).getShard(shardRouting.shardId().id());
         org.apache.lucene.util.Version oldestLuceneSegment = indexShard.upgrade(request);
         // We are using the current version of Elasticsearch as upgrade version since we update mapping to match the current version
         return new ShardUpgradeResult(shardRouting.shardId(), indexShard.routingEntry().primary(), Version.CURRENT, oldestLuceneSegment);

View File

@@ -163,7 +163,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
     protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
         IndexQueryParserService queryParserService = indexService.queryParserService();
-        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
+        IndexShard indexShard = indexService.getShard(request.shardId().id());
         boolean valid;
         String explanation = null;

View File

@@ -116,7 +116,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
     protected Tuple<BulkShardResponse, BulkShardRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
         final BulkShardRequest request = shardRequest.request;
         final IndexService indexService = indicesService.indexServiceSafe(request.index());
-        final IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());
+        final IndexShard indexShard = indexService.getShard(shardRequest.shardId.id());
         long[] preVersions = new long[request.items().length];
         VersionType[] preVersionTypes = new VersionType[request.items().length];
@@ -447,7 +447,7 @@
     @Override
     protected void shardOperationOnReplica(ShardId shardId, BulkShardRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());
         Translog.Location location = null;
         for (int i = 0; i < request.items().length; i++) {
             BulkItemRequest item = request.items()[i];

View File

@@ -42,7 +42,6 @@ import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -130,7 +129,7 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
     @Override
     protected Tuple<DeleteResponse, DeleteRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
         DeleteRequest request = shardRequest.request;
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).getShard(shardRequest.shardId.id());
         Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY);
         indexShard.delete(delete);
         // update the request with teh version so it will go to the replicas
@@ -146,7 +145,7 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
     @Override
     protected void shardOperationOnReplica(ShardId shardId, DeleteRequest request) {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).shardSafe(shardId.id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
         Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version(), request.versionType(), Engine.Operation.Origin.REPLICA);
         indexShard.delete(delete);

View File

@@ -148,7 +148,7 @@ public class TransportExistsAction extends TransportBroadcastAction<ExistsReques
     @Override
     protected ShardExistsResponse shardOperation(ShardExistsRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
+        IndexShard indexShard = indexService.getShard(request.shardId().id());
         SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.shardId().getIndex(), request.shardId().id());
         SearchContext context = new DefaultSearchContext(0,

View File

@@ -104,7 +104,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
     @Override
     protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());
         Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
         Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
         if (!result.exists()) {

View File

@@ -152,7 +152,7 @@ public class TransportFieldStatsTransportAction extends TransportBroadcastAction
         Map<String, FieldStats> fieldStats = new HashMap<>();
         IndexService indexServices = indicesService.indexServiceSafe(shardId.getIndex());
         MapperService mapperService = indexServices.mapperService();
-        IndexShard shard = indexServices.shardSafe(shardId.id());
+        IndexShard shard = indexServices.getShard(shardId.id());
         try (Engine.Searcher searcher = shard.acquireSearcher("fieldstats")) {
             for (String field : request.getFields()) {
                 MappedFieldType fieldType = mapperService.fullName(field);

View File

@@ -92,7 +92,7 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
     @Override
     protected GetResponse shardOperation(GetRequest request, ShardId shardId) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());
         if (request.refresh() && !request.realtime()) {
             indexShard.refresh("refresh_flag_get");

View File

@@ -87,7 +87,7 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
     @Override
     protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, ShardId shardId) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());
         if (request.refresh() && !request.realtime()) {
             indexShard.refresh("refresh_flag_mget");

View File

@@ -164,7 +164,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
         }
         IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());
+        IndexShard indexShard = indexService.getShard(shardRequest.shardId.id());
         final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(null, request, indexShard);
         final IndexResponse response = result.response;
@@ -176,7 +176,7 @@
     @Override
     protected void shardOperationOnReplica(ShardId shardId, IndexRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).index(shardId.getIndex()).type(request.type()).id(request.id())
                 .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());

View File

@@ -130,7 +130,7 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
     @Override
     protected ShardSuggestResponse shardOperation(ShardSuggestRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
+        IndexShard indexShard = indexService.getShard(request.shardId().id());
         ShardSuggestMetric suggestMetric = indexShard.getSuggestMetric();
         suggestMetric.preSuggest();
         long startTime = System.nanoTime();

View File

@@ -666,7 +666,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
     protected Releasable getIndexShardOperationsCounter(ShardId shardId) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.index().getName());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());
         return new IndexShardReference(indexShard);
     }
@@ -678,7 +678,7 @@
             logger.debug("ignoring failed replica [{}][{}] because index was already removed.", index, shardId);
             return;
         }
-        IndexShard indexShard = indexService.shard(shardId);
+        IndexShard indexShard = indexService.getShardOrNull(shardId);
         if (indexShard == null) {
             logger.debug("ignoring failed replica [{}][{}] because index was already removed.", index, shardId);
             return;

View File

@@ -79,7 +79,7 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
             TermVectorsRequest termVectorsRequest = request.requests.get(i);
             try {
                 IndexService indexService = indicesService.indexServiceSafe(request.index());
-                IndexShard indexShard = indexService.shardSafe(shardId.id());
+                IndexShard indexShard = indexService.getShard(shardId.id());
                 TermVectorsResponse termVectorsResponse = indexShard.getTermVectors(termVectorsRequest);
                 termVectorsResponse.updateTookInMillis(termVectorsRequest.startTime());
                 response.add(request.locations.get(i), termVectorsResponse);

View File

@@ -82,7 +82,7 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
     @Override
     protected TermVectorsResponse shardOperation(TermVectorsRequest request, ShardId shardId) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());
         TermVectorsResponse response = indexShard.getTermVectors(request);
         response.updateTookInMillis(request.startTime());
         return response;

View File

@@ -166,7 +166,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
     protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener, final int retryCount) {
         IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex());
-        IndexShard indexShard = indexService.shardSafe(request.shardId());
+        IndexShard indexShard = indexService.getShard(request.shardId());
         final UpdateHelper.Result result = updateHelper.prepare(request, indexShard);
         switch (result.operation()) {
             case UPSERT:
@@ -266,7 +266,7 @@
                 UpdateResponse update = result.action();
                 IndexService indexServiceOrNull = indicesService.indexService(request.concreteIndex());
                 if (indexServiceOrNull != null) {
-                    IndexShard shard = indexService.shard(request.shardId());
+                    IndexShard shard = indexService.getShardOrNull(request.shardId());
                     if (shard != null) {
                         shard.indexingService().noopUpdate(request.type());
                     }

View File

@@ -20,21 +20,31 @@
 package org.elasticsearch.index;
 import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.index.engine.EngineFactory;
+import org.elasticsearch.index.engine.InternalEngineFactory;
+import org.elasticsearch.index.shard.IndexSearcherWrapper;
 /**
  *
  */
 public class IndexModule extends AbstractModule {
-    private final Settings settings;
-    public IndexModule(Settings settings) {
-        this.settings = settings;
-    }
+    // pkg private so tests can mock
+    Class<? extends EngineFactory> engineFactoryImpl = InternalEngineFactory.class;
+    Class<? extends IndexSearcherWrapper> indexSearcherWrapper = null;
     @Override
     protected void configure() {
+        bind(EngineFactory.class).to(engineFactoryImpl).asEagerSingleton();
+        if (indexSearcherWrapper == null) {
+            bind(IndexSearcherWrapper.class).toProvider(Providers.of(null));
+        } else {
+            bind(IndexSearcherWrapper.class).to(indexSearcherWrapper).asEagerSingleton();
+        }
         bind(IndexService.class).asEagerSingleton();
+        bind(IndexServicesProvider.class).asEagerSingleton();
     }
 }
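IndexModule no longer takes Settings; instead it exposes two package-private hooks ("pkg private so tests can mock") that decide what gets bound. A minimal sketch of how a same-package test might use them; MockEngineFactory is only an illustrative name for some EngineFactory implementation, not something this commit defines:

    // hypothetical test wiring, assuming same-package access to the pkg-private fields
    IndexModule module = new IndexModule();
    module.engineFactoryImpl = MockEngineFactory.class;   // default is InternalEngineFactory.class
    // module.indexSearcherWrapper left null: configure() then binds IndexSearcherWrapper via Providers.of(null)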

View File

@@ -24,16 +24,9 @@ import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.inject.CreationException;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.inject.Injector;
-import org.elasticsearch.common.inject.Injectors;
-import org.elasticsearch.common.inject.Module;
-import org.elasticsearch.common.inject.ModulesBuilder;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.env.ShardLock;
@@ -49,20 +42,12 @@ import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.query.IndexQueryParserService;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.settings.IndexSettingsService;
-import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.IndexShardModule;
-import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.shard.ShardNotFoundException;
-import org.elasticsearch.index.shard.ShardPath;
+import org.elasticsearch.index.shard.*;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.index.store.IndexStore;
 import org.elasticsearch.index.store.Store;
-import org.elasticsearch.index.store.StoreModule;
-import org.elasticsearch.indices.IndicesLifecycle;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.InternalIndicesLifecycle;
-import org.elasticsearch.indices.cache.query.IndicesQueryCache;
-import org.elasticsearch.plugins.PluginsService;
 import java.io.Closeable;
 import java.io.IOException;
@@ -81,86 +66,42 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
  */
 public class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable<IndexShard> {
-    private final Injector injector;
     private final Settings indexSettings;
-    private final PluginsService pluginsService;
     private final InternalIndicesLifecycle indicesLifecycle;
     private final AnalysisService analysisService;
-    private final MapperService mapperService;
-    private final IndexQueryParserService queryParserService;
-    private final SimilarityService similarityService;
-    private final IndexAliasesService aliasesService;
-    private final IndexCache indexCache;
     private final IndexFieldDataService indexFieldData;
     private final BitsetFilterCache bitsetFilterCache;
     private final IndexSettingsService settingsService;
     private final NodeEnvironment nodeEnv;
     private final IndicesService indicesServices;
-    private volatile ImmutableMap<Integer, IndexShardInjectorPair> shards = ImmutableMap.of();
-    private static class IndexShardInjectorPair {
-        private final IndexShard indexShard;
-        private final Injector injector;
-        public IndexShardInjectorPair(IndexShard indexShard, Injector injector) {
-            this.indexShard = indexShard;
-            this.injector = injector;
-        }
-        public IndexShard getIndexShard() {
-            return indexShard;
-        }
-        public Injector getInjector() {
-            return injector;
-        }
-    }
+    private final IndexServicesProvider indexServicesProvider;
+    private final IndexStore indexStore;
+    private volatile ImmutableMap<Integer, IndexShard> shards = ImmutableMap.of();
     private final AtomicBoolean closed = new AtomicBoolean(false);
     private final AtomicBoolean deleted = new AtomicBoolean(false);
     @Inject
-    public IndexService(Injector injector, Index index, @IndexSettings Settings indexSettings, NodeEnvironment nodeEnv,
-                        AnalysisService analysisService, MapperService mapperService, IndexQueryParserService queryParserService,
-                        SimilarityService similarityService, IndexAliasesService aliasesService, IndexCache indexCache,
+    public IndexService(Index index, @IndexSettings Settings indexSettings, NodeEnvironment nodeEnv,
+                        AnalysisService analysisService,
                         IndexSettingsService settingsService,
-                        IndexFieldDataService indexFieldData, BitsetFilterCache bitSetFilterCache, IndicesService indicesServices) {
+                        IndexFieldDataService indexFieldData,
+                        BitsetFilterCache bitSetFilterCache,
+                        IndicesService indicesServices,
+                        IndexServicesProvider indexServicesProvider,
+                        IndexStore indexStore) {
         super(index, indexSettings);
-        this.injector = injector;
         this.indexSettings = indexSettings;
         this.analysisService = analysisService;
-        this.mapperService = mapperService;
-        this.queryParserService = queryParserService;
-        this.similarityService = similarityService;
-        this.aliasesService = aliasesService;
-        this.indexCache = indexCache;
         this.indexFieldData = indexFieldData;
         this.settingsService = settingsService;
         this.bitsetFilterCache = bitSetFilterCache;
-        this.pluginsService = injector.getInstance(PluginsService.class);
         this.indicesServices = indicesServices;
-        this.indicesLifecycle = (InternalIndicesLifecycle) injector.getInstance(IndicesLifecycle.class);
-        this.nodeEnv = nodeEnv;
-        // inject workarounds for cyclic dep
+        this.indicesLifecycle = (InternalIndicesLifecycle) indexServicesProvider.getIndicesLifecycle();
+        this.indexServicesProvider = indexServicesProvider;
+        this.indexStore = indexStore;
         indexFieldData.setListener(new FieldDataCacheListener(this));
         bitSetFilterCache.setListener(new BitsetCacheListener(this));
+        this.nodeEnv = nodeEnv;
     }
@@ -173,7 +114,7 @@
     @Override
     public Iterator<IndexShard> iterator() {
-        return shards.values().stream().map((p) -> p.getIndexShard()).iterator();
+        return shards.values().iterator();
     }
     public boolean hasShard(int shardId) {
@@ -184,19 +125,15 @@
      * Return the shard with the provided id, or null if there is no such shard.
      */
     @Nullable
-    public IndexShard shard(int shardId) {
-        IndexShardInjectorPair indexShardInjectorPair = shards.get(shardId);
-        if (indexShardInjectorPair != null) {
-            return indexShardInjectorPair.getIndexShard();
-        }
-        return null;
+    public IndexShard getShardOrNull(int shardId) {
+        return shards.get(shardId);
     }
     /**
      * Return the shard with the provided id, or throw an exception if it doesn't exist.
      */
-    public IndexShard shardSafe(int shardId) {
-        IndexShard indexShard = shard(shardId);
+    public IndexShard getShard(int shardId) {
+        IndexShard indexShard = getShardOrNull(shardId);
         if (indexShard == null) {
             throw new ShardNotFoundException(new ShardId(index, shardId));
         }
@@ -207,16 +144,12 @@
         return shards.keySet();
     }
-    public Injector injector() {
-        return injector;
-    }
     public IndexSettingsService settingsService() {
         return this.settingsService;
     }
     public IndexCache cache() {
-        return indexCache;
+        return indexServicesProvider.getIndexCache();
     }
     public IndexFieldDataService fieldData() {
@@ -232,19 +165,19 @@
     }
     public MapperService mapperService() {
-        return mapperService;
+        return indexServicesProvider.getMapperService();
     }
     public IndexQueryParserService queryParserService() {
-        return queryParserService;
+        return indexServicesProvider.getQueryParserService();
     }
     public SimilarityService similarityService() {
-        return similarityService;
+        return indexServicesProvider.getSimilarityService();
     }
     public IndexAliasesService aliasesService() {
-        return aliasesService;
+        return indexServicesProvider.getIndexAliasesService();
     }
     public synchronized void close(final String reason, boolean delete) {
@@ -261,16 +194,6 @@
         }
     }
-    /**
-     * Return the shard injector for the provided id, or throw an exception if there is no such shard.
-     */
-    public Injector shardInjectorSafe(int shardId) {
-        IndexShardInjectorPair indexShardInjectorPair = shards.get(shardId);
-        if (indexShardInjectorPair == null) {
-            throw new ShardNotFoundException(new ShardId(index, shardId));
-        }
-        return indexShardInjectorPair.getInjector();
-    }
     public String indexUUID() {
         return indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
@@ -301,10 +224,14 @@
         if (closed.get()) {
             throw new IllegalStateException("Can't create shard [" + index.name() + "][" + sShardId + "], closed");
         }
+        if (indexSettings.get("index.translog.type") != null) { // TODO remove?
+            throw new IllegalStateException("a custom translog type is no longer supported. got [" + indexSettings.get("index.translog.type") + "]");
+        }
         final ShardId shardId = new ShardId(index, sShardId);
         ShardLock lock = null;
         boolean success = false;
-        Injector shardInjector = null;
+        Store store = null;
+        IndexShard indexShard = null;
         try {
             lock = nodeEnv.shardLock(shardId, TimeUnit.SECONDS.toMillis(5));
             indicesLifecycle.beforeIndexShardCreated(shardId, indexSettings);
@@ -325,7 +252,6 @@
             if (path == null) {
                 // TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. for a shard
                 // that's being relocated/replicated we know how large it will become once it's done copying:
-
                 // Count up how many shards are currently on each data path:
                 Map<Path,Integer> dataPathToShardCount = new HashMap<>();
                 for(IndexShard shard : this) {
@@ -351,39 +277,17 @@
             // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
             final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
                     (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
-            ModulesBuilder modules = new ModulesBuilder();
-            // plugin modules must be added here, before others or we can get crazy injection errors...
-            for (Module pluginModule : pluginsService.shardModules(indexSettings)) {
-                modules.add(pluginModule);
-            }
-            modules.add(new IndexShardModule(shardId, primary, indexSettings));
-            modules.add(new StoreModule(injector.getInstance(IndexStore.class).shardDirectory(), lock,
-                    new StoreCloseListener(shardId, canDeleteShardContent, new Closeable() {
-                        @Override
-                        public void close() throws IOException {
-                            injector.getInstance(IndicesQueryCache.class).onClose(shardId);
-                        }
-                    }), path));
-            pluginsService.processModules(modules);
-            try {
-                shardInjector = modules.createChildInjector(injector);
-            } catch (CreationException e) {
-                ElasticsearchException ex = new ElasticsearchException("failed to create shard", Injectors.getFirstErrorFailure(e));
-                ex.setShard(shardId);
-                throw ex;
-            } catch (Throwable e) {
-                ElasticsearchException ex = new ElasticsearchException("failed to create shard", e);
-                ex.setShard(shardId);
-                throw ex;
-            }
-            IndexShard indexShard = shardInjector.getInstance(IndexShard.class);
+            store = new Store(shardId, indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> indexServicesProvider.getIndicesQueryCache().onClose(shardId)));
+            if (useShadowEngine(primary, indexSettings)) {
+                indexShard = new ShadowIndexShard(shardId, indexSettings, path, store, indexServicesProvider);
+            } else {
+                indexShard = new IndexShard(shardId, indexSettings, path, store, indexServicesProvider);
+            }
             indicesLifecycle.indexShardStateChanged(indexShard, null, "shard created");
             indicesLifecycle.afterIndexShardCreated(indexShard);
-            shards = newMapBuilder(shards).put(shardId.id(), new IndexShardInjectorPair(indexShard, shardInjector)).immutableMap();
             settingsService.addListener(indexShard);
+            shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap();
             success = true;
             return indexShard;
         } catch (IOException e) {
@@ -393,45 +297,35 @@
         } finally {
             if (success == false) {
                 IOUtils.closeWhileHandlingException(lock);
-                if (shardInjector != null) {
-                    IndexShard indexShard = shardInjector.getInstance(IndexShard.class);
-                    closeShardInjector("initialization failed", shardId, shardInjector, indexShard);
-                }
+                closeShard("initialization failed", shardId, indexShard, store);
             }
         }
     }
+    static boolean useShadowEngine(boolean primary, Settings indexSettings) {
+        return primary == false && IndexMetaData.isIndexUsingShadowReplicas(indexSettings);
+    }
     public synchronized void removeShard(int shardId, String reason) {
         final ShardId sId = new ShardId(index, shardId);
-        final Injector shardInjector;
         final IndexShard indexShard;
         if (shards.containsKey(shardId) == false) {
             return;
         }
         logger.debug("[{}] closing... (reason: [{}])", shardId, reason);
-        HashMap<Integer, IndexShardInjectorPair> tmpShardsMap = new HashMap<>(shards);
-        IndexShardInjectorPair indexShardInjectorPair = tmpShardsMap.remove(shardId);
-        indexShard = indexShardInjectorPair.getIndexShard();
-        shardInjector = indexShardInjectorPair.getInjector();
+        HashMap<Integer, IndexShard> tmpShardsMap = new HashMap<>(shards);
+        indexShard = tmpShardsMap.remove(shardId);
         shards = ImmutableMap.copyOf(tmpShardsMap);
-        closeShardInjector(reason, sId, shardInjector, indexShard);
+        closeShard(reason, sId, indexShard, indexShard.store());
         logger.debug("[{}] closed (reason: [{}])", shardId, reason);
     }
-    private void closeShardInjector(String reason, ShardId sId, Injector shardInjector, IndexShard indexShard) {
+    private void closeShard(String reason, ShardId sId, IndexShard indexShard, Store store) {
         final int shardId = sId.id();
         try {
             try {
                 indicesLifecycle.beforeIndexShardClosed(sId, indexShard, indexSettings);
             } finally {
-                // close everything else even if the beforeIndexShardClosed threw an exception
-                for (Class<? extends Closeable> closeable : pluginsService.shardServices()) {
-                    try {
-                        shardInjector.getInstance(closeable).close();
-                    } catch (Throwable e) {
-                        logger.debug("[{}] failed to clean plugin shard service [{}]", e, shardId, closeable);
-                    }
-                }
                 // this logic is tricky, we want to close the engine so we rollback the changes done to it
                 // and close the shard so no operations are allowed to it
                 if (indexShard != null) {
@@ -449,30 +343,13 @@
             }
         } finally {
             try {
-                shardInjector.getInstance(Store.class).close();
+                store.close();
             } catch (Throwable e) {
                 logger.warn("[{}] failed to close store on shard removal (reason: [{}])", e, shardId, reason);
             }
         }
     }
-    /**
-     * Closes an optional resource. Returns true if the resource was found;
-     * NOTE: this method swallows all exceptions thrown from the close method of the injector and logs them as debug log
-     */
-    private boolean closeInjectorOptionalResource(ShardId shardId, Injector shardInjector, Class<? extends Closeable> toClose) {
-        try {
-            final Closeable instance = shardInjector.getInstance(toClose);
-            if (instance == null) {
-                return false;
-            }
-            IOUtils.close(instance);
-        } catch (Throwable t) {
-            logger.debug("{} failed to close {}", t, shardId, Strings.toUnderscoreCase(toClose.getSimpleName()));
-        }
-        return true;
-    }
     private void onShardClose(ShardLock lock, boolean ownsShard) {
         if (deleted.get()) { // we remove that shards content if this index has been deleted
@@ -492,6 +369,10 @@
         }
     }
+    public IndexServicesProvider getIndexServices() {
+        return indexServicesProvider;
+    }
     private class StoreCloseListener implements Store.OnClose {
         private final ShardId shardId;
         private final boolean ownsShard;
@@ -533,7 +414,7 @@
         @Override
         public void onCache(ShardId shardId, Accountable accountable) {
             if (shardId != null) {
-                final IndexShard shard = indexService.shard(shardId.id());
+                final IndexShard shard = indexService.getShardOrNull(shardId.id());
                 if (shard != null) {
                     long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0l;
                     shard.shardBitsetFilterCache().onCached(ramBytesUsed);
@@ -544,7 +425,7 @@
         @Override
         public void onRemoval(ShardId shardId, Accountable accountable) {
             if (shardId != null) {
-                final IndexShard shard = indexService.shard(shardId.id());
+                final IndexShard shard = indexService.getShardOrNull(shardId.id());
                 if (shard != null) {
                     long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0l;
                     shard.shardBitsetFilterCache().onRemoval(ramBytesUsed);
@@ -563,7 +444,7 @@
         @Override
         public void onCache(ShardId shardId, MappedFieldType.Names fieldNames, FieldDataType fieldDataType, Accountable ramUsage) {
             if (shardId != null) {
-                final IndexShard shard = indexService.shard(shardId.id());
+                final IndexShard shard = indexService.getShardOrNull(shardId.id());
                 if (shard != null) {
                     shard.fieldData().onCache(shardId, fieldNames, fieldDataType, ramUsage);
                 }
@@ -573,7 +454,7 @@
         @Override
         public void onRemoval(ShardId shardId, MappedFieldType.Names fieldNames, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) {
             if (shardId != null) {
-                final IndexShard shard = indexService.shard(shardId.id());
+                final IndexShard shard = indexService.getShardOrNull(shardId.id());
                 if (shard != null) {
                     shard.fieldData().onRemoval(shardId, fieldNames, fieldDataType, wasEvicted, sizeInBytes);
                 }

View File

@@ -0,0 +1,138 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.aliases.IndexAliasesService;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.threadpool.ThreadPool;
/**
* Simple provider class that holds the Index and Node level services used by
* a shard.
* This is just a temporary solution until we have cleaned up index creation and removed injectors on that level as well.
*/
public final class IndexServicesProvider {
private final IndicesLifecycle indicesLifecycle;
private final ThreadPool threadPool;
private final MapperService mapperService;
private final IndexQueryParserService queryParserService;
private final IndexCache indexCache;
private final IndexAliasesService indexAliasesService;
private final IndicesQueryCache indicesQueryCache;
private final CodecService codecService;
private final TermVectorsService termVectorsService;
private final IndexFieldDataService indexFieldDataService;
private final IndicesWarmer warmer;
private final SimilarityService similarityService;
private final EngineFactory factory;
private final BigArrays bigArrays;
private final IndexSearcherWrapper indexSearcherWrapper;
@Inject
public IndexServicesProvider(IndicesLifecycle indicesLifecycle, ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, IndicesQueryCache indicesQueryCache, CodecService codecService, TermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, @Nullable IndicesWarmer warmer, SimilarityService similarityService, EngineFactory factory, BigArrays bigArrays, @Nullable IndexSearcherWrapper indexSearcherWrapper) {
this.indicesLifecycle = indicesLifecycle;
this.threadPool = threadPool;
this.mapperService = mapperService;
this.queryParserService = queryParserService;
this.indexCache = indexCache;
this.indexAliasesService = indexAliasesService;
this.indicesQueryCache = indicesQueryCache;
this.codecService = codecService;
this.termVectorsService = termVectorsService;
this.indexFieldDataService = indexFieldDataService;
this.warmer = warmer;
this.similarityService = similarityService;
this.factory = factory;
this.bigArrays = bigArrays;
this.indexSearcherWrapper = indexSearcherWrapper;
}
public IndicesLifecycle getIndicesLifecycle() {
return indicesLifecycle;
}
public ThreadPool getThreadPool() {
return threadPool;
}
public MapperService getMapperService() {
return mapperService;
}
public IndexQueryParserService getQueryParserService() {
return queryParserService;
}
public IndexCache getIndexCache() {
return indexCache;
}
public IndexAliasesService getIndexAliasesService() {
return indexAliasesService;
}
public IndicesQueryCache getIndicesQueryCache() {
return indicesQueryCache;
}
public CodecService getCodecService() {
return codecService;
}
public TermVectorsService getTermVectorsService() {
return termVectorsService;
}
public IndexFieldDataService getIndexFieldDataService() {
return indexFieldDataService;
}
public IndicesWarmer getWarmer() {
return warmer;
}
public SimilarityService getSimilarityService() {
return similarityService;
}
public EngineFactory getFactory() {
return factory;
}
public BigArrays getBigArrays() {
return bigArrays;
}
public IndexSearcherWrapper getIndexSearcherWrapper() {
return indexSearcherWrapper;
}
}
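A minimal sketch (not part of this change; the class name is made up) of how a shard-level component could consume the provider instead of taking each service as a separate injected constructor argument:

import org.elasticsearch.index.IndexServicesProvider;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.threadpool.ThreadPool;

// Hypothetical consumer: instead of a long list of injected constructor
// arguments, a shard-level component takes the single provider and reads
// the services it needs from it.
public final class ExampleShardComponent {
    private final MapperService mapperService;
    private final ThreadPool threadPool;

    public ExampleShardComponent(IndexServicesProvider provider) {
        this.mapperService = provider.getMapperService();
        this.threadPool = provider.getThreadPool();
    }
}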

View File

@ -59,6 +59,8 @@ import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
import java.util.function.Supplier;
/** /**
* *
@ -78,7 +80,6 @@ public abstract class Engine implements Closeable {
protected final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock(); protected final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
protected final ReleasableLock readLock = new ReleasableLock(rwl.readLock()); protected final ReleasableLock readLock = new ReleasableLock(rwl.readLock());
protected final ReleasableLock writeLock = new ReleasableLock(rwl.writeLock()); protected final ReleasableLock writeLock = new ReleasableLock(rwl.writeLock());
protected volatile Throwable failedEngine = null; protected volatile Throwable failedEngine = null;
protected Engine(EngineConfig engineConfig) { protected Engine(EngineConfig engineConfig) {
@ -227,8 +228,8 @@ public abstract class Engine implements Closeable {
PENDING_OPERATIONS PENDING_OPERATIONS
} }
final protected GetResult getFromSearcher(Get get) throws EngineException { final protected GetResult getFromSearcher(Get get, Function<String, Searcher> searcherFactory) throws EngineException {
final Searcher searcher = acquireSearcher("get"); final Searcher searcher = searcherFactory.apply("get");
final Versions.DocIdAndVersion docIdAndVersion; final Versions.DocIdAndVersion docIdAndVersion;
try { try {
docIdAndVersion = Versions.loadDocIdAndVersion(searcher.reader(), get.uid()); docIdAndVersion = Versions.loadDocIdAndVersion(searcher.reader(), get.uid());
@ -256,7 +257,11 @@ public abstract class Engine implements Closeable {
} }
} }
public abstract GetResult get(Get get) throws EngineException; public final GetResult get(Get get) throws EngineException {
return get(get, this::acquireSearcher);
}
public abstract GetResult get(Get get, Function<String, Searcher> searcherFactory) throws EngineException;
/** /**
* Returns a new searcher instance. The consumer of this * Returns a new searcher instance. The consumer of this
@ -279,7 +284,7 @@ public abstract class Engine implements Closeable {
try { try {
final Searcher retVal = newSearcher(source, searcher, manager); final Searcher retVal = newSearcher(source, searcher, manager);
success = true; success = true;
return config().getWrappingService().wrap(engineConfig, retVal); return retVal;
} finally { } finally {
if (!success) { if (!success) {
manager.release(searcher); manager.release(searcher);
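A rough sketch of the new call shape, assuming hypothetical names for the wrapper class and method (not code from this change): the caller now decides how the Searcher used by a realtime get is acquired, while the engine's no-arg get(Get) keeps the old behaviour by delegating to its own acquireSearcher.

import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;

// Sketch only: the shard passes its own searcher acquisition as the factory,
// so any shard-level wrapping applies to realtime gets as well.
final class GetExample {
    static Engine.GetResult doGet(Engine engine, Engine.Get get, IndexShard shard) {
        // results must still be released by the caller, exactly as before
        return engine.get(get, shard::acquireSearcher);
    }
}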

View File

@ -25,6 +25,7 @@ import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCache;
import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeUnit;
@ -32,6 +33,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
@ -73,7 +75,7 @@ public final class EngineConfig {
private final boolean forceNewTranslog; private final boolean forceNewTranslog;
private final QueryCache queryCache; private final QueryCache queryCache;
private final QueryCachingPolicy queryCachingPolicy; private final QueryCachingPolicy queryCachingPolicy;
private final IndexSearcherWrappingService wrappingService; private final SetOnce<IndexSearcherWrapper> searcherWrapper = new SetOnce<>();
/** /**
* Index setting for compound file on flush. This setting is realtime updateable. * Index setting for compound file on flush. This setting is realtime updateable.
@ -121,7 +123,7 @@ public final class EngineConfig {
Settings indexSettings, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, Settings indexSettings, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer, MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer,
Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener, Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener,
TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, IndexSearcherWrappingService wrappingService, TranslogConfig translogConfig) { TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, TranslogConfig translogConfig) {
this.shardId = shardId; this.shardId = shardId;
this.indexSettings = indexSettings; this.indexSettings = indexSettings;
this.threadPool = threadPool; this.threadPool = threadPool;
@ -135,7 +137,6 @@ public final class EngineConfig {
this.similarity = similarity; this.similarity = similarity;
this.codecService = codecService; this.codecService = codecService;
this.failedEngineListener = failedEngineListener; this.failedEngineListener = failedEngineListener;
this.wrappingService = wrappingService;
this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush); this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME); codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
indexingBufferSize = DEFAULT_INDEX_BUFFER_SIZE; indexingBufferSize = DEFAULT_INDEX_BUFFER_SIZE;
@ -380,10 +381,6 @@ public final class EngineConfig {
return queryCachingPolicy; return queryCachingPolicy;
} }
public IndexSearcherWrappingService getWrappingService() {
return wrappingService;
}
/** /**
* Returns the translog config for this engine * Returns the translog config for this engine
*/ */
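The wrapping service is no longer part of the engine config; the wrapper, when present, is held in a Lucene SetOnce instead. How the field is read and written is not shown in this hunk; the snippet below only illustrates SetOnce's write-once semantics (not code from this change):

import org.apache.lucene.util.SetOnce;

public final class SetOnceExample {
    public static void main(String[] args) {
        // SetOnce permits exactly one set(); a second set() throws
        // SetOnce.AlreadySetException, which is what makes the wrapper
        // effectively write-once on the config.
        SetOnce<String> holder = new SetOnce<>();
        holder.set("first");
        System.out.println(holder.get()); // prints "first"
    }
}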

View File

@ -1,47 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.engine;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
/**
* Extension point to add custom functionality at request time to the {@link DirectoryReader}
* and {@link IndexSearcher} managed by the {@link Engine}.
*/
public interface IndexSearcherWrapper {
/**
* @param reader The provided directory reader to be wrapped to add custom functionality
* @return a new directory reader wrapping the provided directory reader or if no wrapping was performed
* the provided directory reader
*/
DirectoryReader wrap(DirectoryReader reader);
/**
* @param engineConfig The engine config which can be used to get the query cache and query cache policy from
* when creating a new index searcher
* @param searcher The provided index searcher to be wrapped to add custom functionality
* @return a new index searcher wrapping the provided index searcher or if no wrapping was performed
* the provided index searcher
*/
IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws EngineException;
}

View File

@ -66,6 +66,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import java.util.function.Supplier;
/** /**
* *
@ -303,7 +305,7 @@ public class InternalEngine extends Engine {
} }
@Override @Override
public GetResult get(Get get) throws EngineException { public GetResult get(Get get, Function<String, Searcher> searcherFactory) throws EngineException {
try (ReleasableLock lock = readLock.acquire()) { try (ReleasableLock lock = readLock.acquire()) {
ensureOpen(); ensureOpen();
if (get.realtime()) { if (get.realtime()) {
@ -324,7 +326,7 @@ public class InternalEngine extends Engine {
} }
// no version, get the version from the index, we know that we refresh on flush // no version, get the version from the index, we know that we refresh on flush
return getFromSearcher(get); return getFromSearcher(get, searcherFactory);
} }
} }

View File

@ -35,6 +35,7 @@ import org.elasticsearch.index.translog.Translog;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import java.util.function.Function;
/** /**
* ShadowEngine is a specialized engine that only allows read-only operations * ShadowEngine is a specialized engine that only allows read-only operations
@ -168,9 +169,9 @@ public class ShadowEngine extends Engine {
} }
@Override @Override
public GetResult get(Get get) throws EngineException { public GetResult get(Get get, Function<String, Searcher> searcherFactory) throws EngineException {
// There is no translog, so we can get it directly from the searcher // There is no translog, so we can get it directly from the searcher
return getFromSearcher(get); return getFromSearcher(get, searcherFactory);
} }
@Override @Override

View File

@ -16,7 +16,7 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
package org.elasticsearch.index.percolator.stats; package org.elasticsearch.index.percolator;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.index.percolator; package org.elasticsearch.index.percolator;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
@ -27,6 +28,8 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
@ -41,20 +44,18 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentTypeListener; import org.elasticsearch.index.mapper.DocumentTypeListener;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.percolator.stats.ShardPercolateService;
import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.percolator.PercolatorService;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
import java.util.Map; import java.util.Map;
import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
/** /**
@ -64,39 +65,35 @@ import java.util.concurrent.atomic.AtomicBoolean;
* Once a document type has been created, the real-time percolator will start to listen to write events and update * Once a document type has been created, the real-time percolator will start to listen to write events and update
* this registry with queries in real time. * this registry with queries in real time.
*/ */
public class PercolatorQueriesRegistry extends AbstractIndexShardComponent implements Closeable{ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent implements Closeable {
public final String MAP_UNMAPPED_FIELDS_AS_STRING = "index.percolator.map_unmapped_fields_as_string"; public final String MAP_UNMAPPED_FIELDS_AS_STRING = "index.percolator.map_unmapped_fields_as_string";
// This is a shard level service, but these below are index level services: // This is a shard level service, but these below are index level services:
private final IndexQueryParserService queryParserService; private final IndexQueryParserService queryParserService;
private final MapperService mapperService; private final MapperService mapperService;
private final IndicesLifecycle indicesLifecycle;
private final IndexFieldDataService indexFieldDataService; private final IndexFieldDataService indexFieldDataService;
private final ShardIndexingService indexingService; private final ShardIndexingService indexingService;
private final ShardPercolateService shardPercolateService;
private final ConcurrentMap<BytesRef, Query> percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); private final ConcurrentMap<BytesRef, Query> percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
private final ShardLifecycleListener shardLifecycleListener = new ShardLifecycleListener();
private final RealTimePercolatorOperationListener realTimePercolatorOperationListener = new RealTimePercolatorOperationListener(); private final RealTimePercolatorOperationListener realTimePercolatorOperationListener = new RealTimePercolatorOperationListener();
private final PercolateTypeListener percolateTypeListener = new PercolateTypeListener(); private final PercolateTypeListener percolateTypeListener = new PercolateTypeListener();
private final AtomicBoolean realTimePercolatorEnabled = new AtomicBoolean(false); private final AtomicBoolean realTimePercolatorEnabled = new AtomicBoolean(false);
private boolean mapUnmappedFieldsAsString; private boolean mapUnmappedFieldsAsString;
private final MeanMetric percolateMetric = new MeanMetric();
private final CounterMetric currentMetric = new CounterMetric();
private final CounterMetric numberOfQueries = new CounterMetric();
public PercolatorQueriesRegistry(ShardId shardId, @IndexSettings Settings indexSettings, IndexQueryParserService queryParserService, public PercolatorQueriesRegistry(ShardId shardId, @IndexSettings Settings indexSettings, IndexQueryParserService queryParserService,
ShardIndexingService indexingService, IndicesLifecycle indicesLifecycle, MapperService mapperService, ShardIndexingService indexingService, MapperService mapperService,
IndexFieldDataService indexFieldDataService, ShardPercolateService shardPercolateService) { IndexFieldDataService indexFieldDataService) {
super(shardId, indexSettings); super(shardId, indexSettings);
this.queryParserService = queryParserService; this.queryParserService = queryParserService;
this.mapperService = mapperService; this.mapperService = mapperService;
this.indicesLifecycle = indicesLifecycle;
this.indexingService = indexingService; this.indexingService = indexingService;
this.indexFieldDataService = indexFieldDataService; this.indexFieldDataService = indexFieldDataService;
this.shardPercolateService = shardPercolateService;
this.mapUnmappedFieldsAsString = indexSettings.getAsBoolean(MAP_UNMAPPED_FIELDS_AS_STRING, false); this.mapUnmappedFieldsAsString = indexSettings.getAsBoolean(MAP_UNMAPPED_FIELDS_AS_STRING, false);
indicesLifecycle.addListener(shardLifecycleListener);
mapperService.addTypeListener(percolateTypeListener); mapperService.addTypeListener(percolateTypeListener);
} }
@ -107,7 +104,6 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple
@Override @Override
public void close() { public void close() {
mapperService.removeTypeListener(percolateTypeListener); mapperService.removeTypeListener(percolateTypeListener);
indicesLifecycle.removeListener(shardLifecycleListener);
indexingService.removeListener(realTimePercolatorOperationListener); indexingService.removeListener(realTimePercolatorOperationListener);
clear(); clear();
} }
@ -116,30 +112,25 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple
percolateQueries.clear(); percolateQueries.clear();
} }
void enableRealTimePercolator() { public void enableRealTimePercolator() {
if (realTimePercolatorEnabled.compareAndSet(false, true)) { if (realTimePercolatorEnabled.compareAndSet(false, true)) {
indexingService.addListener(realTimePercolatorOperationListener); indexingService.addListener(realTimePercolatorOperationListener);
} }
} }
void disableRealTimePercolator() {
if (realTimePercolatorEnabled.compareAndSet(true, false)) {
indexingService.removeListener(realTimePercolatorOperationListener);
}
}
public void addPercolateQuery(String idAsString, BytesReference source) { public void addPercolateQuery(String idAsString, BytesReference source) {
Query newquery = parsePercolatorDocument(idAsString, source); Query newquery = parsePercolatorDocument(idAsString, source);
BytesRef id = new BytesRef(idAsString); BytesRef id = new BytesRef(idAsString);
Query previousQuery = percolateQueries.put(id, newquery); percolateQueries.put(id, newquery);
shardPercolateService.addedQuery(id, previousQuery, newquery); numberOfQueries.inc();
} }
public void removePercolateQuery(String idAsString) { public void removePercolateQuery(String idAsString) {
BytesRef id = new BytesRef(idAsString); BytesRef id = new BytesRef(idAsString);
Query query = percolateQueries.remove(id); Query query = percolateQueries.remove(id);
if (query != null) { if (query != null) {
shardPercolateService.removedQuery(id, query); numberOfQueries.dec();
} }
} }
@ -225,55 +216,27 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple
enableRealTimePercolator(); enableRealTimePercolator();
} }
} }
} }
private class ShardLifecycleListener extends IndicesLifecycle.Listener { public void loadQueries(IndexReader reader) {
logger.trace("loading percolator queries...");
@Override final int loadedQueries;
public void afterIndexShardCreated(IndexShard indexShard) { try {
if (hasPercolatorType(indexShard)) { Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME));
enableRealTimePercolator(); QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
IndexSearcher indexSearcher = new IndexSearcher(reader);
indexSearcher.setQueryCache(null);
indexSearcher.search(query, queryCollector);
Map<BytesRef, Query> queries = queryCollector.queries();
for (Map.Entry<BytesRef, Query> entry : queries.entrySet()) {
percolateQueries.put(entry.getKey(), entry.getValue());
numberOfQueries.inc();
} }
loadedQueries = queries.size();
} catch (Exception e) {
throw new PercolatorException(shardId.index(), "failed to load queries from percolator index", e);
} }
logger.debug("done loading [{}] percolator queries", loadedQueries);
@Override
public void beforeIndexShardPostRecovery(IndexShard indexShard) {
if (hasPercolatorType(indexShard)) {
// percolator index has started, fetch what we can from it and initialize the indices
// we have
logger.trace("loading percolator queries for [{}]...", shardId);
int loadedQueries = loadQueries(indexShard);
logger.debug("done loading [{}] percolator queries for [{}]", loadedQueries, shardId);
}
}
private boolean hasPercolatorType(IndexShard indexShard) {
ShardId otherShardId = indexShard.shardId();
return shardId.equals(otherShardId) && mapperService.hasMapping(PercolatorService.TYPE_NAME);
}
private int loadQueries(IndexShard shard) {
shard.refresh("percolator_load_queries");
// NOTE: we acquire the searcher via the engine directly here since this is executed right
// before the shard is marked as POST_RECOVERY
try (Engine.Searcher searcher = shard.engine().acquireSearcher("percolator_load_queries")) {
Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME));
QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
IndexSearcher indexSearcher = new IndexSearcher(searcher.reader());
indexSearcher.setQueryCache(null);
indexSearcher.search(query, queryCollector);
Map<BytesRef, Query> queries = queryCollector.queries();
for (Map.Entry<BytesRef, Query> entry : queries.entrySet()) {
Query previousQuery = percolateQueries.put(entry.getKey(), entry.getValue());
shardPercolateService.addedQuery(entry.getKey(), previousQuery, entry.getValue());
}
return queries.size();
} catch (Exception e) {
throw new PercolatorException(shardId.index(), "failed to load queries from percolator index", e);
}
}
} }
private class RealTimePercolatorOperationListener extends IndexingOperationListener { private class RealTimePercolatorOperationListener extends IndexingOperationListener {
@ -320,4 +283,35 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple
} }
} }
} }
public void prePercolate() {
currentMetric.inc();
}
public void postPercolate(long tookInNanos) {
currentMetric.dec();
percolateMetric.inc(tookInNanos);
}
/**
* @return The current metrics
*/
public PercolateStats stats() {
return new PercolateStats(percolateMetric.count(), TimeUnit.NANOSECONDS.toMillis(percolateMetric.sum()), currentMetric.count(), -1, numberOfQueries.count());
}
// Enable when a more efficient manner is found for estimating the size of a Lucene query.
/*private static long computeSizeInMemory(HashedBytesRef id, Query query) {
long size = (3 * RamUsageEstimator.NUM_BYTES_INT) + RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + id.bytes.bytes.length;
size += RamEstimator.sizeOf(query);
return size;
}
private static final class RamEstimator {
// we move this into its own class to exclude it from the forbidden API checks
// it's fine to use here!
static long sizeOf(Query query) {
return RamUsageEstimator.sizeOf(query);
}
}*/
} }
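With the shard-level percolate service folded into the registry, the per-shard metrics are driven directly through prePercolate()/postPercolate(). A hedged sketch of the intended call pattern (the helper class is made up; error handling is simplified):

import org.elasticsearch.index.percolator.PercolateStats;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;

// Hypothetical helper (not part of this change): `runPercolate` stands in for
// the actual percolate execution against the registry's queries.
final class PercolateTimingExample {
    static PercolateStats timedPercolate(PercolatorQueriesRegistry registry, Runnable runPercolate) {
        long start = System.nanoTime();
        registry.prePercolate();                               // bump the in-flight counter
        try {
            runPercolate.run();                                // execute the percolate request
        } finally {
            registry.postPercolate(System.nanoTime() - start); // record took-time in nanoseconds
        }
        return registry.stats();                               // count, total time, current, number of queries
    }
}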

View File

@ -1,93 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.percolator.stats;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
import java.util.concurrent.TimeUnit;
/**
* Shard level percolator service that maintains percolator metrics:
* <ul>
* <li> total time spent in percolate api
* <li> the current number of percolate requests
* <li> number of registered percolate queries
* </ul>
*/
public class ShardPercolateService extends AbstractIndexShardComponent {
@Inject
public ShardPercolateService(ShardId shardId, @IndexSettings Settings indexSettings) {
super(shardId, indexSettings);
}
private final MeanMetric percolateMetric = new MeanMetric();
private final CounterMetric currentMetric = new CounterMetric();
private final CounterMetric numberOfQueries = new CounterMetric();
public void prePercolate() {
currentMetric.inc();
}
public void postPercolate(long tookInNanos) {
currentMetric.dec();
percolateMetric.inc(tookInNanos);
}
public void addedQuery(BytesRef id, Query previousQuery, Query newQuery) {
numberOfQueries.inc();
}
public void removedQuery(BytesRef id, Query query) {
numberOfQueries.dec();
}
/**
* @return The current metrics
*/
public PercolateStats stats() {
return new PercolateStats(percolateMetric.count(), TimeUnit.NANOSECONDS.toMillis(percolateMetric.sum()), currentMetric.count(), -1, numberOfQueries.count());
}
// Enable when a more efficient manner is found for estimating the size of a Lucene query.
/*private static long computeSizeInMemory(HashedBytesRef id, Query query) {
long size = (3 * RamUsageEstimator.NUM_BYTES_INT) + RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + id.bytes.bytes.length;
size += RamEstimator.sizeOf(query);
return size;
}
private static final class RamEstimator {
// we move this into it's own class to exclude it from the forbidden API checks
// it's fine to use here!
static long sizeOf(Query query) {
return RamUsageEstimator.sizeOf(query);
}
}*/
}

View File

@ -17,59 +17,47 @@
* under the License. * under the License.
*/ */
package org.elasticsearch.index.engine; package org.elasticsearch.index.shard;
import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.Engine.Searcher; import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.EngineException;
import java.util.Set; import java.io.IOException;
/** /**
* Service responsible for wrapping the {@link DirectoryReader} and {@link IndexSearcher} of a {@link Searcher} via the * Extension point to add custom functionality at request time to the {@link DirectoryReader}
* configured {@link IndexSearcherWrapper} instance. This allows custom functionality to be added to the {@link Searcher} * and {@link IndexSearcher} managed by the {@link Engine}.
* before being used to do an operation (search, get, field stats etc.)
*/ */
// TODO: This extension point is a bit hacky now, because the IndexSearcher from the engine can only be wrapped once, public interface IndexSearcherWrapper {
// if we allowed the IndexSearcher to be wrapped multiple times then a custom IndexSearcherWrapper needs to have good
// control over its location in the wrapping chain
public final class IndexSearcherWrappingService {
private final IndexSearcherWrapper wrapper; /**
* @param reader The provided directory reader to be wrapped to add custom functionality
* @return a new directory reader wrapping the provided directory reader or if no wrapping was performed
* the provided directory reader
*/
DirectoryReader wrap(DirectoryReader reader) throws IOException;
// for unit tests: /**
IndexSearcherWrappingService() { * @param engineConfig The engine config which can be used to get the query cache and query cache policy from
this.wrapper = null; * when creating a new index searcher
} * @param searcher The provided index searcher to be wrapped to add custom functionality
* @return a new index searcher wrapping the provided index searcher or if no wrapping was performed
@Inject * the provided index searcher
// Use a Set parameter here, because constructor parameter can't be optional */
// and I prefer to keep the `wrapper` field final. IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws IOException;
public IndexSearcherWrappingService(Set<IndexSearcherWrapper> wrappers) {
if (wrappers.size() > 1) {
throw new IllegalStateException("wrapping of the index searcher by more than one wrappers is forbidden, found the following wrappers [" + wrappers + "]");
}
if (wrappers.isEmpty()) {
this.wrapper = null;
} else {
this.wrapper = wrappers.iterator().next();
}
}
/** /**
* If there are configured {@link IndexSearcherWrapper} instances, the {@link IndexSearcher} of the provided engine searcher * If there are configured {@link IndexSearcherWrapper} instances, the {@link IndexSearcher} of the provided engine searcher
* gets wrapped and a new {@link Searcher} instance is returned, otherwise the provided {@link Searcher} is returned. * gets wrapped and a new {@link Engine.Searcher} instance is returned, otherwise the provided {@link Engine.Searcher} is returned.
* *
* This is invoked each time a {@link Searcher} is requested to do an operation. (for example search) * This is invoked each time a {@link Engine.Searcher} is requested to do an operation. (for example search)
*/ */
public Searcher wrap(EngineConfig engineConfig, final Searcher engineSearcher) throws EngineException { default Engine.Searcher wrap(EngineConfig engineConfig, Engine.Searcher engineSearcher) throws IOException {
if (wrapper == null) { DirectoryReader reader = wrap((DirectoryReader) engineSearcher.reader());
return engineSearcher;
}
DirectoryReader reader = wrapper.wrap((DirectoryReader) engineSearcher.reader());
IndexSearcher innerIndexSearcher = new IndexSearcher(reader); IndexSearcher innerIndexSearcher = new IndexSearcher(reader);
innerIndexSearcher.setQueryCache(engineConfig.getQueryCache()); innerIndexSearcher.setQueryCache(engineConfig.getQueryCache());
innerIndexSearcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy()); innerIndexSearcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy());
@ -77,12 +65,11 @@ public final class IndexSearcherWrappingService {
// TODO: Right now IndexSearcher isn't wrapper friendly; when it becomes wrapper friendly we should revise this extension point // TODO: Right now IndexSearcher isn't wrapper friendly; when it becomes wrapper friendly we should revise this extension point
// For example if IndexSearcher#rewrite() is overridden then IndexSearcher#createNormalizedWeight also needs to be overridden // For example if IndexSearcher#rewrite() is overridden then IndexSearcher#createNormalizedWeight also needs to be overridden
// This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped multiple times // This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped multiple times
IndexSearcher indexSearcher = wrapper.wrap(engineConfig, innerIndexSearcher); IndexSearcher indexSearcher = wrap(engineConfig, innerIndexSearcher);
if (reader == engineSearcher.reader() && indexSearcher == innerIndexSearcher) { if (reader == engineSearcher.reader() && indexSearcher == innerIndexSearcher) {
return engineSearcher; return engineSearcher;
} else { } else {
return new Engine.Searcher(engineSearcher.source(), indexSearcher) { return new Engine.Searcher(engineSearcher.source(), indexSearcher) {
@Override @Override
public void close() throws ElasticsearchException { public void close() throws ElasticsearchException {
engineSearcher.close(); engineSearcher.close();
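For illustration, a minimal no-op implementation of the relocated interface (the class name is made up; a real wrapper would return filtered or otherwise decorated instances):

package org.elasticsearch.index.shard; // hypothetical location, for the example only

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.elasticsearch.index.engine.EngineConfig;

// Returning the arguments unchanged means the default
// wrap(EngineConfig, Engine.Searcher) hands back the original engine searcher.
public final class NoOpIndexSearcherWrapper implements IndexSearcherWrapper {

    @Override
    public DirectoryReader wrap(DirectoryReader reader) throws IOException {
        return reader; // a real wrapper could return e.g. a filtered DirectoryReader here
    }

    @Override
    public IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws IOException {
        return searcher; // keep the searcher built over the (possibly wrapped) reader
    }
}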

View File

@ -20,10 +20,7 @@
package org.elasticsearch.index.shard; package org.elasticsearch.index.shard;
import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.AlreadyClosedException;
@ -36,6 +33,7 @@ import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsRequest;
import org.elasticsearch.action.termvectors.TermVectorsResponse; import org.elasticsearch.action.termvectors.TermVectorsResponse;
import org.elasticsearch.bootstrap.Elasticsearch;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
@ -51,11 +49,11 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.IndexServicesProvider;
import org.elasticsearch.index.VersionType; import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.aliases.IndexAliasesService; import org.elasticsearch.index.aliases.IndexAliasesService;
import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.cache.IndexCache;
@ -75,8 +73,8 @@ import org.elasticsearch.index.indexing.IndexingStats;
import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.mapper.*; import org.elasticsearch.index.mapper.*;
import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.PercolateStats;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.percolator.stats.ShardPercolateService;
import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.refresh.RefreshStats;
@ -99,12 +97,12 @@ import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.index.translog.TranslogWriter; import org.elasticsearch.index.translog.TranslogWriter;
import org.elasticsearch.index.warmer.ShardIndexWarmerService; import org.elasticsearch.index.warmer.ShardIndexWarmerService;
import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.index.warmer.WarmerStats;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.InternalIndicesLifecycle; import org.elasticsearch.indices.InternalIndicesLifecycle;
import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryFailedException;
import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat; import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat;
import org.elasticsearch.search.suggest.completion.CompletionStats; import org.elasticsearch.search.suggest.completion.CompletionStats;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
@ -137,7 +135,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
private final ShardRequestCache shardQueryCache; private final ShardRequestCache shardQueryCache;
private final ShardFieldData shardFieldData; private final ShardFieldData shardFieldData;
private final PercolatorQueriesRegistry percolatorQueriesRegistry; private final PercolatorQueriesRegistry percolatorQueriesRegistry;
private final ShardPercolateService shardPercolateService;
private final TermVectorsService termVectorsService; private final TermVectorsService termVectorsService;
private final IndexFieldDataService indexFieldDataService; private final IndexFieldDataService indexFieldDataService;
private final ShardSuggestMetric shardSuggestMetric = new ShardSuggestMetric(); private final ShardSuggestMetric shardSuggestMetric = new ShardSuggestMetric();
@ -161,7 +158,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
protected volatile IndexShardState state; protected volatile IndexShardState state;
protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>(); protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();
protected final EngineFactory engineFactory; protected final EngineFactory engineFactory;
private final IndexSearcherWrappingService wrappingService;
@Nullable @Nullable
private RecoveryState recoveryState; private RecoveryState recoveryState;
@ -190,42 +186,36 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
private final IndexShardOperationCounter indexShardOperationCounter; private final IndexShardOperationCounter indexShardOperationCounter;
private EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY); private final EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY);
private final IndexSearcherWrapper searcherWrapper;
@Inject @Inject
public IndexShard(ShardId shardId, @IndexSettings Settings indexSettings, IndicesLifecycle indicesLifecycle, Store store, public IndexShard(ShardId shardId, @IndexSettings Settings indexSettings, ShardPath path, Store store, IndexServicesProvider provider) {
ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService,
IndicesQueryCache indicesQueryCache, CodecService codecService,
TermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService,
@Nullable IndicesWarmer warmer, SimilarityService similarityService, EngineFactory factory,
ShardPath path, BigArrays bigArrays, IndexSearcherWrappingService wrappingService) {
super(shardId, indexSettings); super(shardId, indexSettings);
this.codecService = codecService; this.codecService = provider.getCodecService();
this.warmer = warmer; this.warmer = provider.getWarmer();
this.deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); this.deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
this.similarityService = similarityService; this.similarityService = provider.getSimilarityService();
this.wrappingService = wrappingService;
Objects.requireNonNull(store, "Store must be provided to the index shard"); Objects.requireNonNull(store, "Store must be provided to the index shard");
this.engineFactory = factory; this.engineFactory = provider.getFactory();
this.indicesLifecycle = (InternalIndicesLifecycle) indicesLifecycle; this.indicesLifecycle = (InternalIndicesLifecycle) provider.getIndicesLifecycle();
this.store = store; this.store = store;
this.mergeSchedulerConfig = new MergeSchedulerConfig(indexSettings); this.mergeSchedulerConfig = new MergeSchedulerConfig(indexSettings);
this.threadPool = threadPool; this.threadPool = provider.getThreadPool();
this.mapperService = mapperService; this.mapperService = provider.getMapperService();
this.queryParserService = queryParserService; this.queryParserService = provider.getQueryParserService();
this.indexCache = indexCache; this.indexCache = provider.getIndexCache();
this.indexAliasesService = indexAliasesService; this.indexAliasesService = provider.getIndexAliasesService();
this.indexingService = new ShardIndexingService(shardId, indexSettings); this.indexingService = new ShardIndexingService(shardId, indexSettings);
this.getService = new ShardGetService(this, mapperService); this.getService = new ShardGetService(this, mapperService);
this.termVectorsService = termVectorsService; this.termVectorsService = provider.getTermVectorsService();
this.searchService = new ShardSearchStats(indexSettings); this.searchService = new ShardSearchStats(indexSettings);
this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings); this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings);
this.indicesQueryCache = indicesQueryCache; this.indicesQueryCache = provider.getIndicesQueryCache();
this.shardQueryCache = new ShardRequestCache(shardId, indexSettings); this.shardQueryCache = new ShardRequestCache(shardId, indexSettings);
this.shardFieldData = new ShardFieldData(); this.shardFieldData = new ShardFieldData();
this.shardPercolateService = new ShardPercolateService(shardId, indexSettings); this.indexFieldDataService = provider.getIndexFieldDataService();
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryParserService, indexingService, indicesLifecycle, mapperService, indexFieldDataService, shardPercolateService);
this.indexFieldDataService = indexFieldDataService;
this.shardBitsetFilterCache = new ShardBitsetFilterCache(shardId, indexSettings); this.shardBitsetFilterCache = new ShardBitsetFilterCache(shardId, indexSettings);
state = IndexShardState.CREATED; state = IndexShardState.CREATED;
this.refreshInterval = indexSettings.getAsTime(INDEX_REFRESH_INTERVAL, EngineConfig.DEFAULT_REFRESH_INTERVAL); this.refreshInterval = indexSettings.getAsTime(INDEX_REFRESH_INTERVAL, EngineConfig.DEFAULT_REFRESH_INTERVAL);
@ -238,7 +228,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
this.checkIndexOnStartup = indexSettings.get("index.shard.check_on_startup", "false"); this.checkIndexOnStartup = indexSettings.get("index.shard.check_on_startup", "false");
this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, getFromSettings(logger, indexSettings, Translog.Durabilty.REQUEST), this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, getFromSettings(logger, indexSettings, Translog.Durabilty.REQUEST),
bigArrays, threadPool); provider.getBigArrays(), threadPool);
final QueryCachingPolicy cachingPolicy; final QueryCachingPolicy cachingPolicy;
// the query cache is a node-level thing, however we want the most popular filters // the query cache is a node-level thing, however we want the most popular filters
// to be computed on a per-shard basis // to be computed on a per-shard basis
@ -252,6 +242,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
this.flushThresholdSize = indexSettings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB)); this.flushThresholdSize = indexSettings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB));
this.disableFlush = indexSettings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, false); this.disableFlush = indexSettings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, false);
this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId); this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);
this.searcherWrapper = provider.getIndexSearcherWrapper();
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryParserService, indexingService, mapperService, indexFieldDataService);
if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) {
percolatorQueriesRegistry.enableRealTimePercolator();
}
} }
public Store store() { public Store store() {
@ -344,7 +339,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
if (newRouting.state() == ShardRoutingState.STARTED || newRouting.state() == ShardRoutingState.RELOCATING) { if (newRouting.state() == ShardRoutingState.STARTED || newRouting.state() == ShardRoutingState.RELOCATING) {
// we want to refresh *before* we move to internal STARTED state // we want to refresh *before* we move to internal STARTED state
try { try {
engine().refresh("cluster_state_started"); getEngine().refresh("cluster_state_started");
} catch (Throwable t) { } catch (Throwable t) {
logger.debug("failed to refresh due to move to cluster wide started", t); logger.debug("failed to refresh due to move to cluster wide started", t);
} }
@ -453,7 +448,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("index [{}][{}]{}", create.type(), create.id(), create.docs()); logger.trace("index [{}][{}]{}", create.type(), create.id(), create.docs());
} }
engine().create(create); getEngine().create(create);
create.endTime(System.nanoTime()); create.endTime(System.nanoTime());
} catch (Throwable ex) { } catch (Throwable ex) {
indexingService.postCreate(create, ex); indexingService.postCreate(create, ex);
@ -492,7 +487,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs()); logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs());
} }
created = engine().index(index); created = getEngine().index(index);
index.endTime(System.nanoTime()); index.endTime(System.nanoTime());
} catch (Throwable ex) { } catch (Throwable ex) {
indexingService.postIndex(index, ex); indexingService.postIndex(index, ex);
@ -515,7 +510,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("delete [{}]", delete.uid().text()); logger.trace("delete [{}]", delete.uid().text());
} }
engine().delete(delete); getEngine().delete(delete);
delete.endTime(System.nanoTime()); delete.endTime(System.nanoTime());
} catch (Throwable ex) { } catch (Throwable ex) {
indexingService.postDelete(delete, ex); indexingService.postDelete(delete, ex);
@ -526,7 +521,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
public Engine.GetResult get(Engine.Get get) { public Engine.GetResult get(Engine.Get get) {
readAllowed(); readAllowed();
return engine().get(get); return getEngine().get(get, this::acquireSearcher);
} }
public void refresh(String source) { public void refresh(String source) {
@ -535,7 +530,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
logger.trace("refresh with source: {}", source); logger.trace("refresh with source: {}", source);
} }
long time = System.nanoTime(); long time = System.nanoTime();
engine().refresh(source); getEngine().refresh(source);
refreshMetric.inc(System.nanoTime() - time); refreshMetric.inc(System.nanoTime() - time);
} }
@ -561,7 +556,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
*/ */
@Nullable @Nullable
public CommitStats commitStats() { public CommitStats commitStats() {
Engine engine = engineUnsafe(); Engine engine = getEngineOrNull();
return engine == null ? null : engine.commitStats(); return engine == null ? null : engine.commitStats();
} }
@ -588,7 +583,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
} }
public MergeStats mergeStats() { public MergeStats mergeStats() {
final Engine engine = engineUnsafe(); final Engine engine = getEngineOrNull();
if (engine == null) { if (engine == null) {
return new MergeStats(); return new MergeStats();
} }
@ -596,7 +591,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
} }
public SegmentsStats segmentStats() { public SegmentsStats segmentStats() {
SegmentsStats segmentsStats = engine().segmentsStats(); SegmentsStats segmentsStats = getEngine().segmentsStats();
segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes()); segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes());
return segmentsStats; return segmentsStats;
} }
@ -621,12 +616,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
return percolatorQueriesRegistry; return percolatorQueriesRegistry;
} }
public ShardPercolateService shardPercolateService() {
return shardPercolateService;
}
public TranslogStats translogStats() { public TranslogStats translogStats() {
return engine().getTranslog().stats(); return getEngine().getTranslog().stats();
} }
public SuggestStats suggestStats() { public SuggestStats suggestStats() {
@ -651,7 +642,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) { public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) {
verifyStartedOrRecovering(); verifyStartedOrRecovering();
logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]]", syncId, expectedCommitId); logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]]", syncId, expectedCommitId);
return engine().syncFlush(syncId, expectedCommitId); return getEngine().syncFlush(syncId, expectedCommitId);
} }
public Engine.CommitId flush(FlushRequest request) throws ElasticsearchException { public Engine.CommitId flush(FlushRequest request) throws ElasticsearchException {
@ -666,7 +657,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
verifyStartedOrRecovering(); verifyStartedOrRecovering();
long time = System.nanoTime(); long time = System.nanoTime();
Engine.CommitId commitId = engine().flush(force, waitIfOngoing); Engine.CommitId commitId = getEngine().flush(force, waitIfOngoing);
flushMetric.inc(System.nanoTime() - time); flushMetric.inc(System.nanoTime() - time);
return commitId; return commitId;
@ -677,7 +668,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("optimize with {}", optimize); logger.trace("optimize with {}", optimize);
} }
engine().forceMerge(optimize.flush(), optimize.maxNumSegments(), optimize.onlyExpungeDeletes(), false, false); getEngine().forceMerge(optimize.flush(), optimize.maxNumSegments(), optimize.onlyExpungeDeletes(), false, false);
} }
/** /**
@ -690,7 +681,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
} }
org.apache.lucene.util.Version previousVersion = minimumCompatibleVersion(); org.apache.lucene.util.Version previousVersion = minimumCompatibleVersion();
// we just want to upgrade the segments, not actually optimize to a single segment // we just want to upgrade the segments, not actually optimize to a single segment
engine().forceMerge(true, // we need to flush at the end to make sure the upgrade is durable getEngine().forceMerge(true, // we need to flush at the end to make sure the upgrade is durable
Integer.MAX_VALUE, // we just want to upgrade the segments, not actually optimize to a single segment Integer.MAX_VALUE, // we just want to upgrade the segments, not actually optimize to a single segment
false, true, upgrade.upgradeOnlyAncientSegments()); false, true, upgrade.upgradeOnlyAncientSegments());
org.apache.lucene.util.Version version = minimumCompatibleVersion(); org.apache.lucene.util.Version version = minimumCompatibleVersion();
@ -703,7 +694,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
public org.apache.lucene.util.Version minimumCompatibleVersion() { public org.apache.lucene.util.Version minimumCompatibleVersion() {
org.apache.lucene.util.Version luceneVersion = null; org.apache.lucene.util.Version luceneVersion = null;
for (Segment segment : engine().segments(false)) { for (Segment segment : getEngine().segments(false)) {
if (luceneVersion == null || luceneVersion.onOrAfter(segment.getVersion())) { if (luceneVersion == null || luceneVersion.onOrAfter(segment.getVersion())) {
luceneVersion = segment.getVersion(); luceneVersion = segment.getVersion();
} }
@ -721,7 +712,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
IndexShardState state = this.state; // one time volatile read IndexShardState state = this.state; // one time volatile read
// we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) { if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {
return engine().snapshotIndex(flushFirst); return getEngine().snapshotIndex(flushFirst);
} else { } else {
throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed"); throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
} }
@ -742,12 +733,17 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
*/ */
public void failShard(String reason, @Nullable Throwable e) { public void failShard(String reason, @Nullable Throwable e) {
// fail the engine. This will cause this shard to also be removed from the node's index service. // fail the engine. This will cause this shard to also be removed from the node's index service.
engine().failEngine(reason, e); getEngine().failEngine(reason, e);
} }
public Engine.Searcher acquireSearcher(String source) { public Engine.Searcher acquireSearcher(String source) {
readAllowed(); readAllowed();
return engine().acquireSearcher(source); Engine engine = getEngine();
try {
return searcherWrapper == null ? engine.acquireSearcher(source) : searcherWrapper.wrap(engineConfig, engine.acquireSearcher(source));
} catch (IOException ex) {
throw new ElasticsearchException("failed to wrap searcher", ex);
}
} }
public void close(String reason, boolean flushEngine) throws IOException { public void close(String reason, boolean flushEngine) throws IOException {
@ -774,8 +770,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
} }
} }
public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException { public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
indicesLifecycle.beforeIndexShardPostRecovery(this); if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) {
refresh("percolator_load_queries");
try (Engine.Searcher searcher = getEngine().acquireSearcher("percolator_load_queries")) {
this.percolatorQueriesRegistry.loadQueries(searcher.reader());
}
}
synchronized (mutex) { synchronized (mutex) {
if (state == IndexShardState.CLOSED) { if (state == IndexShardState.CLOSED) {
throw new IndexShardClosedException(shardId); throw new IndexShardClosedException(shardId);
@ -789,7 +791,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
recoveryState.setStage(RecoveryState.Stage.DONE); recoveryState.setStage(RecoveryState.Stage.DONE);
changeState(IndexShardState.POST_RECOVERY, reason); changeState(IndexShardState.POST_RECOVERY, reason);
} }
indicesLifecycle.afterIndexShardPostRecovery(this);
return this; return this;
} }
@ -813,7 +814,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
if (state != IndexShardState.RECOVERING) { if (state != IndexShardState.RECOVERING) {
throw new IndexShardNotRecoveringException(shardId, state); throw new IndexShardNotRecoveringException(shardId, state);
} }
return engineConfig.getTranslogRecoveryPerformer().performBatchRecovery(engine(), operations); return engineConfig.getTranslogRecoveryPerformer().performBatchRecovery(getEngine(), operations);
} }
/** /**
@ -852,7 +853,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
* a remote peer. * a remote peer.
*/ */
public void skipTranslogRecovery() throws IOException { public void skipTranslogRecovery() throws IOException {
assert engineUnsafe() == null : "engine was already created"; assert getEngineOrNull() == null : "engine was already created";
internalPerformTranslogRecovery(true, true); internalPerformTranslogRecovery(true, true);
assert recoveryState.getTranslog().recoveredOperations() == 0; assert recoveryState.getTranslog().recoveredOperations() == 0;
} }
@ -892,7 +893,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
*/ */
public void finalizeRecovery() { public void finalizeRecovery() {
recoveryState().setStage(RecoveryState.Stage.FINALIZE); recoveryState().setStage(RecoveryState.Stage.FINALIZE);
engine().refresh("recovery_finalization"); getEngine().refresh("recovery_finalization");
startScheduledTasksIfNeeded(); startScheduledTasksIfNeeded();
engineConfig.setEnableGcDeletes(true); engineConfig.setEnableGcDeletes(true);
} }
@ -982,7 +983,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
config.setIndexingBufferSize(shardIndexingBufferSize); config.setIndexingBufferSize(shardIndexingBufferSize);
Engine engine = engineUnsafe(); Engine engine = getEngineOrNull();
if (engine == null) { if (engine == null) {
logger.debug("updateBufferSize: engine is closed; skipping"); logger.debug("updateBufferSize: engine is closed; skipping");
return; return;
@ -1057,7 +1058,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
*/ */
boolean shouldFlush() { boolean shouldFlush() {
if (disableFlush == false) { if (disableFlush == false) {
Engine engine = engineUnsafe(); Engine engine = getEngineOrNull();
if (engine != null) { if (engine != null) {
try { try {
Translog translog = engine.getTranslog(); Translog translog = engine.getTranslog();
@ -1171,15 +1172,37 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
searchService.onRefreshSettings(settings); searchService.onRefreshSettings(settings);
indexingService.onRefreshSettings(settings); indexingService.onRefreshSettings(settings);
if (change) { if (change) {
engine().onSettingsChanged(); getEngine().onSettingsChanged();
} }
} }
public Translog.View acquireTranslogView() {
Engine engine = getEngine();
assert engine.getTranslog() != null : "translog must not be null";
return engine.getTranslog().newView();
}
public List<Segment> segments(boolean verbose) {
return getEngine().segments(verbose);
}
public void flushAndCloseEngine() throws IOException {
getEngine().flushAndClose();
}
public Translog getTranslog() {
return getEngine().getTranslog();
}
public PercolateStats percolateStats() {
return percolatorQueriesRegistry.stats();
}
class EngineRefresher implements Runnable { class EngineRefresher implements Runnable {
@Override @Override
public void run() { public void run() {
// we check before if a refresh is needed, if not, we reschedule, otherwise, we fork, refresh, and then reschedule // we check before if a refresh is needed, if not, we reschedule, otherwise, we fork, refresh, and then reschedule
if (!engine().refreshNeeded()) { if (!getEngine().refreshNeeded()) {
reschedule(); reschedule();
return; return;
} }
@ -1187,7 +1210,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
@Override @Override
public void run() { public void run() {
try { try {
if (engine().refreshNeeded()) { if (getEngine().refreshNeeded()) {
refresh("schedule"); refresh("schedule");
} }
} catch (EngineClosedException e) { } catch (EngineClosedException e) {
@ -1300,8 +1323,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
recoveryState.getVerifyIndex().checkIndexTime(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - timeNS))); recoveryState.getVerifyIndex().checkIndexTime(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - timeNS)));
} }
public Engine engine() { Engine getEngine() {
Engine engine = engineUnsafe(); Engine engine = getEngineOrNull();
if (engine == null) { if (engine == null) {
throw new EngineClosedException(shardId); throw new EngineClosedException(shardId);
} }
@ -1310,7 +1333,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
/** NOTE: returns null if engine is not yet started (e.g. recovery phase 1, copying over index files, is still running), or if engine is /** NOTE: returns null if engine is not yet started (e.g. recovery phase 1, copying over index files, is still running), or if engine is
* closed. */ * closed. */
protected Engine engineUnsafe() { protected Engine getEngineOrNull() {
return this.currentEngineReference.get(); return this.currentEngineReference.get();
} }
@ -1403,7 +1426,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
}; };
return new EngineConfig(shardId, return new EngineConfig(shardId,
threadPool, indexingService, indexSettings, warmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig, threadPool, indexingService, indexSettings, warmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig,
mapperService.indexAnalyzer(), similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, wrappingService, translogConfig); mapperService.indexAnalyzer(), similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig);
} }
private static class IndexShardOperationCounter extends AbstractRefCounted { private static class IndexShardOperationCounter extends AbstractRefCounted {
@ -1444,7 +1467,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
*/ */
public void sync(Translog.Location location) { public void sync(Translog.Location location) {
try { try {
final Engine engine = engine(); final Engine engine = getEngine();
engine.getTranslog().ensureSynced(location); engine.getTranslog().ensureSynced(location);
} catch (EngineClosedException ex) { } catch (EngineClosedException ex) {
// that's fine since we already synced everything on engine close - this also is conform with the methods documentation // that's fine since we already synced everything on engine close - this also is conform with the methods documentation
@ -1515,4 +1538,5 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
} }
return false; return false;
} }
} }
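A minimal sketch of a caller that stays on the public IndexShard surface added in the hunks above (getTranslog(), segments(boolean), percolateStats()) instead of reaching into engine(); the class and method names of the sketch itself are illustrative and not part of this commit.

import java.util.List;

import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.index.percolator.PercolateStats;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.translog.TranslogStats;

// Illustrative caller; only the IndexShard accessors shown in the diff above are assumed to exist.
final class ShardAccessorsSketch {

    static void describe(IndexShard shard) {
        // translog statistics via the shard, not via engine().getTranslog()
        TranslogStats translogStats = shard.getTranslog().stats();

        // segment listing exposed directly on the shard
        List<Segment> segments = shard.segments(false);

        // percolator statistics served by the shard through its queries registry
        PercolateStats percolateStats = shard.percolateStats();

        System.out.println("segments=" + segments.size()
                + " translog=" + translogStats
                + " percolate=" + percolateStats);
    }
}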

View File

@ -1,76 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.shard;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.IndexSearcherWrapper;
import org.elasticsearch.index.engine.IndexSearcherWrappingService;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.InternalEngineFactory;
/**
* The {@code IndexShardModule} module is responsible for binding the correct
* shard id, index shard, engine factory, and warming service for a newly
* created shard.
*/
public class IndexShardModule extends AbstractModule {
private final ShardId shardId;
private final Settings settings;
private final boolean primary;
// pkg private so tests can mock
Class<? extends EngineFactory> engineFactoryImpl = InternalEngineFactory.class;
public IndexShardModule(ShardId shardId, boolean primary, Settings settings) {
this.settings = settings;
this.shardId = shardId;
this.primary = primary;
if (settings.get("index.translog.type") != null) {
throw new IllegalStateException("a custom translog type is no longer supported. got [" + settings.get("index.translog.type") + "]");
}
}
/** Return true if a shadow engine should be used */
protected boolean useShadowEngine() {
return primary == false && IndexMetaData.isIndexUsingShadowReplicas(settings);
}
@Override
protected void configure() {
bind(ShardId.class).toInstance(shardId);
if (useShadowEngine()) {
bind(IndexShard.class).to(ShadowIndexShard.class).asEagerSingleton();
} else {
bind(IndexShard.class).asEagerSingleton();
}
bind(EngineFactory.class).to(engineFactoryImpl);
bind(IndexSearcherWrappingService.class).asEagerSingleton();
// this injects an empty set in IndexSearcherWrappingService, otherwise guice can't construct IndexSearcherWrappingService
Multibinder<IndexSearcherWrapper> multibinder
= Multibinder.newSetBinder(binder(), IndexSearcherWrapper.class);
}
}

View File

@ -18,32 +18,14 @@
*/ */
package org.elasticsearch.index.shard; package org.elasticsearch.index.shard;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexServicesProvider;
import org.elasticsearch.index.aliases.IndexAliasesService;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.engine.IndexSearcherWrappingService;
import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException; import java.io.IOException;
@ -55,23 +37,8 @@ import java.io.IOException;
*/ */
public final class ShadowIndexShard extends IndexShard { public final class ShadowIndexShard extends IndexShard {
@Inject public ShadowIndexShard(ShardId shardId, @IndexSettings Settings indexSettings, ShardPath path, Store store, IndexServicesProvider provider) throws IOException {
public ShadowIndexShard(ShardId shardId, @IndexSettings Settings indexSettings, super(shardId, indexSettings, path, store, provider);
IndicesLifecycle indicesLifecycle, Store store,
ThreadPool threadPool, MapperService mapperService,
IndexQueryParserService queryParserService, IndexCache indexCache,
IndexAliasesService indexAliasesService, IndicesQueryCache indicesQueryCache,
CodecService codecService, TermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService,
@Nullable IndicesWarmer warmer,
SimilarityService similarityService,
EngineFactory factory,
ShardPath path, BigArrays bigArrays, IndexSearcherWrappingService wrappingService) throws IOException {
super(shardId, indexSettings, indicesLifecycle, store,
threadPool, mapperService, queryParserService, indexCache, indexAliasesService,
indicesQueryCache, codecService,
termVectorsService, indexFieldDataService,
warmer, similarityService,
factory, path, bigArrays, wrappingService);
} }
/** /**

View File

@ -490,7 +490,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) { public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) {
super(snapshotId, Version.CURRENT, shardId); super(snapshotId, Version.CURRENT, shardId);
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
store = indexService.shard(shardId.id()).store(); store = indexService.getShardOrNull(shardId.id()).store();
this.snapshotStatus = snapshotStatus; this.snapshotStatus = snapshotStatus;
} }
@ -774,7 +774,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
*/ */
public RestoreContext(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) { public RestoreContext(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) {
super(snapshotId, version, shardId, snapshotShardId); super(snapshotId, version, shardId, snapshotShardId);
store = indicesService.indexServiceSafe(shardId.getIndex()).shard(shardId.id()).store(); store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
this.recoveryState = recoveryState; this.recoveryState = recoveryState;
} }

View File

@ -27,6 +27,7 @@ import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.store.IndicesStore;
import java.io.Closeable; import java.io.Closeable;
@ -112,7 +113,7 @@ public class IndexStore extends AbstractIndexComponent implements Closeable {
/** /**
* The shard store class that should be used for each shard. * The shard store class that should be used for each shard.
*/ */
public Class<? extends DirectoryService> shardDirectory() { public DirectoryService newDirectoryService(ShardPath path) {
return FsDirectoryService.class; return new FsDirectoryService(indexSettings, this, path);
} }
} }
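A short sketch of the new factory-style hook above: the per-shard DirectoryService is now produced by the IndexStore for a concrete ShardPath rather than being declared as a class for the removed shard-level injector to instantiate. The wrapper class below is illustrative only.

import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.store.DirectoryService;
import org.elasticsearch.index.store.IndexStore;

// Illustrative helper; only IndexStore#newDirectoryService(ShardPath) from the hunk above is assumed.
final class DirectoryServiceSketch {
    static DirectoryService directoryFor(IndexStore indexStore, ShardPath shardPath) {
        return indexStore.newDirectoryService(shardPath);
    }
}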

View File

@ -97,17 +97,6 @@ public interface IndicesLifecycle {
} }
/**
* Called right after the shard is moved into POST_RECOVERY mode
*/
public void afterIndexShardPostRecovery(IndexShard indexShard) {}
/**
* Called right before the shard is moved into POST_RECOVERY mode.
* The shard is ready to be used but not yet marked as POST_RECOVERY.
*/
public void beforeIndexShardPostRecovery(IndexShard indexShard) {}
/** /**
* Called after the index shard has been started. * Called after the index shard has been started.
*/ */

View File

@ -346,7 +346,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
modules.add(new IndexFieldDataModule(indexSettings)); modules.add(new IndexFieldDataModule(indexSettings));
modules.add(new MapperServiceModule()); modules.add(new MapperServiceModule());
modules.add(new IndexAliasesServiceModule()); modules.add(new IndexAliasesServiceModule());
modules.add(new IndexModule(indexSettings)); modules.add(new IndexModule());
pluginsService.processModules(modules); pluginsService.processModules(modules);

View File

@ -87,7 +87,7 @@ public final class IndicesWarmer extends AbstractComponent {
if (indexService == null) { if (indexService == null) {
return; return;
} }
final IndexShard indexShard = indexService.shard(context.shardId().id()); final IndexShard indexShard = indexService.getShardOrNull(context.shardId().id());
if (indexShard == null) { if (indexShard == null) {
return; return;
} }
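The two shard-lookup flavours used throughout this commit, sketched for reference: getShardOrNull returns null when the shard is not allocated locally, while getShard (the replacement for shardSafe) is assumed to throw in that case. The class and method names below are illustrative.

import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;

final class ShardLookupSketch {

    // Lenient lookup: null when the shard is not on this node, so callers can bail out early
    // as IndicesWarmer does above.
    static IndexShard lookup(IndexService indexService, int shardId) {
        return indexService.getShardOrNull(shardId);
    }

    // Strict lookup: assumed to throw when the shard is missing, replacing the former shardSafe(id).
    static IndexShard require(IndexService indexService, int shardId) {
        return indexService.getShard(shardId);
    }
}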

View File

@ -121,28 +121,6 @@ public class InternalIndicesLifecycle extends AbstractComponent implements Indic
} }
} }
public void beforeIndexShardPostRecovery(IndexShard indexShard) {
for (Listener listener : listeners) {
try {
listener.beforeIndexShardPostRecovery(indexShard);
} catch (Throwable t) {
logger.warn("{} failed to invoke before shard post recovery callback", t, indexShard.shardId());
throw t;
}
}
}
public void afterIndexShardPostRecovery(IndexShard indexShard) {
for (Listener listener : listeners) {
try {
listener.afterIndexShardPostRecovery(indexShard);
} catch (Throwable t) {
logger.warn("{} failed to invoke after shard post recovery callback", t, indexShard.shardId());
throw t;
}
}
}
public void afterIndexShardStarted(IndexShard indexShard) { public void afterIndexShardStarted(IndexShard indexShard) {
for (Listener listener : listeners) { for (Listener listener : listeners) {

View File

@ -38,7 +38,7 @@ import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.indexing.IndexingStats; import org.elasticsearch.index.indexing.IndexingStats;
import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.stats.PercolateStats; import org.elasticsearch.index.percolator.PercolateStats;
import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.search.stats.SearchStats;

View File

@ -327,7 +327,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
// already deleted on us, ignore it // already deleted on us, ignore it
continue; continue;
} }
IndexSettingsService indexSettingsService = indexService.injector().getInstance(IndexSettingsService.class); IndexSettingsService indexSettingsService = indexService.settingsService();
indexSettingsService.refreshSettings(indexMetaData.settings()); indexSettingsService.refreshSettings(indexMetaData.settings());
} }
} }
@ -505,7 +505,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
continue; continue;
} }
IndexShard indexShard = indexService.shard(shardId); IndexShard indexShard = indexService.getShardOrNull(shardId);
if (indexShard != null) { if (indexShard != null) {
ShardRouting currentRoutingEntry = indexShard.routingEntry(); ShardRouting currentRoutingEntry = indexShard.routingEntry();
// if the current and global routing are initializing, but are still not the same, its a different "shard" being allocated // if the current and global routing are initializing, but are still not the same, its a different "shard" being allocated
@ -591,7 +591,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
final int shardId = shardRouting.id(); final int shardId = shardRouting.id();
if (indexService.hasShard(shardId)) { if (indexService.hasShard(shardId)) {
IndexShard indexShard = indexService.shardSafe(shardId); IndexShard indexShard = indexService.getShard(shardId);
if (indexShard.state() == IndexShardState.STARTED || indexShard.state() == IndexShardState.POST_RECOVERY) { if (indexShard.state() == IndexShardState.STARTED || indexShard.state() == IndexShardState.POST_RECOVERY) {
// the master thinks we are initializing, but we are already started or on POST_RECOVERY and waiting // the master thinks we are initializing, but we are already started or on POST_RECOVERY and waiting
// for master to confirm a shard started message (either master failover, or a cluster event before // for master to confirm a shard started message (either master failover, or a cluster event before
@ -647,7 +647,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
return; return;
} }
} }
final IndexShard indexShard = indexService.shardSafe(shardId); final IndexShard indexShard = indexService.getShard(shardId);
if (indexShard.ignoreRecoveryAttempt()) { if (indexShard.ignoreRecoveryAttempt()) {
// we are already recovering (we can get to this state since the cluster event can happen several // we are already recovering (we can get to this state since the cluster event can happen several
@ -835,7 +835,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
ShardRouting shardRouting = null; ShardRouting shardRouting = null;
final IndexService indexService = indicesService.indexService(shardId.index().name()); final IndexService indexService = indicesService.indexService(shardId.index().name());
if (indexService != null) { if (indexService != null) {
IndexShard indexShard = indexService.shard(shardId.id()); IndexShard indexShard = indexService.getShardOrNull(shardId.id());
if (indexShard != null) { if (indexShard != null) {
shardRouting = indexShard.routingEntry(); shardRouting = indexShard.routingEntry();
} }
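A sketch of the settings-refresh path shown above: the IndexSettingsService is reached through the IndexService accessor instead of the removed injector lookup. The helper class is illustrative only.

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.settings.IndexSettingsService;

final class RefreshIndexSettingsSketch {
    // Applies the index settings carried by the cluster state, as IndicesClusterStateService does above.
    static void applySettings(IndexService indexService, IndexMetaData indexMetaData) {
        IndexSettingsService settingsService = indexService.settingsService();
        settingsService.refreshSettings(indexMetaData.settings());
    }
}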

View File

@ -398,7 +398,7 @@ public class SyncedFlushService extends AbstractComponent {
} }
private PreSyncedFlushResponse performPreSyncedFlush(PreSyncedFlushRequest request) { private PreSyncedFlushResponse performPreSyncedFlush(PreSyncedFlushRequest request) {
IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id()); IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true); FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
logger.trace("{} performing pre sync flush", request.shardId()); logger.trace("{} performing pre sync flush", request.shardId());
Engine.CommitId commitId = indexShard.flush(flushRequest); Engine.CommitId commitId = indexShard.flush(flushRequest);
@ -408,7 +408,7 @@ public class SyncedFlushService extends AbstractComponent {
private SyncedFlushResponse performSyncedFlush(SyncedFlushRequest request) { private SyncedFlushResponse performSyncedFlush(SyncedFlushRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.shardSafe(request.shardId().id()); IndexShard indexShard = indexService.getShard(request.shardId().id());
logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", request.shardId(), request.syncId(), request.expectedCommitId()); logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", request.shardId(), request.syncId(), request.expectedCommitId());
Engine.SyncedFlushResult result = indexShard.syncFlush(request.syncId(), request.expectedCommitId()); Engine.SyncedFlushResult result = indexShard.syncFlush(request.syncId(), request.expectedCommitId());
logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result); logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result);
@ -426,7 +426,7 @@ public class SyncedFlushService extends AbstractComponent {
private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) { private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) {
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = indexService.shardSafe(request.shardId().id()); IndexShard indexShard = indexService.getShard(request.shardId().id());
if (indexShard.routingEntry().primary() == false) { if (indexShard.routingEntry().primary() == false) {
throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard"); throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard");
} }
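The synced-flush flow above, condensed into a sketch: a pre-synced flush records the commit id, and the synced flush later succeeds only if that commit id is still current. Only the IndexShard methods shown in the hunks are used; the wrapper class is illustrative.

import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;

final class SyncedFlushSketch {

    // Phase 1: a regular flush that records the commit id to compare against later.
    static Engine.CommitId preSyncedFlush(IndexShard shard) {
        FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
        return shard.flush(flushRequest);
    }

    // Phase 2: the synced flush itself; it succeeds only if the expected commit id still matches.
    static Engine.SyncedFlushResult syncedFlush(IndexShard shard, String syncId, Engine.CommitId expectedCommitId) {
        return shard.syncFlush(syncId, expectedCommitId);
    }
}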

View File

@ -234,7 +234,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
protected IndexShard getShard(ShardId shardId) { protected IndexShard getShard(ShardId shardId) {
IndexService indexService = indicesService.indexService(shardId.index().name()); IndexService indexService = indicesService.indexService(shardId.index().name());
if (indexService != null) { if (indexService != null) {
IndexShard indexShard = indexService.shard(shardId.id()); IndexShard indexShard = indexService.getShardOrNull(shardId.id());
return indexShard; return indexShard;
} }
return null; return null;
@ -264,7 +264,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
} }
final Translog translog; final Translog translog;
try { try {
translog = indexShard.engine().getTranslog(); translog = indexShard.getTranslog();
} catch (EngineClosedException e) { } catch (EngineClosedException e) {
// not ready yet to be checked for activity // not ready yet to be checked for activity
return null; return null;
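A sketch of the activity check above: the controller asks the shard for its translog and treats EngineClosedException as "not ready yet". The helper class is illustrative.

import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.translog.Translog;

final class TranslogAccessSketch {
    // Returns the shard's translog, or null while the engine is not started yet or already closed.
    static Translog translogOrNull(IndexShard indexShard) {
        try {
            return indexShard.getTranslog();
        } catch (EngineClosedException e) {
            return null; // not ready yet to be checked for activity
        }
    }
}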

View File

@ -89,7 +89,7 @@ public class RecoverySource extends AbstractComponent {
private RecoveryResponse recover(final StartRecoveryRequest request) { private RecoveryResponse recover(final StartRecoveryRequest request) {
final IndexService indexService = indicesService.indexServiceSafe(request.shardId().index().name()); final IndexService indexService = indicesService.indexServiceSafe(request.shardId().index().name());
final IndexShard shard = indexService.shardSafe(request.shardId().id()); final IndexShard shard = indexService.getShard(request.shardId().id());
// starting recovery from that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise // starting recovery from that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise
// the index operations will not be routed to it properly // the index operations will not be routed to it properly

View File

@ -120,9 +120,7 @@ public class RecoverySourceHandler {
* performs the recovery from the local engine to the target * performs the recovery from the local engine to the target
*/ */
public RecoveryResponse recoverToTarget() { public RecoveryResponse recoverToTarget() {
final Engine engine = shard.engine(); try (Translog.View translogView = shard.acquireTranslogView()) {
assert engine.getTranslog() != null : "translog must not be null";
try (Translog.View translogView = engine.getTranslog().newView()) {
logger.trace("captured translog id [{}] for recovery", translogView.minTranslogGeneration()); logger.trace("captured translog id [{}] for recovery", translogView.minTranslogGeneration());
final IndexCommit phase1Snapshot; final IndexCommit phase1Snapshot;
try { try {
@ -179,7 +177,7 @@ public class RecoverySourceHandler {
try { try {
recoverySourceMetadata = store.getMetadata(snapshot); recoverySourceMetadata = store.getMetadata(snapshot);
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
shard.engine().failEngine("recovery", ex); shard.failShard("recovery", ex);
throw ex; throw ex;
} }
for (String name : snapshot.getFileNames()) { for (String name : snapshot.getFileNames()) {
@ -287,7 +285,7 @@ public class RecoverySourceHandler {
for (StoreFileMetaData md : metadata) { for (StoreFileMetaData md : metadata) {
logger.debug("{} checking integrity for file {} after remove corruption exception", shard.shardId(), md); logger.debug("{} checking integrity for file {} after remove corruption exception", shard.shardId(), md);
if (store.checkIntegrityNoException(md) == false) { // we are corrupted on the primary -- fail! if (store.checkIntegrityNoException(md) == false) { // we are corrupted on the primary -- fail!
shard.engine().failEngine("recovery", corruptIndexException); shard.failShard("recovery", corruptIndexException);
logger.warn("{} Corrupted file detected {} checksum mismatch", shard.shardId(), md); logger.warn("{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
throw corruptIndexException; throw corruptIndexException;
} }
@ -641,7 +639,7 @@ public class RecoverySourceHandler {
} }
protected void failEngine(IOException cause) { protected void failEngine(IOException cause) {
shard.engine().failEngine("recovery", cause); shard.failShard("recovery", cause);
} }
Future<Void>[] asyncSendFiles(Store store, StoreFileMetaData[] files, Function<StoreFileMetaData, OutputStream> outputStreamFactory) { Future<Void>[] asyncSendFiles(Store store, StoreFileMetaData[] files, Function<StoreFileMetaData, OutputStream> outputStreamFactory) {
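A condensed sketch of the recovery-source pattern above: hold a translog view for the duration of the copy via try-with-resources, and fail the shard (rather than calling engine().failEngine(...)) when corruption is detected. copyFiles is a placeholder, not a method from the commit.

import java.io.IOException;

import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.translog.Translog;

final class RecoverySourceSketch {

    static void recover(IndexShard shard) throws IOException {
        try (Translog.View translogView = shard.acquireTranslogView()) {
            copyFiles(translogView); // placeholder for phase 1 file copy and phase 2 translog replay
        } catch (CorruptIndexException ex) {
            // a corrupted source cannot serve the recovery: fail the whole shard
            shard.failShard("recovery", ex);
            throw ex;
        }
    }

    // stands in for the real work; declared to throw IOException like the calls it replaces
    private static void copyFiles(Translog.View translogView) throws IOException {
    }
}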

View File

@ -52,7 +52,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
// if we relocate we need to close the engine in order to open a new // if we relocate we need to close the engine in order to open a new
// IndexWriter on the other end of the relocation // IndexWriter on the other end of the relocation
engineClosed = true; engineClosed = true;
shard.engine().flushAndClose(); shard.flushAndCloseEngine();
} catch (IOException e) { } catch (IOException e) {
logger.warn("close engine failed", e); logger.warn("close engine failed", e);
shard.failShard("failed to close engine (phase1)", e); shard.failShard("failed to close engine (phase1)", e);

View File

@ -395,7 +395,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
ShardId shardId = request.shardId; ShardId shardId = request.shardId;
IndexService indexService = indicesService.indexService(shardId.index().getName()); IndexService indexService = indicesService.indexService(shardId.index().getName());
if (indexService != null && indexService.indexUUID().equals(request.indexUUID)) { if (indexService != null && indexService.indexUUID().equals(request.indexUUID)) {
return indexService.shard(shardId.id()); return indexService.getShardOrNull(shardId.id());
} }
return null; return null;
} }

View File

@ -152,7 +152,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
try { try {
IndexService indexService = indicesService.indexService(shardId.index().name()); IndexService indexService = indicesService.indexService(shardId.index().name());
if (indexService != null) { if (indexService != null) {
IndexShard indexShard = indexService.shard(shardId.id()); IndexShard indexShard = indexService.getShardOrNull(shardId.id());
if (indexShard != null) { if (indexShard != null) {
final Store store = indexShard.store(); final Store store = indexShard.store();
store.incRef(); store.incRef();

View File

@ -50,6 +50,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.ObjectMapper;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
@ -89,6 +90,7 @@ import java.util.concurrent.ConcurrentMap;
*/ */
public class PercolateContext extends SearchContext { public class PercolateContext extends SearchContext {
private final PercolatorQueriesRegistry percolateQueryRegistry;
public boolean limit; public boolean limit;
private int size; private int size;
public boolean doSort; public boolean doSort;
@ -102,7 +104,6 @@ public class PercolateContext extends SearchContext {
private final PageCacheRecycler pageCacheRecycler; private final PageCacheRecycler pageCacheRecycler;
private final BigArrays bigArrays; private final BigArrays bigArrays;
private final ScriptService scriptService; private final ScriptService scriptService;
private final ConcurrentMap<BytesRef, Query> percolateQueries;
private final int numberOfShards; private final int numberOfShards;
private final Query aliasFilter; private final Query aliasFilter;
private final long originNanoTime = System.nanoTime(); private final long originNanoTime = System.nanoTime();
@ -133,7 +134,7 @@ public class PercolateContext extends SearchContext {
this.indexService = indexService; this.indexService = indexService;
this.fieldDataService = indexService.fieldData(); this.fieldDataService = indexService.fieldData();
this.searchShardTarget = searchShardTarget; this.searchShardTarget = searchShardTarget;
this.percolateQueries = indexShard.percolateRegistry().percolateQueries(); this.percolateQueryRegistry = indexShard.percolateRegistry();
this.types = new String[]{request.documentType()}; this.types = new String[]{request.documentType()};
this.pageCacheRecycler = pageCacheRecycler; this.pageCacheRecycler = pageCacheRecycler;
this.bigArrays = bigArrays.withCircuitBreaking(); this.bigArrays = bigArrays.withCircuitBreaking();
@ -179,7 +180,7 @@ public class PercolateContext extends SearchContext {
} }
public ConcurrentMap<BytesRef, Query> percolateQueries() { public ConcurrentMap<BytesRef, Query> percolateQueries() {
return percolateQueries; return percolateQueryRegistry.percolateQueries();
} }
public Query percolateQuery() { public Query percolateQuery() {

View File

@ -71,7 +71,7 @@ import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.percolator.stats.ShardPercolateService; import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
@ -86,7 +86,6 @@ import org.elasticsearch.search.aggregations.AggregationPhase;
import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
import org.elasticsearch.search.highlight.HighlightField; import org.elasticsearch.search.highlight.HighlightField;
import org.elasticsearch.search.highlight.HighlightPhase; import org.elasticsearch.search.highlight.HighlightPhase;
@ -177,11 +176,10 @@ public class PercolatorService extends AbstractComponent {
public PercolateShardResponse percolate(PercolateShardRequest request) { public PercolateShardResponse percolate(PercolateShardRequest request) {
IndexService percolateIndexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexService percolateIndexService = indicesService.indexServiceSafe(request.shardId().getIndex());
IndexShard indexShard = percolateIndexService.shardSafe(request.shardId().id()); IndexShard indexShard = percolateIndexService.getShard(request.shardId().id());
indexShard.readAllowed(); // check if we can read the shard... indexShard.readAllowed(); // check if we can read the shard...
PercolatorQueriesRegistry percolateQueryRegistry = indexShard.percolateRegistry();
ShardPercolateService shardPercolateService = indexShard.shardPercolateService(); percolateQueryRegistry.prePercolate();
shardPercolateService.prePercolate();
long startTime = System.nanoTime(); long startTime = System.nanoTime();
// TODO: The filteringAliases should be looked up at the coordinating node and serialized with all shard request, // TODO: The filteringAliases should be looked up at the coordinating node and serialized with all shard request,
@ -255,7 +253,7 @@ public class PercolatorService extends AbstractComponent {
} finally { } finally {
SearchContext.removeCurrent(); SearchContext.removeCurrent();
context.close(); context.close();
shardPercolateService.postPercolate(System.nanoTime() - startTime); percolateQueryRegistry.postPercolate(System.nanoTime() - startTime);
} }
} }
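The percolate timing hooks moved from ShardPercolateService onto the PercolatorQueriesRegistry; a sketch of the resulting call pattern, with the wrapper class and the Runnable parameter being illustrative only.

import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.shard.IndexShard;

final class PercolateTimingSketch {
    static void timedPercolate(IndexShard indexShard, Runnable percolation) {
        PercolatorQueriesRegistry registry = indexShard.percolateRegistry();
        registry.prePercolate();
        long startTime = System.nanoTime();
        try {
            percolation.run(); // the actual percolation work
        } finally {
            registry.postPercolate(System.nanoTime() - startTime);
        }
    }
}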

View File

@ -73,20 +73,6 @@ public abstract class Plugin {
return Collections.emptyList(); return Collections.emptyList();
} }
/**
* Per index shard module.
*/
public Collection<Module> shardModules(Settings indexSettings) {
return Collections.emptyList();
}
/**
* Per index shard service that will be automatically closed.
*/
public Collection<Class<? extends Closeable>> shardServices() {
return Collections.emptyList();
}
/** /**
* Additional node settings loaded by the plugin. Note that settings that are explicit in the nodes settings can't be * Additional node settings loaded by the plugin. Note that settings that are explicit in the nodes settings can't be
* overwritten with the additional settings. These settings added if they don't exist. * overwritten with the additional settings. These settings added if they don't exist.

View File

@ -250,22 +250,6 @@ public class PluginsService extends AbstractComponent {
return services; return services;
} }
public Collection<Module> shardModules(Settings indexSettings) {
List<Module> modules = new ArrayList<>();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
modules.addAll(plugin.v2().shardModules(indexSettings));
}
return modules;
}
public Collection<Class<? extends Closeable>> shardServices() {
List<Class<? extends Closeable>> services = new ArrayList<>();
for (Tuple<PluginInfo, Plugin> plugin : plugins) {
services.addAll(plugin.v2().shardServices());
}
return services;
}
/** /**
* Get information about plugins (jvm and site plugins). * Get information about plugins (jvm and site plugins).
*/ */

View File

@ -43,7 +43,7 @@ import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.indexing.IndexingStats; import org.elasticsearch.index.indexing.IndexingStats;
import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.stats.PercolateStats; import org.elasticsearch.index.percolator.PercolateStats;
import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.suggest.stats.SuggestStats; import org.elasticsearch.index.suggest.stats.SuggestStats;

View File

@ -559,7 +559,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) { final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) {
IndexService indexService = indicesService.indexServiceSafe(request.index()); IndexService indexService = indicesService.indexServiceSafe(request.index());
IndexShard indexShard = indexService.shardSafe(request.shardId()); IndexShard indexShard = indexService.getShard(request.shardId());
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId()); SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());

View File

@ -289,7 +289,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent<SnapshotSh
for (final Map.Entry<ShardId, IndexShardSnapshotStatus> shardEntry : entry.getValue().entrySet()) { for (final Map.Entry<ShardId, IndexShardSnapshotStatus> shardEntry : entry.getValue().entrySet()) {
final ShardId shardId = shardEntry.getKey(); final ShardId shardId = shardEntry.getKey();
try { try {
final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).shard(shardId.id()); final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id());
executor.execute(new AbstractRunnable() { executor.execute(new AbstractRunnable() {
@Override @Override
public void doRun() { public void doRun() {

View File

@ -158,7 +158,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node);
IndexService indexShards = indexServices.indexServiceSafe(index); IndexService indexShards = indexServices.indexServiceSafe(index);
for (Integer shardId : indexShards.shardIds()) { for (Integer shardId : indexShards.shardIds()) {
IndexShard shard = indexShards.shardSafe(shardId); IndexShard shard = indexShards.getShard(shardId);
if (randomBoolean()) { if (randomBoolean()) {
shard.failShard("test", new CorruptIndexException("test corrupted", "")); shard.failShard("test", new CorruptIndexException("test corrupted", ""));
Set<String> nodes = corruptedShardIDMap.get(shardId); Set<String> nodes = corruptedShardIDMap.get(shardId);

View File

@ -65,7 +65,7 @@ public class UpgradeReallyOldIndexIT extends StaticIndexBackwardCompatibilityIT
for (IndicesService services : internalCluster().getInstances(IndicesService.class)) { for (IndicesService services : internalCluster().getInstances(IndicesService.class)) {
IndexService indexService = services.indexService(index); IndexService indexService = services.indexService(index);
if (indexService != null) { if (indexService != null) {
assertEquals(version, indexService.shard(0).minimumCompatibleVersion()); assertEquals(version, indexService.getShardOrNull(0).minimumCompatibleVersion());
} }
} }

View File

@ -179,7 +179,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
DiscoveryNode discoveryNode = state.getNodes().get(nodeId); DiscoveryNode discoveryNode = state.getNodes().get(nodeId);
IndicesService indicesService = internalTestCluster.getInstance(IndicesService.class, discoveryNode.getName()); IndicesService indicesService = internalTestCluster.getInstance(IndicesService.class, discoveryNode.getName());
IndexService indexService = indicesService.indexService(shard.index()); IndexService indexService = indicesService.indexService(shard.index());
IndexShard indexShard = indexService.shard(shard.id()); IndexShard indexShard = indexService.getShardOrNull(shard.id());
assertEquals(indexShard.shardPath().getRootDataPath().toString(), dataPath); assertEquals(indexShard.shardPath().getRootDataPath().toString(), dataPath);
} }

View File

@ -60,6 +60,22 @@ public abstract class ModuleTestCase extends ESTestCase {
fail("Did not find any binding to " + to.getName() + ". Found these bindings:\n" + s); fail("Did not find any binding to " + to.getName() + ". Found these bindings:\n" + s);
} }
// /** Configures the module and asserts "instance" is bound to "to". */
// public void assertInstanceBinding(Module module, Class to, Object instance) {
// List<Element> elements = Elements.getElements(module);
// for (Element element : elements) {
// if (element instanceof ProviderInstanceBinding) {
// assertEquals(instance, ((ProviderInstanceBinding) element).getProviderInstance().get());
// return;
// }
// }
// StringBuilder s = new StringBuilder();
// for (Element element : elements) {
// s.append(element + "\n");
// }
// fail("Did not find any binding to " + to.getName() + ". Found these bindings:\n" + s);
// }
/** /**
* Attempts to configure the module, and asserts an {@link IllegalArgumentException} is * Attempts to configure the module, and asserts an {@link IllegalArgumentException} is
* caught, containing the given messages * caught, containing the given messages
@ -164,6 +180,10 @@ public abstract class ModuleTestCase extends ESTestCase {
return; return;
} }
} }
} else if (element instanceof ProviderInstanceBinding) {
ProviderInstanceBinding binding = (ProviderInstanceBinding) element;
assertTrue(tester.test(to.cast(binding.getProviderInstance().get())));
return;
} }
} }
StringBuilder s = new StringBuilder(); StringBuilder s = new StringBuilder();

View File

@ -0,0 +1,66 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.elasticsearch.common.inject.ModuleTestCase;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.InternalEngineFactory;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.test.engine.MockEngineFactory;
public class IndexModuleTests extends ModuleTestCase {
public void testWrapperIsBound() {
IndexModule module = new IndexModule();
assertInstanceBinding(module, IndexSearcherWrapper.class,(x) -> x == null);
module.indexSearcherWrapper = Wrapper.class;
assertBinding(module, IndexSearcherWrapper.class, Wrapper.class);
}
public void testEngineFactoryBound() {
IndexModule module = new IndexModule();
assertBinding(module, EngineFactory.class, InternalEngineFactory.class);
module.engineFactoryImpl = MockEngineFactory.class;
assertBinding(module, EngineFactory.class, MockEngineFactory.class);
}
public void testOtherServiceBound() {
IndexModule module = new IndexModule();
assertBinding(module, IndexService.class, IndexService.class);
assertBinding(module, IndexServicesProvider.class, IndexServicesProvider.class);
}
public static final class Wrapper implements IndexSearcherWrapper {
@Override
public DirectoryReader wrap(DirectoryReader reader) {
return null;
}
@Override
public IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws EngineException {
return null;
}
}
}

View File

@ -17,19 +17,19 @@
* under the License. * under the License.
*/ */
package org.elasticsearch.index.shard; package org.elasticsearch.index;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import org.junit.Test; import org.junit.Test;
/** Unit test(s) for IndexShardModule */ /** Unit test(s) for IndexService */
public class IndexShardModuleTests extends ESTestCase { public class IndexServiceTests extends ESTestCase {
@Test @Test
public void testDetermineShadowEngineShouldBeUsed() { public void testDetermineShadowEngineShouldBeUsed() {
ShardId shardId = new ShardId("myindex", 0);
Settings regularSettings = Settings.builder() Settings regularSettings = Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
@ -41,14 +41,9 @@ public class IndexShardModuleTests extends ESTestCase {
.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
.build(); .build();
IndexShardModule ism1 = new IndexShardModule(shardId, true, regularSettings); assertFalse("no shadow replicas for normal settings", IndexService.useShadowEngine(true, regularSettings));
IndexShardModule ism2 = new IndexShardModule(shardId, false, regularSettings); assertFalse("no shadow replicas for normal settings", IndexService.useShadowEngine(false, regularSettings));
IndexShardModule ism3 = new IndexShardModule(shardId, true, shadowSettings); assertFalse("no shadow replicas for primary shard with shadow settings", IndexService.useShadowEngine(true, shadowSettings));
IndexShardModule ism4 = new IndexShardModule(shardId, false, shadowSettings); assertTrue("shadow replicas for replica shards with shadow settings",IndexService.useShadowEngine(false, shadowSettings));
assertFalse("no shadow replicas for normal settings", ism1.useShadowEngine());
assertFalse("no shadow replicas for normal settings", ism2.useShadowEngine());
assertFalse("no shadow replicas for primary shard with shadow settings", ism3.useShadowEngine());
assertTrue("shadow replicas for replica shards with shadow settings", ism4.useShadowEngine());
} }
} }
View File
@ -150,7 +150,7 @@ public class IndexWithShadowReplicasIT extends ESIntegTestCase {
for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) { for (IndicesService service : internalCluster().getDataNodeInstances(IndicesService.class)) {
if (service.hasIndex("foo-copy")) { if (service.hasIndex("foo-copy")) {
IndexShard shard = service.indexServiceSafe("foo-copy").shard(0); IndexShard shard = service.indexServiceSafe("foo-copy").getShardOrNull(0);
if (shard.routingEntry().primary()) { if (shard.routingEntry().primary()) {
assertFalse(shard instanceof ShadowIndexShard); assertFalse(shard instanceof ShadowIndexShard);
} else { } else {
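The shard accessor rename visible here recurs through the rest of this commit: IndexService.shard(int) becomes getShardOrNull(int), and shardSafe(int) becomes getShard(int) (see the MultiMatchQueryTests hunk further down). A before/after sketch; the null-versus-throw behaviour is inferred from the old and new method names rather than spelled out in these hunks:

// before this commit
IndexShard a = indexService.shard(0);           // may return null
IndexShard b = indexService.shardSafe(0);       // expected to throw if the shard is missing

// after this commit
IndexShard a2 = indexService.getShardOrNull(0); // may return null
IndexShard b2 = indexService.getShard(0);       // expected to throw if the shard is missing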
View File
@ -16,10 +16,11 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
package org.elasticsearch.index.shard; package org.elasticsearch.index;
import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.engine.MockEngineFactory; import org.elasticsearch.test.engine.MockEngineFactory;
import org.elasticsearch.test.engine.MockEngineSupportModule; import org.elasticsearch.test.engine.MockEngineSupportModule;
@ -27,7 +28,7 @@ import org.elasticsearch.test.engine.MockEngineSupportModule;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
// this must exist in the same package as IndexShardModule to allow access to setting the impl // this must exist in the same package as IndexModule to allow access to setting the impl
public class MockEngineFactoryPlugin extends Plugin { public class MockEngineFactoryPlugin extends Plugin {
@Override @Override
public String name() { public String name() {
@ -41,7 +42,7 @@ public class MockEngineFactoryPlugin extends Plugin {
public Collection<Module> indexModules(Settings indexSettings) { public Collection<Module> indexModules(Settings indexSettings) {
return Collections.<Module>singletonList(new MockEngineSupportModule()); return Collections.<Module>singletonList(new MockEngineSupportModule());
} }
public void onModule(IndexShardModule module) { public void onModule(IndexModule module) {
module.engineFactoryImpl = MockEngineFactory.class; module.engineFactoryImpl = MockEngineFactory.class;
} }
} }
View File
@ -97,7 +97,7 @@ public class CodecTests extends ESSingleNodeTestCase {
private static CodecService createCodecService(Settings settings) { private static CodecService createCodecService(Settings settings) {
IndexService indexService = createIndex("test", settings); IndexService indexService = createIndex("test", settings);
return indexService.injector().getInstance(CodecService.class); return indexService.getIndexServices().getCodecService();
} }
} }
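The CodecTests change just above shows the pattern used throughout this commit for test code that previously resolved services from the index injector: dependencies now come from the IndexServicesProvider attached to the IndexService. A short sketch using only accessors that appear elsewhere in this diff (getCodecService here, getMapperService and getThreadPool in the IndexShardTests hunk further down); any other getter names would need to be checked against IndexServicesProvider itself:

IndexServicesProvider services = indexService.getIndexServices();
CodecService codecs = services.getCodecService();   // was: indexService.injector().getInstance(CodecService.class)
MapperService mappers = services.getMapperService();
ThreadPool threadPool = services.getThreadPool();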
View File
@ -21,6 +21,7 @@ package org.elasticsearch.index.engine;
import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.LiveIndexWriterConfig;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.EngineAccess;
import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.ESSingleNodeTestCase;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
@ -33,7 +34,7 @@ public class InternalEngineSettingsTests extends ESSingleNodeTestCase {
public void testSettingsUpdate() { public void testSettingsUpdate() {
final IndexService service = createIndex("foo"); final IndexService service = createIndex("foo");
// INDEX_COMPOUND_ON_FLUSH // INDEX_COMPOUND_ON_FLUSH
InternalEngine engine = ((InternalEngine)engine(service)); InternalEngine engine = ((InternalEngine) EngineAccess.engine(service.getShardOrNull(0)));
assertThat(engine.getCurrentIndexWriterConfig().getUseCompoundFile(), is(true)); assertThat(engine.getCurrentIndexWriterConfig().getUseCompoundFile(), is(true));
client().admin().indices().prepareUpdateSettings("foo").setSettings(Settings.builder().put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, false).build()).get(); client().admin().indices().prepareUpdateSettings("foo").setSettings(Settings.builder().put(EngineConfig.INDEX_COMPOUND_ON_FLUSH, false).build()).get();
assertThat(engine.getCurrentIndexWriterConfig().getUseCompoundFile(), is(false)); assertThat(engine.getCurrentIndexWriterConfig().getUseCompoundFile(), is(false));
View File
@ -67,10 +67,7 @@ import org.elasticsearch.index.mapper.ParseContext.Document;
import org.elasticsearch.index.mapper.internal.SourceFieldMapper; import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.index.mapper.object.RootObjectMapper; import org.elasticsearch.index.mapper.object.RootObjectMapper;
import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.elasticsearch.index.shard.*;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardUtils;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
import org.elasticsearch.index.similarity.SimilarityLookupService; import org.elasticsearch.index.similarity.SimilarityLookupService;
import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.DirectoryService;
import org.elasticsearch.index.store.DirectoryUtils; import org.elasticsearch.index.store.DirectoryUtils;
@ -232,15 +229,15 @@ public class InternalEngineTests extends ESTestCase {
return new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); return new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
} }
protected InternalEngine createEngine(Store store, Path translogPath, IndexSearcherWrapper... wrappers) { protected InternalEngine createEngine(Store store, Path translogPath) {
return createEngine(defaultSettings, store, translogPath, new MergeSchedulerConfig(defaultSettings), newMergePolicy(), wrappers); return createEngine(defaultSettings, store, translogPath, new MergeSchedulerConfig(defaultSettings), newMergePolicy());
} }
protected InternalEngine createEngine(Settings indexSettings, Store store, Path translogPath, MergeSchedulerConfig mergeSchedulerConfig, MergePolicy mergePolicy, IndexSearcherWrapper... wrappers) { protected InternalEngine createEngine(Settings indexSettings, Store store, Path translogPath, MergeSchedulerConfig mergeSchedulerConfig, MergePolicy mergePolicy) {
return new InternalEngine(config(indexSettings, store, translogPath, mergeSchedulerConfig, mergePolicy, wrappers), false); return new InternalEngine(config(indexSettings, store, translogPath, mergeSchedulerConfig, mergePolicy), false);
} }
public EngineConfig config(Settings indexSettings, Store store, Path translogPath, MergeSchedulerConfig mergeSchedulerConfig, MergePolicy mergePolicy, IndexSearcherWrapper... wrappers) { public EngineConfig config(Settings indexSettings, Store store, Path translogPath, MergeSchedulerConfig mergeSchedulerConfig, MergePolicy mergePolicy) {
IndexWriterConfig iwc = newIndexWriterConfig(); IndexWriterConfig iwc = newIndexWriterConfig();
TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, threadPool); TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, threadPool);
@ -251,7 +248,7 @@ public class InternalEngineTests extends ESTestCase {
public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) { public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) {
// we don't need to notify anybody in this test // we don't need to notify anybody in this test
} }
}, new TranslogHandler(shardId.index().getName(), logger), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), new IndexSearcherWrappingService(new HashSet<>(Arrays.asList(wrappers))), translogConfig); }, new TranslogHandler(shardId.index().getName(), logger), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig);
try { try {
config.setCreate(Lucene.indexExists(store.directory()) == false); config.setCreate(Lucene.indexExists(store.directory()) == false);
} catch (IOException e) { } catch (IOException e) {
@ -491,8 +488,7 @@ public class InternalEngineTests extends ESTestCase {
assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_GENERATION_KEY)); assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_GENERATION_KEY));
assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_UUID_KEY)); assertThat(stats2.getUserData(), hasKey(Translog.TRANSLOG_UUID_KEY));
assertThat(stats2.getUserData().get(Translog.TRANSLOG_GENERATION_KEY), not(equalTo(stats1.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); assertThat(stats2.getUserData().get(Translog.TRANSLOG_GENERATION_KEY), not(equalTo(stats1.getUserData().get(Translog.TRANSLOG_GENERATION_KEY))));
assertThat(stats2.getUserData().get(Translog.TRANSLOG_UUID_KEY), equalTo(stats1.getUserData().get(Translog.TRANSLOG_UUID_KEY))) assertThat(stats2.getUserData().get(Translog.TRANSLOG_UUID_KEY), equalTo(stats1.getUserData().get(Translog.TRANSLOG_UUID_KEY)));
;
} }
@Test @Test
@ -514,8 +510,11 @@ public class InternalEngineTests extends ESTestCase {
}; };
Store store = createStore(); Store store = createStore();
Path translog = createTempDir("translog-test"); Path translog = createTempDir("translog-test");
InternalEngine engine = createEngine(store, translog, wrapper); InternalEngine engine = createEngine(store, translog);
Engine.Searcher searcher = engine.acquireSearcher("test"); engine.close();
engine = new InternalEngine(engine.config(), false);
Engine.Searcher searcher = wrapper.wrap(engine.config(), engine.acquireSearcher("test"));
assertThat(counter.get(), equalTo(2)); assertThat(counter.get(), equalTo(2));
searcher.close(); searcher.close();
IOUtils.close(store, engine); IOUtils.close(store, engine);
@ -1951,7 +1950,7 @@ public class InternalEngineTests extends ESTestCase {
EngineConfig brokenConfig = new EngineConfig(shardId, threadPool, config.getIndexingService(), config.getIndexSettings() EngineConfig brokenConfig = new EngineConfig(shardId, threadPool, config.getIndexingService(), config.getIndexSettings()
, null, store, createSnapshotDeletionPolicy(), newMergePolicy(), config.getMergeSchedulerConfig(), , null, store, createSnapshotDeletionPolicy(), newMergePolicy(), config.getMergeSchedulerConfig(),
config.getAnalyzer(), config.getSimilarity(), new CodecService(shardId.index()), config.getFailedEngineListener() config.getAnalyzer(), config.getSimilarity(), new CodecService(shardId.index()), config.getFailedEngineListener()
, config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), new IndexSearcherWrappingService(), translogConfig); , config.getTranslogRecoveryPerformer(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig);
try { try {
new InternalEngine(brokenConfig, false); new InternalEngine(brokenConfig, false);
View File
@ -216,7 +216,7 @@ public class ShadowEngineTests extends ESTestCase {
@Override @Override
public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) { public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) {
// we don't need to notify anybody in this test // we don't need to notify anybody in this test
}}, null, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), new IndexSearcherWrappingService(), translogConfig); }}, null, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig);
try { try {
config.setCreate(Lucene.indexExists(store.directory()) == false); config.setCreate(Lucene.indexExists(store.directory()) == false);
} catch (IOException e) { } catch (IOException e) {
View File
@ -102,7 +102,7 @@ public abstract class AbstractFieldDataTestCase extends ESSingleNodeTestCase {
Settings settings = Settings.builder().put("index.fielddata.cache", "none").build(); Settings settings = Settings.builder().put("index.fielddata.cache", "none").build();
indexService = createIndex("test", settings); indexService = createIndex("test", settings);
mapperService = indexService.mapperService(); mapperService = indexService.mapperService();
indicesFieldDataCache = indexService.injector().getInstance(IndicesFieldDataCache.class); indicesFieldDataCache = getInstanceFromNode(IndicesFieldDataCache.class);
ifdService = indexService.fieldData(); ifdService = indexService.fieldData();
// LogByteSizeMP to preserve doc ID order // LogByteSizeMP to preserve doc ID order
writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy())); writer = new IndexWriter(new RAMDirectory(), new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()));
View File
@ -433,7 +433,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase {
client().prepareIndex(index, "type").setSource("foo", "bar").get(); client().prepareIndex(index, "type").setSource("foo", "bar").get();
client().admin().indices().prepareRefresh(index).get(); client().admin().indices().prepareRefresh(index).get();
Query query = indexService.mapperService().documentMapper("type").allFieldMapper().fieldType().termQuery("bar", null); Query query = indexService.mapperService().documentMapper("type").allFieldMapper().fieldType().termQuery("bar", null);
try (Searcher searcher = indexService.shard(0).acquireSearcher("tests")) { try (Searcher searcher = indexService.getShardOrNull(0).acquireSearcher("tests")) {
query = searcher.searcher().rewrite(query); query = searcher.searcher().rewrite(query);
final Class<?> expected = boost ? AllTermQuery.class : TermQuery.class; final Class<?> expected = boost ? AllTermQuery.class : TermQuery.class;
assertThat(query, Matchers.instanceOf(expected)); assertThat(query, Matchers.instanceOf(expected));
View File
@ -71,7 +71,7 @@ public class MultiMatchQueryTests extends ESSingleNodeTestCase {
QueryShardContext queryShardContext = new QueryShardContext(new Index("test"), queryParser); QueryShardContext queryShardContext = new QueryShardContext(new Index("test"), queryParser);
queryShardContext.setAllowUnmappedFields(true); queryShardContext.setAllowUnmappedFields(true);
Query parsedQuery = multiMatchQuery("banon").field("name.first", 2).field("name.last", 3).field("foobar").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).toQuery(queryShardContext); Query parsedQuery = multiMatchQuery("banon").field("name.first", 2).field("name.last", 3).field("foobar").type(MultiMatchQueryBuilder.Type.CROSS_FIELDS).toQuery(queryShardContext);
try (Engine.Searcher searcher = indexService.shardSafe(0).acquireSearcher("test")) { try (Engine.Searcher searcher = indexService.getShard(0).acquireSearcher("test")) {
Query rewrittenQuery = searcher.searcher().rewrite(parsedQuery); Query rewrittenQuery = searcher.searcher().rewrite(parsedQuery);
BooleanQuery.Builder expected = new BooleanQuery.Builder(); BooleanQuery.Builder expected = new BooleanQuery.Builder();
View File
@ -0,0 +1,31 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.shard;
import org.elasticsearch.index.engine.Engine;
/**
* Test utility to access the engine of a shard
*/
public final class EngineAccess {
public static Engine engine(IndexShard shard) {
return shard.getEngine();
}
}
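EngineAccess sits in org.elasticsearch.index.shard, presumably so that tests outside that package can still reach a shard's engine now that it is obtained via IndexShard.getEngine() rather than the old public engine() method; the InternalEngineSettingsTests hunk earlier in this diff uses it that way. A minimal sketch of the same pattern from a test in another package (the index name is illustrative):

// e.g. inside a test class in org.elasticsearch.index.engine
IndexService indexService = createIndex("foo");                  // ESSingleNodeTestCase helper, as in the hunks above
IndexShard shard = indexService.getShardOrNull(0);
InternalEngine engine = (InternalEngine) EngineAccess.engine(shard);
engine.refresh("test");                                          // Engine.refresh(String), as used in the IndexShardTests hunk below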
View File
@ -20,9 +20,8 @@ package org.elasticsearch.index.shard;
import org.apache.lucene.document.Field; import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexCommit; import org.apache.lucene.search.*;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.Constants; import org.apache.lucene.util.Constants;
@ -58,13 +57,17 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLock;
import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexServicesProvider;
import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.flush.FlushStats; import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.indexing.IndexingOperationListener; import org.elasticsearch.index.indexing.IndexingOperationListener;
import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper;
import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.ParsingException;
import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.settings.IndexSettingsService;
@ -112,7 +115,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
assertEquals(initValue, shard.isFlushOnClose()); assertEquals(initValue, shard.isFlushOnClose());
final boolean newValue = !initValue; final boolean newValue = !initValue;
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_FLUSH_ON_CLOSE, newValue).build())); assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_FLUSH_ON_CLOSE, newValue).build()));
@ -183,7 +186,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId)); ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
assertEquals(getShardStateMetadata(shard), shardStateMetaData); assertEquals(getShardStateMetadata(shard), shardStateMetaData);
ShardRouting routing = new ShardRouting(shard.shardRouting, shard.shardRouting.version() + 1); ShardRouting routing = new ShardRouting(shard.shardRouting, shard.shardRouting.version() + 1);
@ -232,7 +235,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
try { try {
shard.deleteShardState(); shard.deleteShardState();
fail("shard is active metadata delete must fail"); fail("shard is active metadata delete must fail");
@ -259,7 +262,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class); NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
// fail shard // fail shard
shard.failShard("test shard fail", new CorruptIndexException("", "")); shard.failShard("test shard fail", new CorruptIndexException("", ""));
// check state file still exists // check state file still exists
@ -304,7 +307,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen("test"); ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexServiceSafe("test"); IndexService indexService = indicesService.indexServiceSafe("test");
IndexShard indexShard = indexService.shard(0); IndexShard indexShard = indexService.getShardOrNull(0);
client().admin().indices().prepareDelete("test").get(); client().admin().indices().prepareDelete("test").get();
assertThat(indexShard.getOperationsCount(), equalTo(0)); assertThat(indexShard.getOperationsCount(), equalTo(0));
try { try {
@ -321,7 +324,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen("test"); ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexServiceSafe("test"); IndexService indexService = indicesService.indexServiceSafe("test");
IndexShard indexShard = indexService.shard(0); IndexShard indexShard = indexService.getShardOrNull(0);
assertEquals(0, indexShard.getOperationsCount()); assertEquals(0, indexShard.getOperationsCount());
indexShard.incrementOperationCounter(); indexShard.incrementOperationCounter();
assertEquals(1, indexShard.getOperationsCount()); assertEquals(1, indexShard.getOperationsCount());
@ -339,7 +342,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
client().prepareIndex("test", "test").setSource("{}").get(); client().prepareIndex("test", "test").setSource("{}").get();
ensureGreen("test"); ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
indicesService.indexService("test").shard(0).markAsInactive(); indicesService.indexService("test").getShardOrNull(0).markAsInactive();
assertBusy(new Runnable() { // should be very very quick assertBusy(new Runnable() { // should be very very quick
@Override @Override
public void run() { public void run() {
@ -366,31 +369,31 @@ public class IndexShardTests extends ESSingleNodeTestCase {
client().prepareIndex("test", "bar", "1").setSource("{}").get(); client().prepareIndex("test", "bar", "1").setSource("{}").get();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
setDurability(shard, Translog.Durabilty.REQUEST); setDurability(shard, Translog.Durabilty.REQUEST);
assertFalse(shard.engine().getTranslog().syncNeeded()); assertFalse(shard.getEngine().getTranslog().syncNeeded());
setDurability(shard, Translog.Durabilty.ASYNC); setDurability(shard, Translog.Durabilty.ASYNC);
client().prepareIndex("test", "bar", "2").setSource("{}").get(); client().prepareIndex("test", "bar", "2").setSource("{}").get();
assertTrue(shard.engine().getTranslog().syncNeeded()); assertTrue(shard.getEngine().getTranslog().syncNeeded());
setDurability(shard, Translog.Durabilty.REQUEST); setDurability(shard, Translog.Durabilty.REQUEST);
client().prepareDelete("test", "bar", "1").get(); client().prepareDelete("test", "bar", "1").get();
assertFalse(shard.engine().getTranslog().syncNeeded()); assertFalse(shard.getEngine().getTranslog().syncNeeded());
setDurability(shard, Translog.Durabilty.ASYNC); setDurability(shard, Translog.Durabilty.ASYNC);
client().prepareDelete("test", "bar", "2").get(); client().prepareDelete("test", "bar", "2").get();
assertTrue(shard.engine().getTranslog().syncNeeded()); assertTrue(shard.getEngine().getTranslog().syncNeeded());
setDurability(shard, Translog.Durabilty.REQUEST); setDurability(shard, Translog.Durabilty.REQUEST);
assertNoFailures(client().prepareBulk() assertNoFailures(client().prepareBulk()
.add(client().prepareIndex("test", "bar", "3").setSource("{}")) .add(client().prepareIndex("test", "bar", "3").setSource("{}"))
.add(client().prepareDelete("test", "bar", "1")).get()); .add(client().prepareDelete("test", "bar", "1")).get());
assertFalse(shard.engine().getTranslog().syncNeeded()); assertFalse(shard.getEngine().getTranslog().syncNeeded());
setDurability(shard, Translog.Durabilty.ASYNC); setDurability(shard, Translog.Durabilty.ASYNC);
assertNoFailures(client().prepareBulk() assertNoFailures(client().prepareBulk()
.add(client().prepareIndex("test", "bar", "4").setSource("{}")) .add(client().prepareIndex("test", "bar", "4").setSource("{}"))
.add(client().prepareDelete("test", "bar", "3")).get()); .add(client().prepareDelete("test", "bar", "3")).get());
setDurability(shard, Translog.Durabilty.REQUEST); setDurability(shard, Translog.Durabilty.REQUEST);
assertTrue(shard.engine().getTranslog().syncNeeded()); assertTrue(shard.getEngine().getTranslog().syncNeeded());
} }
private void setDurability(IndexShard shard, Translog.Durabilty durabilty) { private void setDurability(IndexShard shard, Translog.Durabilty durabilty) {
@ -407,12 +410,12 @@ public class IndexShardTests extends ESSingleNodeTestCase {
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
int numDocs = 1; int numDocs = 1;
shard.state = IndexShardState.RECOVERING; shard.state = IndexShardState.RECOVERING;
try { try {
shard.recoveryState().getTranslog().totalOperations(1); shard.recoveryState().getTranslog().totalOperations(1);
shard.engine().config().getTranslogRecoveryPerformer().performRecoveryOperation(shard.engine(), new Translog.DeleteByQuery(new Engine.DeleteByQuery(null, new BytesArray("{\"term\" : { \"user\" : \"kimchy\" }}"), null, null, null, Engine.Operation.Origin.RECOVERY, 0, "person")), false); shard.getEngine().config().getTranslogRecoveryPerformer().performRecoveryOperation(shard.getEngine(), new Translog.DeleteByQuery(new Engine.DeleteByQuery(null, new BytesArray("{\"term\" : { \"user\" : \"kimchy\" }}"), null, null, null, Engine.Operation.Origin.RECOVERY, 0, "person")), false);
assertTrue(version.onOrBefore(Version.V_1_0_0_Beta2)); assertTrue(version.onOrBefore(Version.V_1_0_0_Beta2));
numDocs = 0; numDocs = 0;
} catch (ParsingException ex) { } catch (ParsingException ex) {
@ -420,9 +423,9 @@ public class IndexShardTests extends ESSingleNodeTestCase {
} finally { } finally {
shard.state = IndexShardState.STARTED; shard.state = IndexShardState.STARTED;
} }
shard.engine().refresh("foo"); shard.getEngine().refresh("foo");
try (Engine.Searcher searcher = shard.engine().acquireSearcher("foo")) { try (Engine.Searcher searcher = shard.getEngine().acquireSearcher("foo")) {
assertEquals(numDocs, searcher.reader().numDocs()); assertEquals(numDocs, searcher.reader().numDocs());
} }
} }
@ -434,11 +437,11 @@ public class IndexShardTests extends ESSingleNodeTestCase {
client().prepareIndex("test", "test").setSource("{}").get(); client().prepareIndex("test", "test").setSource("{}").get();
ensureGreen("test"); ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexShard test = indicesService.indexService("test").shard(0); IndexShard test = indicesService.indexService("test").getShardOrNull(0);
assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion());
client().prepareIndex("test", "test").setSource("{}").get(); client().prepareIndex("test", "test").setSource("{}").get();
assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion()); assertEquals(versionCreated.luceneVersion, test.minimumCompatibleVersion());
test.engine().flush(); test.getEngine().flush();
assertEquals(Version.CURRENT.luceneVersion, test.minimumCompatibleVersion()); assertEquals(Version.CURRENT.luceneVersion, test.minimumCompatibleVersion());
} }
@ -460,7 +463,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
assertHitCount(response, 1l); assertHitCount(response, 1l);
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
ShardPath shardPath = shard.shardPath(); ShardPath shardPath = shard.shardPath();
Path dataPath = shardPath.getDataPath(); Path dataPath = shardPath.getDataPath();
client().admin().indices().prepareClose("test").get(); client().admin().indices().prepareClose("test").get();
@ -580,7 +583,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(shard, new CommonStatsFlags()), shard.commitStats()); ShardStats stats = new ShardStats(shard.routingEntry(), shard.shardPath(), new CommonStats(shard, new CommonStatsFlags()), shard.commitStats());
assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath()); assertEquals(shard.shardPath().getRootDataPath().toString(), stats.getDataPath());
assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath()); assertEquals(shard.shardPath().getRootStatePath().toString(), stats.getStatePath());
@ -619,7 +622,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("testpreindex"); IndexService test = indicesService.indexService("testpreindex");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
ShardIndexingService shardIndexingService = shard.indexingService(); ShardIndexingService shardIndexingService = shard.indexingService();
final AtomicBoolean preIndexCalled = new AtomicBoolean(false); final AtomicBoolean preIndexCalled = new AtomicBoolean(false);
@ -642,7 +645,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("testpostindex"); IndexService test = indicesService.indexService("testpostindex");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
ShardIndexingService shardIndexingService = shard.indexingService(); ShardIndexingService shardIndexingService = shard.indexingService();
final AtomicBoolean postIndexCalled = new AtomicBoolean(false); final AtomicBoolean postIndexCalled = new AtomicBoolean(false);
@ -665,7 +668,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("testpostindexwithexception"); IndexService test = indicesService.indexService("testpostindexwithexception");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
ShardIndexingService shardIndexingService = shard.indexingService(); ShardIndexingService shardIndexingService = shard.indexingService();
shard.close("Unexpected close", true); shard.close("Unexpected close", true);
@ -700,7 +703,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
assertFalse(shard.shouldFlush()); assertFalse(shard.shouldFlush());
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, 1).build()).get(); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, 1).build()).get();
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
@ -709,25 +712,25 @@ public class IndexShardTests extends ESSingleNodeTestCase {
Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc); Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc);
shard.index(index); shard.index(index);
assertTrue(shard.shouldFlush()); assertTrue(shard.shouldFlush());
assertEquals(2, shard.engine().getTranslog().totalOperations()); assertEquals(2, shard.getEngine().getTranslog().totalOperations());
client().prepareIndex("test", "test", "2").setSource("{}").setRefresh(randomBoolean()).get(); client().prepareIndex("test", "test", "2").setSource("{}").setRefresh(randomBoolean()).get();
assertBusy(() -> { // this is async assertBusy(() -> { // this is async
assertFalse(shard.shouldFlush()); assertFalse(shard.shouldFlush());
}); });
assertEquals(0, shard.engine().getTranslog().totalOperations()); assertEquals(0, shard.getEngine().getTranslog().totalOperations());
shard.engine().getTranslog().sync(); shard.getEngine().getTranslog().sync();
long size = shard.engine().getTranslog().sizeInBytes(); long size = shard.getEngine().getTranslog().sizeInBytes();
logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", shard.engine().getTranslog().sizeInBytes(), shard.engine().getTranslog().totalOperations(), shard.engine().getTranslog().getGeneration()); logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, 1000) client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, 1000)
.put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(size, ByteSizeUnit.BYTES)) .put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(size, ByteSizeUnit.BYTES))
.build()).get(); .build()).get();
client().prepareDelete("test", "test", "2").get(); client().prepareDelete("test", "test", "2").get();
logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", shard.engine().getTranslog().sizeInBytes(), shard.engine().getTranslog().totalOperations(), shard.engine().getTranslog().getGeneration()); logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
assertBusy(() -> { // this is async assertBusy(() -> { // this is async
logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", shard.engine().getTranslog().sizeInBytes(), shard.engine().getTranslog().totalOperations(), shard.engine().getTranslog().getGeneration()); logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(), shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
assertFalse(shard.shouldFlush()); assertFalse(shard.shouldFlush());
}); });
assertEquals(0, shard.engine().getTranslog().totalOperations()); assertEquals(0, shard.getEngine().getTranslog().totalOperations());
} }
public void testStressMaybeFlush() throws Exception { public void testStressMaybeFlush() throws Exception {
@ -735,7 +738,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
final IndexShard shard = test.shard(0); final IndexShard shard = test.getShardOrNull(0);
assertFalse(shard.shouldFlush()); assertFalse(shard.shouldFlush());
client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, 1).build()).get(); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_TRANSLOG_FLUSH_THRESHOLD_OPS, 1).build()).get();
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
@ -778,7 +781,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
final IndexShard shard = test.shard(0); final IndexShard shard = test.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
if (randomBoolean()) { if (randomBoolean()) {
@ -804,7 +807,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT); DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
final IndexShard shard = test.shard(0); final IndexShard shard = test.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
if (randomBoolean()) { if (randomBoolean()) {
@ -852,14 +855,14 @@ public class IndexShardTests extends ESSingleNodeTestCase {
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService("test"); IndexService test = indicesService.indexService("test");
IndexService test_target = indicesService.indexService("test_target"); IndexService test_target = indicesService.indexService("test_target");
final IndexShard test_shard = test.shard(0); final IndexShard test_shard = test.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get(); client().prepareIndex("test", "test", "0").setSource("{}").setRefresh(randomBoolean()).get();
client().prepareIndex("test_target", "test", "1").setSource("{}").setRefresh(true).get(); client().prepareIndex("test_target", "test", "1").setSource("{}").setRefresh(true).get();
assertHitCount(client().prepareSearch("test_target").get(), 1); assertHitCount(client().prepareSearch("test_target").get(), 1);
assertSearchHits(client().prepareSearch("test_target").get(), "1"); assertSearchHits(client().prepareSearch("test_target").get(), "1");
client().admin().indices().prepareFlush("test").get(); // only flush test client().admin().indices().prepareFlush("test").get(); // only flush test
final ShardRouting origRouting = test_target.shard(0).routingEntry(); final ShardRouting origRouting = test_target.getShardOrNull(0).routingEntry();
ShardRouting routing = new ShardRouting(origRouting); ShardRouting routing = new ShardRouting(origRouting);
ShardRoutingHelper.reinit(routing); ShardRoutingHelper.reinit(routing);
routing = ShardRoutingHelper.newWithRestoreSource(routing, new RestoreSource(new SnapshotId("foo", "bar"), Version.CURRENT, "test")); routing = ShardRoutingHelper.newWithRestoreSource(routing, new RestoreSource(new SnapshotId("foo", "bar"), Version.CURRENT, "test"));
@ -912,10 +915,100 @@ public class IndexShardTests extends ESSingleNodeTestCase {
ensureGreen(); ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexService("test"); IndexService indexService = indicesService.indexService("test");
IndexShard shard = indexService.shard(0); IndexShard shard = indexService.getShardOrNull(0);
IndexSettingsService settingsService = indexService.settingsService(); IndexSettingsService settingsService = indexService.settingsService();
assertTrue(settingsService.isRegistered(shard)); assertTrue(settingsService.isRegistered(shard));
indexService.removeShard(0, "simon says so"); indexService.removeShard(0, "simon says so");
assertFalse(settingsService.isRegistered(shard)); assertFalse(settingsService.isRegistered(shard));
} }
public void testSearcherWrapperIsUsed() throws IOException {
createIndex("test");
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexService("test");
IndexShard shard = indexService.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").setRefresh(randomBoolean()).get();
client().prepareIndex("test", "test", "1").setSource("{\"foobar\" : \"bar\"}").setRefresh(true).get();
Engine.GetResult getResult = shard.get(new Engine.Get(false, new Term(UidFieldMapper.NAME, Uid.createUid("test", "1"))));
assertTrue(getResult.exists());
assertNotNull(getResult.searcher());
getResult.release();
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
TopDocs search = searcher.searcher().search(new TermQuery(new Term("foo", "bar")), 10);
assertEquals(search.totalHits, 1);
search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10);
assertEquals(search.totalHits, 1);
}
ShardRouting routing = new ShardRouting(shard.routingEntry());
shard.close("simon says", true);
IndexServicesProvider indexServices = indexService.getIndexServices();
IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {
@Override
public DirectoryReader wrap(DirectoryReader reader) throws IOException {
return new FieldMaskingReader("foo", reader);
}
@Override
public IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws EngineException {
return searcher;
}
};
IndexServicesProvider newProvider = new IndexServicesProvider(indexServices.getIndicesLifecycle(), indexServices.getThreadPool(), indexServices.getMapperService(), indexServices.getQueryParserService(), indexServices.getIndexCache(), indexServices.getIndexAliasesService(), indexServices.getIndicesQueryCache(), indexServices.getCodecService(), indexServices.getTermVectorsService(), indexServices.getIndexFieldDataService(), indexServices.getWarmer(), indexServices.getSimilarityService(), indexServices.getFactory(), indexServices.getBigArrays(), wrapper);
IndexShard newShard = new IndexShard(shard.shardId(), shard.indexSettings, shard.shardPath(), shard.store(), newProvider);
ShardRoutingHelper.reinit(routing);
newShard.updateRoutingEntry(routing, false);
DiscoveryNode localNode = new DiscoveryNode("foo", DummyTransportAddress.INSTANCE, Version.CURRENT);
assertTrue(newShard.recoverFromStore(routing, localNode));
routing = new ShardRouting(routing);
ShardRoutingHelper.moveToStarted(routing);
newShard.updateRoutingEntry(routing, true);
try (Engine.Searcher searcher = newShard.acquireSearcher("test")) {
TopDocs search = searcher.searcher().search(new TermQuery(new Term("foo", "bar")), 10);
assertEquals(search.totalHits, 0);
search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10);
assertEquals(search.totalHits, 1);
}
getResult = newShard.get(new Engine.Get(false, new Term(UidFieldMapper.NAME, Uid.createUid("test", "1"))));
assertTrue(getResult.exists());
assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader
assertTrue(getResult.searcher().reader() instanceof FieldMaskingReader);
getResult.release();
newShard.close("just do it", randomBoolean());
}
private static class FieldMaskingReader extends FilterDirectoryReader {
private final String field;
public FieldMaskingReader(String field, DirectoryReader in) throws IOException {
super(in, new SubReaderWrapper() {
private final String filteredField = field;
@Override
public LeafReader wrap(LeafReader reader) {
return new FilterLeafReader(reader) {
@Override
public Fields fields() throws IOException {
return new FilterFields(super.fields()) {
@Override
public Terms terms(String field) throws IOException {
return filteredField.equals(field) ? null : super.terms(field);
}
};
}
};
}
});
this.field = field;
}
@Override
protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
return new FieldMaskingReader(field, in);
}
}
} }
View File
@ -137,8 +137,8 @@ public class IndicesServiceTests extends ESSingleNodeTestCase {
IndexService test = createIndex("test"); IndexService test = createIndex("test");
assertTrue(test.hasShard(0)); assertTrue(test.hasShard(0));
ShardPath path = test.shard(0).shardPath(); ShardPath path = test.getShardOrNull(0).shardPath();
assertTrue(test.shard(0).routingEntry().started()); assertTrue(test.getShardOrNull(0).routingEntry().started());
ShardPath shardPath = ShardPath.loadShardPath(logger, getNodeEnvironment(), new ShardId(test.index(), 0), test.getIndexSettings()); ShardPath shardPath = ShardPath.loadShardPath(logger, getNodeEnvironment(), new ShardId(test.index(), 0), test.getIndexSettings());
assertEquals(shardPath, path); assertEquals(shardPath, path);
try { try {
View File
@ -42,7 +42,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
client().prepareIndex("test", "test", "1").setSource("{}").get(); client().prepareIndex("test", "test", "1").setSource("{}").get();
IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); IndexService test = getInstanceFromNode(IndicesService.class).indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
final ShardId shardId = shard.shardId(); final ShardId shardId = shard.shardId();
@ -86,7 +86,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
client().prepareIndex("test", "test", "1").setSource("{}").get(); client().prepareIndex("test", "test", "1").setSource("{}").get();
IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); IndexService test = getInstanceFromNode(IndicesService.class).indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
final ShardId shardId = shard.shardId(); final ShardId shardId = shard.shardId();
@ -106,7 +106,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
client().prepareIndex("test", "test", "1").setSource("{}").get(); client().prepareIndex("test", "test", "1").setSource("{}").get();
IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); IndexService test = getInstanceFromNode(IndicesService.class).indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
final ShardId shardId = shard.shardId(); final ShardId shardId = shard.shardId();
@ -129,7 +129,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException { public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException {
createIndex("test"); createIndex("test");
IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); IndexService test = getInstanceFromNode(IndicesService.class).indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener(); SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener();
@ -162,7 +162,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
client().prepareIndex("test", "test", "1").setSource("{}").get(); client().prepareIndex("test", "test", "1").setSource("{}").get();
IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); IndexService test = getInstanceFromNode(IndicesService.class).indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
final ShardId shardId = shard.shardId(); final ShardId shardId = shard.shardId();
@ -195,7 +195,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
createIndex("test"); createIndex("test");
client().prepareIndex("test", "test", "1").setSource("{}").get(); client().prepareIndex("test", "test", "1").setSource("{}").get();
IndexService test = getInstanceFromNode(IndicesService.class).indexService("test"); IndexService test = getInstanceFromNode(IndicesService.class).indexService("test");
IndexShard shard = test.shard(0); IndexShard shard = test.getShardOrNull(0);
SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class);
final ShardId shardId = shard.shardId(); final ShardId shardId = shard.shardId();
View File
@ -1,131 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.indices.leaks;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.junit.Test;
import java.lang.ref.WeakReference;
import java.util.ArrayList;
import java.util.List;
import static org.elasticsearch.test.ESIntegTestCase.Scope;
import static org.hamcrest.Matchers.nullValue;
/**
*/
@ClusterScope(scope= Scope.TEST, numDataNodes =1)
public class IndicesLeaksIT extends ESIntegTestCase {
@SuppressWarnings({"ConstantConditions", "unchecked"})
@Test
@BadApple(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/3232")
public void testIndexShardLifecycleLeak() throws Exception {
client().admin().indices().prepareCreate("test")
.setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
.execute().actionGet();
client().admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
IndicesService indicesService = internalCluster().getDataNodeInstance(IndicesService.class);
IndexService indexService = indicesService.indexServiceSafe("test");
Injector indexInjector = indexService.injector();
IndexShard shard = indexService.shardSafe(0);
Injector shardInjector = indexService.shardInjectorSafe(0);
performCommonOperations();
List<WeakReference> indexReferences = new ArrayList<>();
List<WeakReference> shardReferences = new ArrayList<>();
// TODO if we could iterate over the already created classes on the injector, we can just add them here to the list
// for now, we simple add some classes that make sense
// add index references
indexReferences.add(new WeakReference(indexService));
indexReferences.add(new WeakReference(indexInjector));
indexReferences.add(new WeakReference(indexService.mapperService()));
for (DocumentMapper documentMapper : indexService.mapperService().docMappers(true)) {
indexReferences.add(new WeakReference(documentMapper));
}
indexReferences.add(new WeakReference(indexService.aliasesService()));
indexReferences.add(new WeakReference(indexService.analysisService()));
indexReferences.add(new WeakReference(indexService.fieldData()));
indexReferences.add(new WeakReference(indexService.queryParserService()));
// add shard references
shardReferences.add(new WeakReference(shard));
shardReferences.add(new WeakReference(shardInjector));
indexService = null;
indexInjector = null;
shard = null;
shardInjector = null;
cluster().wipeIndices("test");
for (int i = 0; i < 100; i++) {
System.gc();
int indexNotCleared = 0;
for (WeakReference indexReference : indexReferences) {
if (indexReference.get() != null) {
indexNotCleared++;
}
}
int shardNotCleared = 0;
for (WeakReference shardReference : shardReferences) {
if (shardReference.get() != null) {
shardNotCleared++;
}
}
logger.info("round {}, indices {}/{}, shards {}/{}", i, indexNotCleared, indexReferences.size(), shardNotCleared, shardReferences.size());
if (indexNotCleared == 0 && shardNotCleared == 0) {
break;
}
}
//System.out.println("sleeping");Thread.sleep(1000000);
for (WeakReference indexReference : indexReferences) {
assertThat("dangling index reference: " + indexReference.get(), indexReference.get(), nullValue());
}
for (WeakReference shardReference : shardReferences) {
assertThat("dangling shard reference: " + shardReference.get(), shardReference.get(), nullValue());
}
}
private void performCommonOperations() {
client().prepareIndex("test", "type", "1").setSource("field1", "value", "field2", 2, "field3", 3.0f).execute().actionGet();
client().admin().indices().prepareRefresh().execute().actionGet();
client().prepareSearch("test").setQuery(QueryBuilders.queryStringQuery("field1:value")).execute().actionGet();
client().prepareSearch("test").setQuery(QueryBuilders.termQuery("field1", "value")).execute().actionGet();
}
}
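The deleted test relied on holding WeakReferences to index- and shard-level singletons (including both injectors) and forcing garbage collection after the index was wiped; with the shard-level injector gone, most of what it tracked no longer exists. The leak-check idiom itself is plain Java; a minimal self-contained sketch of it (the class below is illustrative, not part of the codebase):

import java.lang.ref.WeakReference;

public class WeakReferenceLeakCheck {
    public static void main(String[] args) throws InterruptedException {
        Object resource = new byte[1024 * 1024];           // stand-in for an index- or shard-scoped object
        WeakReference<Object> ref = new WeakReference<>(resource);
        resource = null;                                    // drop the last strong reference, as the test did after wipeIndices
        for (int i = 0; i < 100 && ref.get() != null; i++) {
            System.gc();                                    // only a hint, hence the bounded retry loop
            Thread.sleep(10);
        }
        if (ref.get() != null) {
            throw new AssertionError("dangling reference: " + ref.get());
        }
        System.out.println("reference cleared; no leak detected");
    }
}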

View File

@ -270,10 +270,10 @@ public class IndexRecoveryIT extends ESIntegTestCase {
@Override @Override
public void run() { public void run() {
IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, nodeA);
assertThat(indicesService.indexServiceSafe(INDEX_NAME).shardSafe(0).recoveryStats().currentAsSource(), assertThat(indicesService.indexServiceSafe(INDEX_NAME).getShard(0).recoveryStats().currentAsSource(),
equalTo(1)); equalTo(1));
indicesService = internalCluster().getInstance(IndicesService.class, nodeB); indicesService = internalCluster().getInstance(IndicesService.class, nodeB);
assertThat(indicesService.indexServiceSafe(INDEX_NAME).shardSafe(0).recoveryStats().currentAsTarget(), assertThat(indicesService.indexServiceSafe(INDEX_NAME).getShard(0).recoveryStats().currentAsTarget(),
equalTo(1)); equalTo(1));
} }
}); });

View File

@ -39,7 +39,7 @@ public class RecoveryStatusTests extends ESSingleNodeTestCase {
public void testRenameTempFiles() throws IOException { public void testRenameTempFiles() throws IOException {
IndexService service = createIndex("foo"); IndexService service = createIndex("foo");
IndexShard indexShard = service.shard(0); IndexShard indexShard = service.getShardOrNull(0);
DiscoveryNode node = new DiscoveryNode("foo", new LocalTransportAddress("bar"), Version.CURRENT); DiscoveryNode node = new DiscoveryNode("foo", new LocalTransportAddress("bar"), Version.CURRENT);
RecoveryStatus status = new RecoveryStatus(indexShard, node, new RecoveryTarget.RecoveryListener() { RecoveryStatus status = new RecoveryStatus(indexShard, node, new RecoveryTarget.RecoveryListener() {
@Override @Override

View File

@ -171,7 +171,7 @@ public class RecoveriesCollectionTests extends ESSingleNodeTestCase {
long startRecovery(RecoveriesCollection collection, RecoveryTarget.RecoveryListener listener, TimeValue timeValue) { long startRecovery(RecoveriesCollection collection, RecoveryTarget.RecoveryListener listener, TimeValue timeValue) {
IndicesService indexServices = getInstanceFromNode(IndicesService.class); IndicesService indexServices = getInstanceFromNode(IndicesService.class);
IndexShard indexShard = indexServices.indexServiceSafe("test").shard(0); IndexShard indexShard = indexServices.indexServiceSafe("test").getShardOrNull(0);
final DiscoveryNode sourceNode = new DiscoveryNode("id", DummyTransportAddress.INSTANCE, Version.CURRENT); final DiscoveryNode sourceNode = new DiscoveryNode("id", DummyTransportAddress.INSTANCE, Version.CURRENT);
return collection.startRecovery(indexShard, sourceNode, listener, timeValue); return collection.startRecovery(indexShard, sourceNode, listener, timeValue);
} }

View File

@ -41,6 +41,7 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.Node; import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeBuilder; import org.elasticsearch.node.NodeBuilder;
import org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After; import org.junit.After;
@ -215,18 +216,15 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
return instanceFromNode.indexServiceSafe(index); return instanceFromNode.indexServiceSafe(index);
} }
protected static org.elasticsearch.index.engine.Engine engine(IndexService service) {
return service.shard(0).engine();
}
/** /**
* Create a new search context. * Create a new search context.
*/ */
protected static SearchContext createSearchContext(IndexService indexService) { protected static SearchContext createSearchContext(IndexService indexService) {
BigArrays bigArrays = indexService.injector().getInstance(BigArrays.class); BigArrays bigArrays = indexService.getIndexServices().getBigArrays();
ThreadPool threadPool = indexService.injector().getInstance(ThreadPool.class); ThreadPool threadPool = indexService.getIndexServices().getThreadPool();
PageCacheRecycler pageCacheRecycler = indexService.injector().getInstance(PageCacheRecycler.class); PageCacheRecycler pageCacheRecycler = node().injector().getInstance(PageCacheRecycler.class);
return new TestSearchContext(threadPool, pageCacheRecycler, bigArrays, indexService); ScriptService scriptService = node().injector().getInstance(ScriptService.class);
return new TestSearchContext(threadPool, pageCacheRecycler, bigArrays, scriptService, indexService);
} }
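With the per-index injector no longer handing out node-level services, the helper above pulls BigArrays and the ThreadPool from the IndexService's services accessor, resolves PageCacheRecycler and ScriptService from the node injector, and hands the ScriptService to TestSearchContext explicitly (its constructor gains the extra argument, as shown further down). A condensed sketch of that wiring, assuming it sits in an ESSingleNodeTestCase subclass where node() is accessible:

// Condensed view of the rewired helper (inside an ESSingleNodeTestCase subclass).
protected static SearchContext createSearchContext(IndexService indexService) {
    BigArrays bigArrays = indexService.getIndexServices().getBigArrays();      // index-scoped handle to node services
    ThreadPool threadPool = indexService.getIndexServices().getThreadPool();
    PageCacheRecycler pageCacheRecycler = node().injector().getInstance(PageCacheRecycler.class);
    ScriptService scriptService = node().injector().getInstance(ScriptService.class); // now passed in explicitly
    return new TestSearchContext(threadPool, pageCacheRecycler, bigArrays, scriptService, indexService);
}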
/** /**

View File

@ -68,7 +68,7 @@ import org.elasticsearch.index.engine.CommitStats;
import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineClosedException; import org.elasticsearch.index.engine.EngineClosedException;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.MockEngineFactoryPlugin; import org.elasticsearch.index.MockEngineFactoryPlugin;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.CircuitBreakerService;
@ -1047,8 +1047,8 @@ public final class InternalTestCluster extends TestCluster {
IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name); IndicesService indexServices = getInstance(IndicesService.class, nodeAndClient.name);
for (IndexService indexService : indexServices) { for (IndexService indexService : indexServices) {
for (IndexShard indexShard : indexService) { for (IndexShard indexShard : indexService) {
try { CommitStats commitStats = indexShard.commitStats();
CommitStats commitStats = indexShard.engine().commitStats(); if (commitStats != null) { // null if the engine is closed or if the shard is recovering
String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID); String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID);
if (syncId != null) { if (syncId != null) {
long liveDocsOnShard = commitStats.getNumDocs(); long liveDocsOnShard = commitStats.getNumDocs();
@ -1058,8 +1058,6 @@ public final class InternalTestCluster extends TestCluster {
docsOnShards.put(syncId, liveDocsOnShard); docsOnShards.put(syncId, liveDocsOnShard);
} }
} }
} catch (EngineClosedException e) {
// nothing to do, shard is closed
} }
} }
} }
@ -1741,7 +1739,7 @@ public final class InternalTestCluster extends TestCluster {
IndexService indexService = indicesService.indexService(index); IndexService indexService = indicesService.indexService(index);
if (indexService != null) { if (indexService != null) {
assertThat(indexService.settingsService().getSettings().getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1), greaterThan(shard)); assertThat(indexService.settingsService().getSettings().getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1), greaterThan(shard));
OperationRouting operationRouting = indexService.injector().getInstance(OperationRouting.class); OperationRouting operationRouting = getInstanceFromNode(OperationRouting.class, node);
while (true) { while (true) {
String routing = RandomStrings.randomAsciiOfLength(random, 10); String routing = RandomStrings.randomAsciiOfLength(random, 10);
final int targetShard = operationRouting.indexShards(clusterService.state(), index, type, null, routing).shardId().getId(); final int targetShard = operationRouting.indexShards(clusterService.state(), index, type, null, routing).shardId().getId();
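The InternalTestCluster change above replaces the try/catch around engine().commitStats() with a null check: IndexShard#commitStats() now returns null when the engine is closed or the shard is still recovering. A condensed sketch of the resulting loop that collects sync-ids and live-doc counts per shard (the docsOnShards bookkeeping is trimmed):

for (IndexService indexService : indexServices) {
    for (IndexShard indexShard : indexService) {
        CommitStats commitStats = indexShard.commitStats();
        if (commitStats == null) {
            continue; // engine closed or shard recovering: nothing committed to inspect
        }
        String syncId = commitStats.getUserData().get(Engine.SYNC_COMMIT_ID);
        if (syncId != null) {
            long liveDocsOnShard = commitStats.getNumDocs();
            // record liveDocsOnShard under syncId and compare it against the other copies of the shard
        }
    }
}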

View File

@ -85,6 +85,7 @@ public class TestSearchContext extends SearchContext {
final IndexShard indexShard; final IndexShard indexShard;
final Counter timeEstimateCounter = Counter.newCounter(); final Counter timeEstimateCounter = Counter.newCounter();
final QuerySearchResult queryResult = new QuerySearchResult(); final QuerySearchResult queryResult = new QuerySearchResult();
ScriptService scriptService;
ParsedQuery originalQuery; ParsedQuery originalQuery;
ParsedQuery postFilter; ParsedQuery postFilter;
Query query; Query query;
@ -99,7 +100,7 @@ public class TestSearchContext extends SearchContext {
private final long originNanoTime = System.nanoTime(); private final long originNanoTime = System.nanoTime();
private final Map<String, FetchSubPhaseContext> subPhaseContexts = new HashMap<>(); private final Map<String, FetchSubPhaseContext> subPhaseContexts = new HashMap<>();
public TestSearchContext(ThreadPool threadPool,PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, IndexService indexService) { public TestSearchContext(ThreadPool threadPool,PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ScriptService scriptService, IndexService indexService) {
super(ParseFieldMatcher.STRICT, null); super(ParseFieldMatcher.STRICT, null);
this.pageCacheRecycler = pageCacheRecycler; this.pageCacheRecycler = pageCacheRecycler;
this.bigArrays = bigArrays.withCircuitBreaking(); this.bigArrays = bigArrays.withCircuitBreaking();
@ -107,7 +108,8 @@ public class TestSearchContext extends SearchContext {
this.indexFieldDataService = indexService.fieldData(); this.indexFieldDataService = indexService.fieldData();
this.fixedBitSetFilterCache = indexService.bitsetFilterCache(); this.fixedBitSetFilterCache = indexService.bitsetFilterCache();
this.threadPool = threadPool; this.threadPool = threadPool;
this.indexShard = indexService.shard(0); this.indexShard = indexService.getShardOrNull(0);
this.scriptService = scriptService;
} }
public TestSearchContext() { public TestSearchContext() {
@ -119,6 +121,7 @@ public class TestSearchContext extends SearchContext {
this.threadPool = null; this.threadPool = null;
this.fixedBitSetFilterCache = null; this.fixedBitSetFilterCache = null;
this.indexShard = null; this.indexShard = null;
scriptService = null;
} }
public void setTypes(String... types) { public void setTypes(String... types) {
@ -325,7 +328,7 @@ public class TestSearchContext extends SearchContext {
@Override @Override
public ScriptService scriptService() { public ScriptService scriptService() {
return indexService.injector().getInstance(ScriptService.class); return scriptService;
} }
@Override @Override

View File

@ -24,14 +24,19 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.DirectoryService;
import org.elasticsearch.index.store.FsDirectoryService;
import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.index.store.IndexStoreModule;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.store.IndicesStore;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
public class MockFSIndexStore extends IndexStore { public class MockFSIndexStore extends IndexStore {
private final IndicesService indicesService;
public static class TestPlugin extends Plugin { public static class TestPlugin extends Plugin {
@Override @Override
public String name() { public String name() {
@ -52,13 +57,13 @@ public class MockFSIndexStore extends IndexStore {
@Inject @Inject
public MockFSIndexStore(Index index, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, public MockFSIndexStore(Index index, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService,
IndicesStore indicesStore) { IndicesStore indicesStore, IndicesService indicesService) {
super(index, indexSettings, indexSettingsService, indicesStore); super(index, indexSettings, indexSettingsService, indicesStore);
this.indicesService = indicesService;
} }
@Override public DirectoryService newDirectoryService(ShardPath path) {
public Class<? extends DirectoryService> shardDirectory() { return new MockFSDirectoryService(indexSettings, this, indicesService, path);
return MockFSDirectoryService.class;
} }
} }
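MockFSIndexStore illustrates the new contract for store implementations: rather than exposing a DirectoryService class for the (now removed) shard-level injector to instantiate, an IndexStore builds the DirectoryService itself from the shard's path. A minimal sketch of a custom store following that contract (MyIndexStore and MyDirectoryService are illustrative names):

import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.store.DirectoryService;
import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.indices.store.IndicesStore;

public class MyIndexStore extends IndexStore {

    @Inject
    public MyIndexStore(Index index, @IndexSettings Settings indexSettings,
                        IndexSettingsService indexSettingsService, IndicesStore indicesStore) {
        super(index, indexSettings, indexSettingsService, indicesStore);
    }

    @Override
    public DirectoryService newDirectoryService(ShardPath path) {
        // Construct the per-shard directory service directly; no shard injector is involved anymore.
        return new MyDirectoryService(indexSettings(), this, path); // MyDirectoryService is hypothetical
    }
}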

View File

@ -66,24 +66,10 @@ public class JvmExamplePlugin extends Plugin {
} }
@Override @Override
public Collection<Module> indexModules(Settings indexSettings) { public Collection<Module> indexModules(Settings indexSettings) { return Collections.emptyList();}
return Collections.emptyList();
}
@Override @Override
public Collection<Class<? extends Closeable>> indexServices() { public Collection<Class<? extends Closeable>> indexServices() { return Collections.emptyList();}
return Collections.emptyList();
}
@Override
public Collection<Module> shardModules(Settings indexSettings) {
return Collections.emptyList();
}
@Override
public Collection<Class<? extends Closeable>> shardServices() {
return Collections.emptyList();
}
@Override @Override
public Settings additionalSettings() { public Settings additionalSettings() {

View File

@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.DirectoryService;
import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.store.IndicesStore;
@ -37,7 +38,7 @@ public class SmbMmapFsIndexStore extends IndexStore {
} }
@Override @Override
public Class<? extends DirectoryService> shardDirectory() { public DirectoryService newDirectoryService(ShardPath path) {
return SmbMmapFsDirectoryService.class; return new SmbMmapFsDirectoryService(indexSettings(), this, path);
} }
} }

View File

@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.DirectoryService;
import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.store.IndicesStore;
@ -36,9 +37,13 @@ public class SmbSimpleFsIndexStore extends IndexStore {
super(index, indexSettings, indexSettingsService, indicesStore); super(index, indexSettings, indexSettingsService, indicesStore);
} }
@Override
public Class<? extends DirectoryService> shardDirectory() { public Class<? extends DirectoryService> shardDirectory() {
return SmbSimpleFsDirectoryService.class; return SmbSimpleFsDirectoryService.class;
} }
@Override
public DirectoryService newDirectoryService(ShardPath path) {
return new SmbSimpleFsDirectoryService(indexSettings(), this, path);
}
} }