Merge branch 'master' into immutable_map_be_gone

Nik Everett 2015-10-05 14:00:53 -04:00
commit ba68a8df63
152 changed files with 1828 additions and 1481 deletions

View File

@@ -274,7 +274,7 @@
                         <include>org/elasticsearch/common/cli/CliToolTestCase$*.class</include>
                         <include>org/elasticsearch/cluster/MockInternalClusterInfoService.class</include>
                         <include>org/elasticsearch/cluster/MockInternalClusterInfoService$*.class</include>
-                        <include>org/elasticsearch/index/shard/MockEngineFactoryPlugin.class</include>
+                        <include>org/elasticsearch/index/MockEngineFactoryPlugin.class</include>
                         <include>org/elasticsearch/search/MockSearchService.class</include>
                         <include>org/elasticsearch/search/MockSearchService$*.class</include>
                         <include>org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.class</include>

View File

@@ -26,6 +26,7 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.*;
 import org.apache.lucene.util.automaton.RegExp;
+import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -484,30 +485,31 @@ public class MapperQueryParser extends QueryParser {
         if (!settings.analyzeWildcard()) {
             return super.getPrefixQuery(field, termStr);
         }
+        List<String> tlist;
         // get Analyzer from superclass and tokenize the term
-        TokenStream source;
+        TokenStream source = null;
         try {
-            source = getAnalyzer().tokenStream(field, termStr);
-            source.reset();
-        } catch (IOException e) {
-            return super.getPrefixQuery(field, termStr);
-        }
-        List<String> tlist = new ArrayList<>();
-        CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
-        while (true) {
             try {
-                if (!source.incrementToken()) break;
+                source = getAnalyzer().tokenStream(field, termStr);
+                source.reset();
             } catch (IOException e) {
-                break;
+                return super.getPrefixQuery(field, termStr);
             }
-            tlist.add(termAtt.toString());
-        }
-        try {
-            source.close();
-        } catch (IOException e) {
-            // ignore
+            tlist = new ArrayList<>();
+            CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
+            while (true) {
+                try {
+                    if (!source.incrementToken()) break;
+                } catch (IOException e) {
+                    break;
+                }
+                tlist.add(termAtt.toString());
+            }
+        } finally {
+            if (source != null) {
+                IOUtils.closeWhileHandlingException(source);
+            }
         }
         if (tlist.size() == 1) {
@@ -617,8 +619,7 @@ public class MapperQueryParser extends QueryParser {
             char c = termStr.charAt(i);
             if (c == '?' || c == '*') {
                 if (isWithinToken) {
-                    try {
-                        TokenStream source = getAnalyzer().tokenStream(field, tmp.toString());
+                    try (TokenStream source = getAnalyzer().tokenStream(field, tmp.toString())) {
                         source.reset();
                         CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
                         if (source.incrementToken()) {
@@ -633,7 +634,6 @@ public class MapperQueryParser extends QueryParser {
                             // no tokens, just use what we have now
                             aggStr.append(tmp);
                         }
-                        source.close();
                     } catch (IOException e) {
                         aggStr.append(tmp);
                     }
@@ -648,22 +648,22 @@
         }
         if (isWithinToken) {
             try {
-                TokenStream source = getAnalyzer().tokenStream(field, tmp.toString());
-                source.reset();
-                CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
-                if (source.incrementToken()) {
-                    String term = termAtt.toString();
-                    if (term.length() == 0) {
-                        // no tokens, just use what we have now
-                        aggStr.append(tmp);
-                    } else {
-                        aggStr.append(term);
-                    }
-                } else {
-                    // no tokens, just use what we have now
-                    aggStr.append(tmp);
+                try (TokenStream source = getAnalyzer().tokenStream(field, tmp.toString())) {
+                    source.reset();
+                    CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
+                    if (source.incrementToken()) {
+                        String term = termAtt.toString();
+                        if (term.length() == 0) {
+                            // no tokens, just use what we have now
+                            aggStr.append(tmp);
+                        } else {
+                            aggStr.append(term);
+                        }
+                    } else {
+                        // no tokens, just use what we have now
+                        aggStr.append(tmp);
+                    }
                 }
-                source.close();
             } catch (IOException e) {
                 aggStr.append(tmp);
             }
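
The recurring change in this file swaps manual TokenStream cleanup (an explicit source.close() that is skipped when an exception escapes, or a hand-rolled finally block) for try-with-resources, which closes the stream on every exit path. A minimal standalone sketch of the pattern, not taken from the commit; the analyzer and field name are arbitrary:

// Sketch: tokenize text with a Lucene analyzer, closing the TokenStream on
// every path. StandardAnalyzer is used purely for illustration.
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

public class AnalyzeTerms {
    static List<String> tokens(Analyzer analyzer, String field, String text) throws IOException {
        List<String> result = new ArrayList<>();
        // try-with-resources closes the stream even if reset() or
        // incrementToken() throws, replacing the manual close() calls above
        try (TokenStream source = analyzer.tokenStream(field, text)) {
            CharTermAttribute termAtt = source.addAttribute(CharTermAttribute.class);
            source.reset();
            while (source.incrementToken()) {
                result.add(termAtt.toString());
            }
            source.end();
        }
        return result;
    }

    public static void main(String[] args) throws IOException {
        System.out.println(tokens(new StandardAnalyzer(), "f", "Hello, World!")); // [hello, world]
    }
}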

View File

@@ -959,11 +959,9 @@ public long ramBytesUsed() {
       // TODO: is there a Reader from a CharSequence?
       // Turn tokenstream into automaton:
       Automaton automaton = null;
-      TokenStream ts = queryAnalyzer.tokenStream("", key.toString());
-      try {
+      try (TokenStream ts = queryAnalyzer.tokenStream("", key.toString())) {
         automaton = getTokenStreamToAutomaton().toAutomaton(ts);
-      } finally {
-        IOUtils.closeWhileHandlingException(ts);
       }

       automaton = replaceSep(automaton);

View File

@@ -31,7 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.index.cache.query.QueryCacheStats;
 import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.index.fielddata.FieldDataStats;
-import org.elasticsearch.index.percolator.stats.PercolateStats;
+import org.elasticsearch.index.percolator.PercolateStats;
 import org.elasticsearch.index.shard.DocsStats;
 import org.elasticsearch.index.store.StoreStats;
 import org.elasticsearch.search.suggest.completion.CompletionStats;

View File

@@ -217,12 +217,10 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
         }

         List<AnalyzeResponse.AnalyzeToken> tokens = new ArrayList<>();
-        TokenStream stream = null;
         int lastPosition = -1;
         int lastOffset = 0;
         for (String text : request.text()) {
-            try {
-                stream = analyzer.tokenStream(field, text);
+            try (TokenStream stream = analyzer.tokenStream(field, text)) {
                 stream.reset();
                 CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
                 PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
@@ -243,11 +241,8 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
                 lastPosition += analyzer.getPositionIncrementGap(field);
                 lastOffset += analyzer.getOffsetGap(field);
             } catch (IOException e) {
                 throw new ElasticsearchException("failed to analyze", e);
-            } finally {
-                IOUtils.closeWhileHandlingException(stream);
             }
         }

View File

@@ -83,7 +83,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
     protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) {
         IndexService service = indicesService.indexService(shardRouting.getIndex());
         if (service != null) {
-            IndexShard shard = service.shard(shardRouting.id());
+            IndexShard shard = service.getShardOrNull(shardRouting.id());
             boolean clearedAtLeastOne = false;
             if (request.queryCache()) {
                 clearedAtLeastOne = true;

View File

@@ -62,7 +62,7 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
     @Override
     protected Tuple<ActionWriteResponse, ShardFlushRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).getShard(shardRequest.shardId.id());
         indexShard.flush(shardRequest.request.getRequest());
         logger.trace("{} flush request executed on primary", indexShard.shardId());
         return new Tuple<>(new ActionWriteResponse(), shardRequest.request);
@@ -70,7 +70,7 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
     @Override
     protected void shardOperationOnReplica(ShardId shardId, ShardFlushRequest request) {
-        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
+        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
         indexShard.flush(request.getRequest());
         logger.trace("{} flush request executed on replica", indexShard.shardId());
     }

View File

@@ -75,7 +75,7 @@ public class TransportOptimizeAction extends TransportBroadcastByNodeAction<Opti
     @Override
     protected EmptyResult shardOperation(OptimizeRequest request, ShardRouting shardRouting) throws IOException {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()).shardSafe(shardRouting.shardId().id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()).getShard(shardRouting.shardId().id());
         indexShard.optimize(request);
         return EmptyResult.INSTANCE;
     }

View File

@@ -100,7 +100,7 @@ public class TransportRecoveryAction extends TransportBroadcastByNodeAction<Reco
     @Override
     protected RecoveryState shardOperation(RecoveryRequest request, ShardRouting shardRouting) {
         IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardRouting.shardId().id());
+        IndexShard indexShard = indexService.getShard(shardRouting.shardId().id());
         return indexShard.recoveryState();
     }

View File

@@ -63,7 +63,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
     @Override
     protected Tuple<ActionWriteResponse, ReplicationRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).getShard(shardRequest.shardId.id());
         indexShard.refresh("api");
         logger.trace("{} refresh request executed on primary", indexShard.shardId());
         return new Tuple<>(new ActionWriteResponse(), shardRequest.request);
@@ -71,7 +71,7 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Repl
     @Override
     protected void shardOperationOnReplica(ShardId shardId, ReplicationRequest request) {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).shardSafe(shardId.id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
         indexShard.refresh("api");
         logger.trace("{} refresh request executed on replica", indexShard.shardId());
     }

View File

@@ -94,7 +94,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi
     @Override
     protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting shardRouting) {
         IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardRouting.id());
-        return new ShardSegments(indexShard.routingEntry(), indexShard.engine().segments(request.verbose()));
+        IndexShard indexShard = indexService.getShard(shardRouting.id());
+        return new ShardSegments(indexShard.routingEntry(), indexShard.segments(request.verbose()));
     }
 }

View File

@@ -34,7 +34,7 @@ import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.get.GetStats;
 import org.elasticsearch.index.indexing.IndexingStats;
 import org.elasticsearch.index.merge.MergeStats;
-import org.elasticsearch.index.percolator.stats.PercolateStats;
+import org.elasticsearch.index.percolator.PercolateStats;
 import org.elasticsearch.index.recovery.RecoveryStats;
 import org.elasticsearch.index.refresh.RefreshStats;
 import org.elasticsearch.index.search.stats.SearchStats;
@@ -167,7 +167,7 @@ public class CommonStats implements Streamable, ToXContent {
                     segments = indexShard.segmentStats();
                     break;
                 case Percolate:
-                    percolate = indexShard.shardPercolateService().stats();
+                    percolate = indexShard.percolateStats();
                     break;
                 case Translog:
                     translog = indexShard.translogStats();

View File

@@ -95,7 +95,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
     @Override
     protected ShardStats shardOperation(IndicesStatsRequest request, ShardRouting shardRouting) {
         IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardRouting.shardId().id());
+        IndexShard indexShard = indexService.getShard(shardRouting.shardId().id());
         // if we don't have the routing entry yet, we need it stats wise, we treat it as if the shard is not ready yet
         if (indexShard.routingEntry() == null) {
             throw new ShardNotFoundException(indexShard.shardId());

View File

@@ -96,8 +96,8 @@ public class TransportUpgradeStatusAction extends TransportBroadcastByNodeAction
     @Override
     protected ShardUpgradeStatus shardOperation(UpgradeStatusRequest request, ShardRouting shardRouting) {
         IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardRouting.shardId().id());
-        List<Segment> segments = indexShard.engine().segments(false);
+        IndexShard indexShard = indexService.getShard(shardRouting.shardId().id());
+        List<Segment> segments = indexShard.segments(false);
         long total_bytes = 0;
         long to_upgrade_bytes = 0;
         long to_upgrade_bytes_ancient = 0;

View File

@@ -119,7 +119,7 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
     @Override
     protected ShardUpgradeResult shardOperation(UpgradeRequest request, ShardRouting shardRouting) throws IOException {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()).shardSafe(shardRouting.shardId().id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()).getShard(shardRouting.shardId().id());
         org.apache.lucene.util.Version oldestLuceneSegment = indexShard.upgrade(request);
         // We are using the current version of Elasticsearch as upgrade version since we update mapping to match the current version
         return new ShardUpgradeResult(shardRouting.shardId(), indexShard.routingEntry().primary(), Version.CURRENT, oldestLuceneSegment);

View File

@@ -163,7 +163,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
     protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
         IndexQueryParserService queryParserService = indexService.queryParserService();
-        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
+        IndexShard indexShard = indexService.getShard(request.shardId().id());

         boolean valid;
         String explanation = null;

View File

@@ -116,7 +116,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
     protected Tuple<BulkShardResponse, BulkShardRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
         final BulkShardRequest request = shardRequest.request;
         final IndexService indexService = indicesService.indexServiceSafe(request.index());
-        final IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());
+        final IndexShard indexShard = indexService.getShard(shardRequest.shardId.id());

         long[] preVersions = new long[request.items().length];
         VersionType[] preVersionTypes = new VersionType[request.items().length];
@@ -447,7 +447,7 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
     @Override
     protected void shardOperationOnReplica(ShardId shardId, BulkShardRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());
         Translog.Location location = null;
         for (int i = 0; i < request.items().length; i++) {
             BulkItemRequest item = request.items()[i];

View File

@@ -42,7 +42,6 @@ import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -130,7 +129,7 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
     @Override
     protected Tuple<DeleteResponse, DeleteRequest> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) {
         DeleteRequest request = shardRequest.request;
-        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).getShard(shardRequest.shardId.id());
         Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY);
         indexShard.delete(delete);
         // update the request with teh version so it will go to the replicas
@@ -146,7 +145,7 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
     @Override
     protected void shardOperationOnReplica(ShardId shardId, DeleteRequest request) {
-        IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).shardSafe(shardId.id());
+        IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
         Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version(), request.versionType(), Engine.Operation.Origin.REPLICA);
         indexShard.delete(delete);

View File

@@ -148,7 +148,7 @@ public class TransportExistsAction extends TransportBroadcastAction<ExistsReques
     @Override
     protected ShardExistsResponse shardOperation(ShardExistsRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
+        IndexShard indexShard = indexService.getShard(request.shardId().id());
         SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.shardId().getIndex(), request.shardId().id());
         SearchContext context = new DefaultSearchContext(0,

View File

@@ -104,7 +104,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
     @Override
     protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());
         Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
         Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
         if (!result.exists()) {

View File

@@ -152,7 +152,7 @@ public class TransportFieldStatsTransportAction extends TransportBroadcastAction
         Map<String, FieldStats> fieldStats = new HashMap<>();
         IndexService indexServices = indicesService.indexServiceSafe(shardId.getIndex());
         MapperService mapperService = indexServices.mapperService();
-        IndexShard shard = indexServices.shardSafe(shardId.id());
+        IndexShard shard = indexServices.getShard(shardId.id());
         try (Engine.Searcher searcher = shard.acquireSearcher("fieldstats")) {
             for (String field : request.getFields()) {
                 MappedFieldType fieldType = mapperService.fullName(field);

View File

@@ -92,7 +92,7 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
     @Override
     protected GetResponse shardOperation(GetRequest request, ShardId shardId) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());

         if (request.refresh() && !request.realtime()) {
             indexShard.refresh("refresh_flag_get");

View File

@@ -87,7 +87,7 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
     @Override
     protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, ShardId shardId) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());

         if (request.refresh() && !request.realtime()) {
             indexShard.refresh("refresh_flag_mget");

View File

@@ -164,7 +164,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
         }

         IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id());
+        IndexShard indexShard = indexService.getShard(shardRequest.shardId.id());

         final WriteResult<IndexResponse> result = executeIndexRequestOnPrimary(null, request, indexShard);
         final IndexResponse response = result.response;
@@ -176,7 +176,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
     @Override
     protected void shardOperationOnReplica(ShardId shardId, IndexRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());
         SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).index(shardId.getIndex()).type(request.type()).id(request.id())
                 .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl());

View File

@@ -130,7 +130,7 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
     @Override
     protected ShardSuggestResponse shardOperation(ShardSuggestRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
+        IndexShard indexShard = indexService.getShard(request.shardId().id());
         ShardSuggestMetric suggestMetric = indexShard.getSuggestMetric();
         suggestMetric.preSuggest();
         long startTime = System.nanoTime();

View File

@@ -666,7 +666,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
     protected Releasable getIndexShardOperationsCounter(ShardId shardId) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.index().getName());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());
         return new IndexShardReference(indexShard);
     }
@@ -678,7 +678,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
                 logger.debug("ignoring failed replica [{}][{}] because index was already removed.", index, shardId);
                 return;
             }
-            IndexShard indexShard = indexService.shard(shardId);
+            IndexShard indexShard = indexService.getShardOrNull(shardId);
             if (indexShard == null) {
                 logger.debug("ignoring failed replica [{}][{}] because index was already removed.", index, shardId);
                 return;

View File

@@ -79,7 +79,7 @@ public class TransportShardMultiTermsVectorAction extends TransportSingleShardAc
             TermVectorsRequest termVectorsRequest = request.requests.get(i);
             try {
                 IndexService indexService = indicesService.indexServiceSafe(request.index());
-                IndexShard indexShard = indexService.shardSafe(shardId.id());
+                IndexShard indexShard = indexService.getShard(shardId.id());
                 TermVectorsResponse termVectorsResponse = indexShard.getTermVectors(termVectorsRequest);
                 termVectorsResponse.updateTookInMillis(termVectorsRequest.startTime());
                 response.add(request.locations.get(i), termVectorsResponse);

View File

@@ -82,7 +82,7 @@ public class TransportTermVectorsAction extends TransportSingleShardAction<TermV
     @Override
     protected TermVectorsResponse shardOperation(TermVectorsRequest request, ShardId shardId) {
         IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.shardSafe(shardId.id());
+        IndexShard indexShard = indexService.getShard(shardId.id());
         TermVectorsResponse response = indexShard.getTermVectors(request);
         response.updateTookInMillis(request.startTime());
         return response;

View File

@@ -166,7 +166,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
     protected void shardOperation(final UpdateRequest request, final ActionListener<UpdateResponse> listener, final int retryCount) {
         IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex());
-        IndexShard indexShard = indexService.shardSafe(request.shardId());
+        IndexShard indexShard = indexService.getShard(request.shardId());
         final UpdateHelper.Result result = updateHelper.prepare(request, indexShard);
         switch (result.operation()) {
             case UPSERT:
@@ -266,7 +266,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
                 UpdateResponse update = result.action();
                 IndexService indexServiceOrNull = indicesService.indexService(request.concreteIndex());
                 if (indexServiceOrNull != null) {
-                    IndexShard shard = indexService.shard(request.shardId());
+                    IndexShard shard = indexService.getShardOrNull(request.shardId());
                     if (shard != null) {
                         shard.indexingService().noopUpdate(request.type());
                     }

View File

@@ -45,9 +45,16 @@ public final class BootstrapInfo {
     }

     /**
-     * Returns true if secure computing mode is enabled (linux/amd64 only)
+     * Returns true if secure computing mode is enabled (linux/amd64, OS X only)
      */
     public static boolean isSeccompInstalled() {
         return Natives.isSeccompInstalled();
     }
+
+    /**
+     * codebase location for untrusted scripts (provide some additional safety)
+     * <p>
+     * This is not a full URL, just a path.
+     */
+    public static final String UNTRUSTED_CODEBASE = "/untrusted";
 }

View File

@@ -26,29 +26,27 @@ import java.net.URL;
 import java.security.CodeSource;
 import java.security.Permission;
 import java.security.PermissionCollection;
-import java.security.Permissions;
 import java.security.Policy;
 import java.security.ProtectionDomain;
 import java.security.URIParameter;
-import java.util.PropertyPermission;

 /** custom policy for union of static and dynamic permissions */
 final class ESPolicy extends Policy {

     /** template policy file, the one used in tests */
     static final String POLICY_RESOURCE = "security.policy";
-    /** limited policy for groovy scripts */
-    static final String GROOVY_RESOURCE = "groovy.policy";
+    /** limited policy for scripts */
+    static final String UNTRUSTED_RESOURCE = "untrusted.policy";

     final Policy template;
-    final Policy groovy;
+    final Policy untrusted;
     final PermissionCollection dynamic;

     public ESPolicy(PermissionCollection dynamic) throws Exception {
         URI policyUri = getClass().getResource(POLICY_RESOURCE).toURI();
-        URI groovyUri = getClass().getResource(GROOVY_RESOURCE).toURI();
+        URI untrustedUri = getClass().getResource(UNTRUSTED_RESOURCE).toURI();
         this.template = Policy.getInstance("JavaPolicy", new URIParameter(policyUri));
-        this.groovy = Policy.getInstance("JavaPolicy", new URIParameter(groovyUri));
+        this.untrusted = Policy.getInstance("JavaPolicy", new URIParameter(untrustedUri));
         this.dynamic = dynamic;
     }
@@ -56,15 +54,17 @@ final class ESPolicy extends Policy {
     public boolean implies(ProtectionDomain domain, Permission permission) {
         CodeSource codeSource = domain.getCodeSource();
         // codesource can be null when reducing privileges via doPrivileged()
-        if (codeSource != null) {
-            URL location = codeSource.getLocation();
-            // location can be null... ??? nobody knows
-            // https://bugs.openjdk.java.net/browse/JDK-8129972
-            if (location != null) {
-                // run groovy scripts with no permissions (except logging property)
-                if ("/groovy/script".equals(location.getFile())) {
-                    return groovy.implies(domain, permission);
-                }
+        if (codeSource == null) {
+            return false;
+        }
+
+        URL location = codeSource.getLocation();
+        // location can be null... ??? nobody knows
+        // https://bugs.openjdk.java.net/browse/JDK-8129972
+        if (location != null) {
+            // run scripts with limited permissions
+            if (BootstrapInfo.UNTRUSTED_CODEBASE.equals(location.getFile())) {
+                return untrusted.implies(domain, permission);
             }
         }
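
The groovy-specific check above becomes generic: any code whose CodeSource location equals BootstrapInfo.UNTRUSTED_CODEBASE ("/untrusted") is evaluated against untrusted.policy rather than the main policy. The contents of untrusted.policy are not shown in this diff; a hypothetical grant block in standard Java policy-file syntax, for illustration only:

// untrusted.policy (hypothetical contents -- the real file is not in this diff)
// Code running under the "/untrusted" codebase gets only what is granted here;
// the old groovy.policy comment suggests little beyond a logging property read.
grant {
    permission java.util.PropertyPermission "es.logger.level", "read";
};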

View File

@@ -204,6 +204,9 @@ public class MetaDataCreateIndexService extends AbstractComponent {
         if (state.metaData().hasAlias(index)) {
             throw new InvalidIndexNameException(new Index(index), index, "already exists as alias");
         }
+        if (index.equals(".") || index.equals("..")) {
+            throw new InvalidIndexNameException(new Index(index), index, "must not be '.' or '..'");
+        }
     }

     private void createIndex(final CreateIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener, final Semaphore mdLock) {

View File

@@ -602,12 +602,12 @@ public class DiskThresholdDecider extends AllocationDecider {
             return allocation.decision(Decision.YES, NAME, "disk threshold decider disabled");
         }

-        // Allow allocation regardless if only a single node is available
-        if (allocation.nodes().size() <= 1) {
+        // Allow allocation regardless if only a single data node is available
+        if (allocation.nodes().dataNodes().size() <= 1) {
             if (logger.isTraceEnabled()) {
-                logger.trace("only a single node is present, allowing allocation");
+                logger.trace("only a single data node is present, allowing allocation");
             }
-            return allocation.decision(Decision.YES, NAME, "only a single node is present");
+            return allocation.decision(Decision.YES, NAME, "only a single data node is present");
         }

         // Fail open there is no info available
View File

@@ -0,0 +1,77 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.hash;
+
+import org.elasticsearch.ElasticsearchException;
+
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+
+public class MessageDigests {
+
+    private static final MessageDigest MD5_DIGEST;
+    private static final MessageDigest SHA_1_DIGEST;
+    private static final MessageDigest SHA_256_DIGEST;
+
+    static {
+        try {
+            MD5_DIGEST = MessageDigest.getInstance("MD5");
+            SHA_1_DIGEST = MessageDigest.getInstance("SHA-1");
+            SHA_256_DIGEST = MessageDigest.getInstance("SHA-256");
+        } catch (NoSuchAlgorithmException e) {
+            throw new ElasticsearchException("Unexpected exception creating MessageDigest instance", e);
+        }
+    }
+
+    public static MessageDigest md5() {
+        return clone(MD5_DIGEST);
+    }
+
+    public static MessageDigest sha1() {
+        return clone(SHA_1_DIGEST);
+    }
+
+    public static MessageDigest sha256() {
+        return clone(SHA_256_DIGEST);
+    }
+
+    private static MessageDigest clone(MessageDigest messageDigest) {
+        try {
+            return (MessageDigest) messageDigest.clone();
+        } catch (CloneNotSupportedException e) {
+            throw new ElasticsearchException("Unexpected exception cloning MessageDigest instance", e);
+        }
+    }
+
+    private static final char[] HEX_DIGITS = "0123456789abcdef".toCharArray();
+
+    public static String toHexString(byte[] bytes) {
+        if (bytes == null) {
+            throw new NullPointerException("bytes");
+        }
+
+        StringBuilder sb = new StringBuilder(2 * bytes.length);
+        for (int i = 0; i < bytes.length; i++) {
+            byte b = bytes[i];
+            sb.append(HEX_DIGITS[b >> 4 & 0xf]).append(HEX_DIGITS[b & 0xf]);
+        }
+
+        return sb.toString();
+    }
+}
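
MessageDigests hands out clones of statically initialized prototypes instead of calling MessageDigest.getInstance on every request, so callers get a fresh, unshared digest without the lookup cost; toHexString replaces Guava's hex rendering for the checksum helpers in the next file. A small usage sketch, assuming only that this class is on the classpath:

import org.elasticsearch.common.hash.MessageDigests;

import java.nio.charset.StandardCharsets;

public class ChecksumExample {
    public static void main(String[] args) {
        byte[] data = "hello".getBytes(StandardCharsets.UTF_8);
        // sha1() clones the shared prototype, so this is safe across threads
        String hex = MessageDigests.toHexString(MessageDigests.sha1().digest(data));
        System.out.println(hex); // aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d
    }
}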

View File

@@ -19,19 +19,22 @@
 package org.elasticsearch.common.http.client;

-import com.google.common.hash.Hashing;
+import java.nio.charset.StandardCharsets;
+
 import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.*;
+import org.elasticsearch.Build;
+import org.elasticsearch.ElasticsearchCorruptionException;
+import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.Base64;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.hash.MessageDigests;
 import org.elasticsearch.common.unit.TimeValue;

 import java.io.*;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.net.URLConnection;
-import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.NoSuchFileException;
 import java.nio.file.Path;
@@ -96,7 +99,7 @@ public class HttpDownloadHelper {
     public static Checksummer SHA1_CHECKSUM = new Checksummer() {
         @Override
         public String checksum(byte[] filebytes) {
-            return Hashing.sha1().hashBytes(filebytes).toString();
+            return MessageDigests.toHexString(MessageDigests.sha1().digest(filebytes));
         }

         @Override
@@ -109,7 +112,7 @@ public class HttpDownloadHelper {
     public static Checksummer MD5_CHECKSUM = new Checksummer() {
         @Override
         public String checksum(byte[] filebytes) {
-            return Hashing.md5().hashBytes(filebytes).toString();
+            return MessageDigests.toHexString(MessageDigests.md5().digest(filebytes));
         }

         @Override

View File

@@ -93,12 +93,14 @@ public class LogConfigurator {
         loaded = true;
         // TODO: this is partly a copy of InternalSettingsPreparer...we should pass in Environment and not do all this...
         Environment environment = new Environment(settings);
-        Settings.Builder settingsBuilder = settingsBuilder().put(settings);
+        Settings.Builder settingsBuilder = settingsBuilder();
         resolveConfig(environment, settingsBuilder);
         settingsBuilder
                 .putProperties("elasticsearch.", System.getProperties())
-                .putProperties("es.", System.getProperties())
-                .replacePropertyPlaceholders();
+                .putProperties("es.", System.getProperties());
+        // add custom settings after config was added so that they are not overwritten by config
+        settingsBuilder.put(settings);
+        settingsBuilder.replacePropertyPlaceholders();
         Properties props = new Properties();
         for (Map.Entry<String, String> entry : settingsBuilder.build().getAsMap().entrySet()) {
             String key = "log4j." + entry.getKey();

View File

@@ -20,21 +20,37 @@
 package org.elasticsearch.index;

 import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.inject.util.Providers;
+import org.elasticsearch.index.aliases.IndexAliasesService;
+import org.elasticsearch.index.engine.EngineFactory;
+import org.elasticsearch.index.engine.InternalEngineFactory;
+import org.elasticsearch.index.fielddata.IndexFieldDataService;
+import org.elasticsearch.index.mapper.MapperService;
+import org.elasticsearch.index.shard.IndexSearcherWrapper;

 /**
  *
  */
 public class IndexModule extends AbstractModule {

-    private final Settings settings;
-
-    public IndexModule(Settings settings) {
-        this.settings = settings;
-    }
+    // pkg private so tests can mock
+    Class<? extends EngineFactory> engineFactoryImpl = InternalEngineFactory.class;
+    Class<? extends IndexSearcherWrapper> indexSearcherWrapper = null;

     @Override
     protected void configure() {
+        bind(EngineFactory.class).to(engineFactoryImpl).asEagerSingleton();
+        if (indexSearcherWrapper == null) {
+            bind(IndexSearcherWrapper.class).toProvider(Providers.of(null));
+        } else {
+            bind(IndexSearcherWrapper.class).to(indexSearcherWrapper).asEagerSingleton();
+        }
         bind(IndexService.class).asEagerSingleton();
+        bind(IndexServicesProvider.class).asEagerSingleton();
+        bind(MapperService.class).asEagerSingleton();
+        bind(IndexAliasesService.class).asEagerSingleton();
+        bind(IndexFieldDataService.class).asEagerSingleton();
     }
 }
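
When no IndexSearcherWrapper is configured, the module binds the interface to Providers.of(null) so injection points still resolve instead of failing at injector creation. A standalone sketch of the same trick with stock Guice (Elasticsearch uses its own embedded fork, but the behavior shown is the same); note that Guice only accepts null from a provider at injection points carrying an annotation whose simple name is "Nullable":

import com.google.inject.AbstractModule;
import com.google.inject.Guice;
import com.google.inject.Inject;
import com.google.inject.util.Providers;

import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;

public class OptionalBindingExample {
    // Guice recognizes any runtime annotation named "Nullable"
    @Retention(RetentionPolicy.RUNTIME)
    @interface Nullable {}

    interface SearcherWrapper {}

    static class SearchService {
        final SearcherWrapper wrapper; // null when nothing is configured

        @Inject
        SearchService(@Nullable SearcherWrapper wrapper) {
            this.wrapper = wrapper;
        }
    }

    static class WrapperModule extends AbstractModule {
        // pkg private so tests can swap in an implementation, as in IndexModule
        Class<? extends SearcherWrapper> wrapper = null;

        @Override
        protected void configure() {
            if (wrapper == null) {
                // nothing configured: bind to a provider of null
                bind(SearcherWrapper.class).toProvider(Providers.of(null));
            } else {
                bind(SearcherWrapper.class).to(wrapper).asEagerSingleton();
            }
        }
    }

    public static void main(String[] args) {
        SearchService service = Guice.createInjector(new WrapperModule()).getInstance(SearchService.class);
        System.out.println(service.wrapper); // prints "null"
    }
}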

View File

@ -25,13 +25,7 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.Injectors;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.inject.ModulesBuilder;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock; import org.elasticsearch.env.ShardLock;
@ -48,19 +42,15 @@ import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardModule; import org.elasticsearch.index.shard.ShadowIndexShard;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException; import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.IndexStore;
import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreModule;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InternalIndicesLifecycle; import org.elasticsearch.indices.InternalIndicesLifecycle;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.plugins.PluginsService;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
@ -81,86 +71,42 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
*/ */
public class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable<IndexShard> { public class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable<IndexShard> {
private final Injector injector;
private final Settings indexSettings; private final Settings indexSettings;
private final PluginsService pluginsService;
private final InternalIndicesLifecycle indicesLifecycle; private final InternalIndicesLifecycle indicesLifecycle;
private final AnalysisService analysisService; private final AnalysisService analysisService;
private final MapperService mapperService;
private final IndexQueryParserService queryParserService;
private final SimilarityService similarityService;
private final IndexAliasesService aliasesService;
private final IndexCache indexCache;
private final IndexFieldDataService indexFieldData; private final IndexFieldDataService indexFieldData;
private final BitsetFilterCache bitsetFilterCache; private final BitsetFilterCache bitsetFilterCache;
private final IndexSettingsService settingsService; private final IndexSettingsService settingsService;
private final NodeEnvironment nodeEnv; private final NodeEnvironment nodeEnv;
private final IndicesService indicesServices; private final IndicesService indicesServices;
private final IndexServicesProvider indexServicesProvider;
private volatile Map<Integer, IndexShardInjectorPair> shards = emptyMap(); private final IndexStore indexStore;
private volatile Map<Integer, IndexShard> shards = emptyMap();
private static class IndexShardInjectorPair {
private final IndexShard indexShard;
private final Injector injector;
public IndexShardInjectorPair(IndexShard indexShard, Injector injector) {
this.indexShard = indexShard;
this.injector = injector;
}
public IndexShard getIndexShard() {
return indexShard;
}
public Injector getInjector() {
return injector;
}
}
private final AtomicBoolean closed = new AtomicBoolean(false); private final AtomicBoolean closed = new AtomicBoolean(false);
private final AtomicBoolean deleted = new AtomicBoolean(false); private final AtomicBoolean deleted = new AtomicBoolean(false);
@Inject @Inject
public IndexService(Injector injector, Index index, @IndexSettings Settings indexSettings, NodeEnvironment nodeEnv, public IndexService(Index index, @IndexSettings Settings indexSettings, NodeEnvironment nodeEnv,
AnalysisService analysisService, MapperService mapperService, IndexQueryParserService queryParserService, AnalysisService analysisService,
SimilarityService similarityService, IndexAliasesService aliasesService, IndexCache indexCache,
IndexSettingsService settingsService, IndexSettingsService settingsService,
IndexFieldDataService indexFieldData, BitsetFilterCache bitSetFilterCache, IndicesService indicesServices) { IndexFieldDataService indexFieldData,
BitsetFilterCache bitSetFilterCache,
IndicesService indicesServices,
IndexServicesProvider indexServicesProvider,
IndexStore indexStore) {
super(index, indexSettings); super(index, indexSettings);
this.injector = injector;
this.indexSettings = indexSettings; this.indexSettings = indexSettings;
this.analysisService = analysisService; this.analysisService = analysisService;
this.mapperService = mapperService;
this.queryParserService = queryParserService;
this.similarityService = similarityService;
this.aliasesService = aliasesService;
this.indexCache = indexCache;
this.indexFieldData = indexFieldData; this.indexFieldData = indexFieldData;
this.settingsService = settingsService; this.settingsService = settingsService;
this.bitsetFilterCache = bitSetFilterCache; this.bitsetFilterCache = bitSetFilterCache;
this.pluginsService = injector.getInstance(PluginsService.class);
this.indicesServices = indicesServices; this.indicesServices = indicesServices;
this.indicesLifecycle = (InternalIndicesLifecycle) injector.getInstance(IndicesLifecycle.class); this.indicesLifecycle = (InternalIndicesLifecycle) indexServicesProvider.getIndicesLifecycle();
this.nodeEnv = nodeEnv;
// inject workarounds for cyclic dep this.indexServicesProvider = indexServicesProvider;
this.indexStore = indexStore;
indexFieldData.setListener(new FieldDataCacheListener(this)); indexFieldData.setListener(new FieldDataCacheListener(this));
bitSetFilterCache.setListener(new BitsetCacheListener(this)); bitSetFilterCache.setListener(new BitsetCacheListener(this));
this.nodeEnv = nodeEnv;
} }
public int numberOfShards() { public int numberOfShards() {
@ -173,7 +119,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
@Override @Override
public Iterator<IndexShard> iterator() { public Iterator<IndexShard> iterator() {
return shards.values().stream().map((p) -> p.getIndexShard()).iterator(); return shards.values().iterator();
} }
public boolean hasShard(int shardId) { public boolean hasShard(int shardId) {
@ -184,19 +130,15 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
* Return the shard with the provided id, or null if there is no such shard. * Return the shard with the provided id, or null if there is no such shard.
*/ */
@Nullable @Nullable
public IndexShard shard(int shardId) { public IndexShard getShardOrNull(int shardId) {
IndexShardInjectorPair indexShardInjectorPair = shards.get(shardId); return shards.get(shardId);
if (indexShardInjectorPair != null) {
return indexShardInjectorPair.getIndexShard();
}
return null;
} }
/** /**
* Return the shard with the provided id, or throw an exception if it doesn't exist. * Return the shard with the provided id, or throw an exception if it doesn't exist.
*/ */
public IndexShard shardSafe(int shardId) { public IndexShard getShard(int shardId) {
IndexShard indexShard = shard(shardId); IndexShard indexShard = getShardOrNull(shardId);
if (indexShard == null) { if (indexShard == null) {
throw new ShardNotFoundException(new ShardId(index, shardId)); throw new ShardNotFoundException(new ShardId(index, shardId));
} }
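The rename from shard/shardSafe to getShardOrNull/getShard makes the null contract visible at every call site. A minimal sketch of the two calling patterns (indexService and shardId are assumed to come from surrounding code; refresh stands in for any shard operation):

    // Nullable lookup: caller handles absence explicitly.
    IndexShard shard = indexService.getShardOrNull(shardId);
    if (shard != null) {
        shard.refresh("example"); // only touch the shard if it is allocated on this node
    }

    // Fail-fast lookup: throws ShardNotFoundException instead of returning null.
    IndexShard required = indexService.getShard(shardId);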
@ -207,16 +149,12 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
return shards.keySet(); return shards.keySet();
} }
public Injector injector() {
return injector;
}
public IndexSettingsService settingsService() { public IndexSettingsService settingsService() {
return this.settingsService; return this.settingsService;
} }
public IndexCache cache() { public IndexCache cache() {
return indexCache; return indexServicesProvider.getIndexCache();
} }
public IndexFieldDataService fieldData() { public IndexFieldDataService fieldData() {
@ -232,19 +170,19 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
} }
public MapperService mapperService() { public MapperService mapperService() {
return mapperService; return indexServicesProvider.getMapperService();
} }
public IndexQueryParserService queryParserService() { public IndexQueryParserService queryParserService() {
return queryParserService; return indexServicesProvider.getQueryParserService();
} }
public SimilarityService similarityService() { public SimilarityService similarityService() {
return similarityService; return indexServicesProvider.getSimilarityService();
} }
public IndexAliasesService aliasesService() { public IndexAliasesService aliasesService() {
return aliasesService; return indexServicesProvider.getIndexAliasesService();
} }
public synchronized void close(final String reason, boolean delete) { public synchronized void close(final String reason, boolean delete) {
@ -261,16 +199,6 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
} }
} }
/**
* Return the shard injector for the provided id, or throw an exception if there is no such shard.
*/
public Injector shardInjectorSafe(int shardId) {
IndexShardInjectorPair indexShardInjectorPair = shards.get(shardId);
if (indexShardInjectorPair == null) {
throw new ShardNotFoundException(new ShardId(index, shardId));
}
return indexShardInjectorPair.getInjector();
}
public String indexUUID() { public String indexUUID() {
return indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE); return indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
@ -301,10 +229,14 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
if (closed.get()) { if (closed.get()) {
throw new IllegalStateException("Can't create shard [" + index.name() + "][" + sShardId + "], closed"); throw new IllegalStateException("Can't create shard [" + index.name() + "][" + sShardId + "], closed");
} }
if (indexSettings.get("index.translog.type") != null) { // TODO remove?
throw new IllegalStateException("a custom translog type is no longer supported. got [" + indexSettings.get("index.translog.type") + "]");
}
final ShardId shardId = new ShardId(index, sShardId); final ShardId shardId = new ShardId(index, sShardId);
ShardLock lock = null; ShardLock lock = null;
boolean success = false; boolean success = false;
Injector shardInjector = null; Store store = null;
IndexShard indexShard = null;
try { try {
lock = nodeEnv.shardLock(shardId, TimeUnit.SECONDS.toMillis(5)); lock = nodeEnv.shardLock(shardId, TimeUnit.SECONDS.toMillis(5));
indicesLifecycle.beforeIndexShardCreated(shardId, indexSettings); indicesLifecycle.beforeIndexShardCreated(shardId, indexSettings);
@ -325,7 +257,6 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
if (path == null) { if (path == null) {
// TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. for a shard // TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. for a shard
// that's being relocated/replicated we know how large it will become once it's done copying: // that's being relocated/replicated we know how large it will become once it's done copying:
// Count up how many shards are currently on each data path: // Count up how many shards are currently on each data path:
Map<Path,Integer> dataPathToShardCount = new HashMap<>(); Map<Path,Integer> dataPathToShardCount = new HashMap<>();
for(IndexShard shard : this) { for(IndexShard shard : this) {
@ -351,39 +282,17 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
// if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary. // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary.
final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false || final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false ||
(primary && IndexMetaData.isOnSharedFilesystem(indexSettings)); (primary && IndexMetaData.isOnSharedFilesystem(indexSettings));
ModulesBuilder modules = new ModulesBuilder(); store = new Store(shardId, indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> indexServicesProvider.getIndicesQueryCache().onClose(shardId)));
// plugin modules must be added here, before others or we can get crazy injection errors... if (useShadowEngine(primary, indexSettings)) {
for (Module pluginModule : pluginsService.shardModules(indexSettings)) { indexShard = new ShadowIndexShard(shardId, indexSettings, path, store, indexServicesProvider);
modules.add(pluginModule); } else {
} indexShard = new IndexShard(shardId, indexSettings, path, store, indexServicesProvider);
modules.add(new IndexShardModule(shardId, primary, indexSettings));
modules.add(new StoreModule(injector.getInstance(IndexStore.class).shardDirectory(), lock,
new StoreCloseListener(shardId, canDeleteShardContent, new Closeable() {
@Override
public void close() throws IOException {
injector.getInstance(IndicesQueryCache.class).onClose(shardId);
}
}), path));
pluginsService.processModules(modules);
try {
shardInjector = modules.createChildInjector(injector);
} catch (CreationException e) {
ElasticsearchException ex = new ElasticsearchException("failed to create shard", Injectors.getFirstErrorFailure(e));
ex.setShard(shardId);
throw ex;
} catch (Throwable e) {
ElasticsearchException ex = new ElasticsearchException("failed to create shard", e);
ex.setShard(shardId);
throw ex;
} }
IndexShard indexShard = shardInjector.getInstance(IndexShard.class);
indicesLifecycle.indexShardStateChanged(indexShard, null, "shard created"); indicesLifecycle.indexShardStateChanged(indexShard, null, "shard created");
indicesLifecycle.afterIndexShardCreated(indexShard); indicesLifecycle.afterIndexShardCreated(indexShard);
shards = newMapBuilder(shards).put(shardId.id(), new IndexShardInjectorPair(indexShard, shardInjector)).immutableMap();
settingsService.addListener(indexShard); settingsService.addListener(indexShard);
shards = newMapBuilder(shards).put(shardId.id(), indexShard).immutableMap();
success = true; success = true;
return indexShard; return indexShard;
} catch (IOException e) { } catch (IOException e) {
@ -393,45 +302,35 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
} finally { } finally {
if (success == false) { if (success == false) {
IOUtils.closeWhileHandlingException(lock); IOUtils.closeWhileHandlingException(lock);
if (shardInjector != null) { closeShard("initialization failed", shardId, indexShard, store);
IndexShard indexShard = shardInjector.getInstance(IndexShard.class);
closeShardInjector("initialization failed", shardId, shardInjector, indexShard);
}
} }
} }
} }
static boolean useShadowEngine(boolean primary, Settings indexSettings) {
return primary == false && IndexMetaData.isIndexUsingShadowReplicas(indexSettings);
}
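useShadowEngine is a pure function of the primary flag and the index settings, so its truth table is easy to pin down from a same-package test. A sketch, assuming the 2.x Settings.settingsBuilder() helper and that IndexMetaData.isIndexUsingShadowReplicas reads the index.shadow_replicas setting:

    Settings shadow = Settings.settingsBuilder().put("index.shadow_replicas", true).build();

    assert IndexService.useShadowEngine(false, shadow);                  // replica of a shadow index: true
    assert IndexService.useShadowEngine(true, shadow) == false;          // primaries never use the shadow engine
    assert IndexService.useShadowEngine(false, Settings.EMPTY) == false; // regular index: false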
public synchronized void removeShard(int shardId, String reason) { public synchronized void removeShard(int shardId, String reason) {
final ShardId sId = new ShardId(index, shardId); final ShardId sId = new ShardId(index, shardId);
final Injector shardInjector;
final IndexShard indexShard; final IndexShard indexShard;
if (shards.containsKey(shardId) == false) { if (shards.containsKey(shardId) == false) {
return; return;
} }
logger.debug("[{}] closing... (reason: [{}])", shardId, reason); logger.debug("[{}] closing... (reason: [{}])", shardId, reason);
HashMap<Integer, IndexShardInjectorPair> newShards = new HashMap<>(shards); HashMap<Integer, IndexShard> newShards = new HashMap<>(shards);
IndexShardInjectorPair indexShardInjectorPair = newShards.remove(shardId); indexShard = newShards.remove(shardId);
indexShard = indexShardInjectorPair.getIndexShard();
shardInjector = indexShardInjectorPair.getInjector();
shards = unmodifiableMap(newShards); shards = unmodifiableMap(newShards);
closeShardInjector(reason, sId, shardInjector, indexShard); closeShard(reason, sId, indexShard, indexShard.store());
logger.debug("[{}] closed (reason: [{}])", shardId, reason); logger.debug("[{}] closed (reason: [{}])", shardId, reason);
} }
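createShard and removeShard both mutate the volatile shards map with the same copy-on-write discipline this branch adopts in place of Guava's ImmutableMap: copy the current map, change the copy, publish an unmodifiable view. The idiom in isolation, as a self-contained JDK-only sketch:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    class CopyOnWriteRegistry<V> {
        // volatile publication: readers always see a fully built, immutable snapshot
        private volatile Map<Integer, V> entries = Collections.emptyMap();

        synchronized void put(int id, V value) {
            Map<Integer, V> copy = new HashMap<>(entries); // copy
            copy.put(id, value);                           // mutate the copy
            entries = Collections.unmodifiableMap(copy);   // publish atomically
        }

        synchronized void remove(int id) {
            Map<Integer, V> copy = new HashMap<>(entries);
            copy.remove(id);
            entries = Collections.unmodifiableMap(copy);
        }

        V get(int id) {
            return entries.get(id); // lock-free read of the current snapshot
        }
    }

Writers serialize on the monitor while readers stay lock-free, which is exactly the access pattern of the shards map: rare create/remove, constant lookups.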
private void closeShardInjector(String reason, ShardId sId, Injector shardInjector, IndexShard indexShard) { private void closeShard(String reason, ShardId sId, IndexShard indexShard, Store store) {
final int shardId = sId.id(); final int shardId = sId.id();
try { try {
try { try {
indicesLifecycle.beforeIndexShardClosed(sId, indexShard, indexSettings); indicesLifecycle.beforeIndexShardClosed(sId, indexShard, indexSettings);
} finally { } finally {
// close everything else even if the beforeIndexShardClosed threw an exception
for (Class<? extends Closeable> closeable : pluginsService.shardServices()) {
try {
shardInjector.getInstance(closeable).close();
} catch (Throwable e) {
logger.debug("[{}] failed to clean plugin shard service [{}]", e, shardId, closeable);
}
}
// this logic is tricky, we want to close the engine so we rollback the changes done to it // this logic is tricky, we want to close the engine so we rollback the changes done to it
// and close the shard so no operations are allowed to it // and close the shard so no operations are allowed to it
if (indexShard != null) { if (indexShard != null) {
@ -449,30 +348,13 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
} }
} finally { } finally {
try { try {
shardInjector.getInstance(Store.class).close(); store.close();
} catch (Throwable e) { } catch (Throwable e) {
logger.warn("[{}] failed to close store on shard removal (reason: [{}])", e, shardId, reason); logger.warn("[{}] failed to close store on shard removal (reason: [{}])", e, shardId, reason);
} }
} }
} }
/**
* Closes an optional resource. Returns true if the resource was found;
* NOTE: this method swallows all exceptions thrown from the close method of the injector and logs them as debug log
*/
private boolean closeInjectorOptionalResource(ShardId shardId, Injector shardInjector, Class<? extends Closeable> toClose) {
try {
final Closeable instance = shardInjector.getInstance(toClose);
if (instance == null) {
return false;
}
IOUtils.close(instance);
} catch (Throwable t) {
logger.debug("{} failed to close {}", t, shardId, Strings.toUnderscoreCase(toClose.getSimpleName()));
}
return true;
}
private void onShardClose(ShardLock lock, boolean ownsShard) { private void onShardClose(ShardLock lock, boolean ownsShard) {
if (deleted.get()) { // we remove that shard's content if this index has been deleted if (deleted.get()) { // we remove that shard's content if this index has been deleted
@ -492,6 +374,10 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
} }
} }
public IndexServicesProvider getIndexServices() {
return indexServicesProvider;
}
private class StoreCloseListener implements Store.OnClose { private class StoreCloseListener implements Store.OnClose {
private final ShardId shardId; private final ShardId shardId;
private final boolean ownsShard; private final boolean ownsShard;
@ -533,7 +419,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
@Override @Override
public void onCache(ShardId shardId, Accountable accountable) { public void onCache(ShardId shardId, Accountable accountable) {
if (shardId != null) { if (shardId != null) {
final IndexShard shard = indexService.shard(shardId.id()); final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) { if (shard != null) {
long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0l; long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0l;
shard.shardBitsetFilterCache().onCached(ramBytesUsed); shard.shardBitsetFilterCache().onCached(ramBytesUsed);
@ -544,7 +430,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
@Override @Override
public void onRemoval(ShardId shardId, Accountable accountable) { public void onRemoval(ShardId shardId, Accountable accountable) {
if (shardId != null) { if (shardId != null) {
final IndexShard shard = indexService.shard(shardId.id()); final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) { if (shard != null) {
long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0l; long ramBytesUsed = accountable != null ? accountable.ramBytesUsed() : 0l;
shard.shardBitsetFilterCache().onRemoval(ramBytesUsed); shard.shardBitsetFilterCache().onRemoval(ramBytesUsed);
@ -563,7 +449,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
@Override @Override
public void onCache(ShardId shardId, MappedFieldType.Names fieldNames, FieldDataType fieldDataType, Accountable ramUsage) { public void onCache(ShardId shardId, MappedFieldType.Names fieldNames, FieldDataType fieldDataType, Accountable ramUsage) {
if (shardId != null) { if (shardId != null) {
final IndexShard shard = indexService.shard(shardId.id()); final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) { if (shard != null) {
shard.fieldData().onCache(shardId, fieldNames, fieldDataType, ramUsage); shard.fieldData().onCache(shardId, fieldNames, fieldDataType, ramUsage);
} }
@ -573,7 +459,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
@Override @Override
public void onRemoval(ShardId shardId, MappedFieldType.Names fieldNames, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) { public void onRemoval(ShardId shardId, MappedFieldType.Names fieldNames, FieldDataType fieldDataType, boolean wasEvicted, long sizeInBytes) {
if (shardId != null) { if (shardId != null) {
final IndexShard shard = indexService.shard(shardId.id()); final IndexShard shard = indexService.getShardOrNull(shardId.id());
if (shard != null) { if (shard != null) {
shard.fieldData().onRemoval(shardId, fieldNames, fieldDataType, wasEvicted, sizeInBytes); shard.fieldData().onRemoval(shardId, fieldNames, fieldDataType, wasEvicted, sizeInBytes);
} }

View File

@ -0,0 +1,138 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.aliases.IndexAliasesService;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.threadpool.ThreadPool;
/**
* Simple provider class that holds the Index and Node level services used by
* a shard.
* This is just a temporary solution until we have cleaned up index creation and removed injectors at that level as well.
*/
public final class IndexServicesProvider {
private final IndicesLifecycle indicesLifecycle;
private final ThreadPool threadPool;
private final MapperService mapperService;
private final IndexQueryParserService queryParserService;
private final IndexCache indexCache;
private final IndexAliasesService indexAliasesService;
private final IndicesQueryCache indicesQueryCache;
private final CodecService codecService;
private final TermVectorsService termVectorsService;
private final IndexFieldDataService indexFieldDataService;
private final IndicesWarmer warmer;
private final SimilarityService similarityService;
private final EngineFactory factory;
private final BigArrays bigArrays;
private final IndexSearcherWrapper indexSearcherWrapper;
@Inject
public IndexServicesProvider(IndicesLifecycle indicesLifecycle, ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService, IndicesQueryCache indicesQueryCache, CodecService codecService, TermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService, @Nullable IndicesWarmer warmer, SimilarityService similarityService, EngineFactory factory, BigArrays bigArrays, @Nullable IndexSearcherWrapper indexSearcherWrapper) {
this.indicesLifecycle = indicesLifecycle;
this.threadPool = threadPool;
this.mapperService = mapperService;
this.queryParserService = queryParserService;
this.indexCache = indexCache;
this.indexAliasesService = indexAliasesService;
this.indicesQueryCache = indicesQueryCache;
this.codecService = codecService;
this.termVectorsService = termVectorsService;
this.indexFieldDataService = indexFieldDataService;
this.warmer = warmer;
this.similarityService = similarityService;
this.factory = factory;
this.bigArrays = bigArrays;
this.indexSearcherWrapper = indexSearcherWrapper;
}
public IndicesLifecycle getIndicesLifecycle() {
return indicesLifecycle;
}
public ThreadPool getThreadPool() {
return threadPool;
}
public MapperService getMapperService() {
return mapperService;
}
public IndexQueryParserService getQueryParserService() {
return queryParserService;
}
public IndexCache getIndexCache() {
return indexCache;
}
public IndexAliasesService getIndexAliasesService() {
return indexAliasesService;
}
public IndicesQueryCache getIndicesQueryCache() {
return indicesQueryCache;
}
public CodecService getCodecService() {
return codecService;
}
public TermVectorsService getTermVectorsService() {
return termVectorsService;
}
public IndexFieldDataService getIndexFieldDataService() {
return indexFieldDataService;
}
public IndicesWarmer getWarmer() {
return warmer;
}
public SimilarityService getSimilarityService() {
return similarityService;
}
public EngineFactory getFactory() {
return factory;
}
public BigArrays getBigArrays() {
return bigArrays;
}
public IndexSearcherWrapper getIndexSearcherWrapper() {
return indexSearcherWrapper;
}
}
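The provider replaces per-shard injector lookups with plain constructor wiring: a shard-level component receives one IndexServicesProvider and pulls whatever it needs from it. A hypothetical consumer sketching the pattern (ExampleShardComponent is illustrative, not part of this change):

    import org.elasticsearch.index.mapper.MapperService;
    import org.elasticsearch.threadpool.ThreadPool;

    // Hypothetical shard-level component; the provider is passed in
    // instead of being resolved through a per-shard injector.
    class ExampleShardComponent {
        private final MapperService mapperService;
        private final ThreadPool threadPool;

        ExampleShardComponent(IndexServicesProvider provider) {
            this.mapperService = provider.getMapperService();
            this.threadPool = provider.getThreadPool();
        }
    }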

View File

@ -1,33 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.aliases;
import org.elasticsearch.common.inject.AbstractModule;
/**
*
*/
public class IndexAliasesServiceModule extends AbstractModule {
@Override
protected void configure() {
bind(IndexAliasesService.class).asEagerSingleton();
}
}

View File

@ -319,7 +319,9 @@ public class Analysis {
* @see #isCharacterTokenStream(TokenStream) * @see #isCharacterTokenStream(TokenStream)
*/ */
public static boolean generatesCharacterTokenStream(Analyzer analyzer, String fieldName) throws IOException { public static boolean generatesCharacterTokenStream(Analyzer analyzer, String fieldName) throws IOException {
return isCharacterTokenStream(analyzer.tokenStream(fieldName, "")); try (TokenStream ts = analyzer.tokenStream(fieldName, "")) {
return isCharacterTokenStream(ts);
}
} }
} }

View File

@ -59,6 +59,8 @@ import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
import java.util.function.Supplier;
/** /**
* *
@ -78,7 +80,6 @@ public abstract class Engine implements Closeable {
protected final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock(); protected final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
protected final ReleasableLock readLock = new ReleasableLock(rwl.readLock()); protected final ReleasableLock readLock = new ReleasableLock(rwl.readLock());
protected final ReleasableLock writeLock = new ReleasableLock(rwl.writeLock()); protected final ReleasableLock writeLock = new ReleasableLock(rwl.writeLock());
protected volatile Throwable failedEngine = null; protected volatile Throwable failedEngine = null;
protected Engine(EngineConfig engineConfig) { protected Engine(EngineConfig engineConfig) {
@ -227,8 +228,8 @@ public abstract class Engine implements Closeable {
PENDING_OPERATIONS PENDING_OPERATIONS
} }
final protected GetResult getFromSearcher(Get get) throws EngineException { final protected GetResult getFromSearcher(Get get, Function<String, Searcher> searcherFactory) throws EngineException {
final Searcher searcher = acquireSearcher("get"); final Searcher searcher = searcherFactory.apply("get");
final Versions.DocIdAndVersion docIdAndVersion; final Versions.DocIdAndVersion docIdAndVersion;
try { try {
docIdAndVersion = Versions.loadDocIdAndVersion(searcher.reader(), get.uid()); docIdAndVersion = Versions.loadDocIdAndVersion(searcher.reader(), get.uid());
@ -256,7 +257,11 @@ public abstract class Engine implements Closeable {
} }
} }
public abstract GetResult get(Get get) throws EngineException; public final GetResult get(Get get) throws EngineException {
return get(get, this::acquireSearcher);
}
public abstract GetResult get(Get get, Function<String, Searcher> searcherFactory) throws EngineException;
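Passing a Function<String, Searcher> decouples get from how the searcher is acquired: the final get(Get) overload supplies the engine's own acquireSearcher via a method reference, while a caller such as IndexShard can hand in a factory that decorates the searcher first. A hedged sketch of both call shapes (engine is any concrete Engine; wrap is a hypothetical decorator returning a Searcher):

    // Default path, equivalent to get(get, engine::acquireSearcher):
    Engine.GetResult plain = engine.get(get);

    // Caller-controlled path, e.g. to apply an IndexSearcherWrapper before the lookup:
    Engine.GetResult wrapped = engine.get(get, source -> wrap(engine.acquireSearcher(source)));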
/** /**
* Returns a new searcher instance. The consumer of this * Returns a new searcher instance. The consumer of this
@ -279,7 +284,7 @@ public abstract class Engine implements Closeable {
try { try {
final Searcher retVal = newSearcher(source, searcher, manager); final Searcher retVal = newSearcher(source, searcher, manager);
success = true; success = true;
return config().getWrappingService().wrap(engineConfig, retVal); return retVal;
} finally { } finally {
if (!success) { if (!success) {
manager.release(searcher); manager.release(searcher);

View File

@ -25,6 +25,7 @@ import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCache;
import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeUnit;
@ -32,6 +33,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.elasticsearch.index.shard.MergeSchedulerConfig;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.TranslogRecoveryPerformer; import org.elasticsearch.index.shard.TranslogRecoveryPerformer;
@ -73,7 +75,7 @@ public final class EngineConfig {
private final boolean forceNewTranslog; private final boolean forceNewTranslog;
private final QueryCache queryCache; private final QueryCache queryCache;
private final QueryCachingPolicy queryCachingPolicy; private final QueryCachingPolicy queryCachingPolicy;
private final IndexSearcherWrappingService wrappingService; private final SetOnce<IndexSearcherWrapper> searcherWrapper = new SetOnce<>();
/** /**
* Index setting for compound file on flush. This setting is realtime updateable. * Index setting for compound file on flush. This setting is realtime updateable.
@ -121,7 +123,7 @@ public final class EngineConfig {
Settings indexSettings, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, Settings indexSettings, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer, MergePolicy mergePolicy, MergeSchedulerConfig mergeSchedulerConfig, Analyzer analyzer,
Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener, Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener,
TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, IndexSearcherWrappingService wrappingService, TranslogConfig translogConfig) { TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy, TranslogConfig translogConfig) {
this.shardId = shardId; this.shardId = shardId;
this.indexSettings = indexSettings; this.indexSettings = indexSettings;
this.threadPool = threadPool; this.threadPool = threadPool;
@ -135,7 +137,6 @@ public final class EngineConfig {
this.similarity = similarity; this.similarity = similarity;
this.codecService = codecService; this.codecService = codecService;
this.failedEngineListener = failedEngineListener; this.failedEngineListener = failedEngineListener;
this.wrappingService = wrappingService;
this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush); this.compoundOnFlush = indexSettings.getAsBoolean(EngineConfig.INDEX_COMPOUND_ON_FLUSH, compoundOnFlush);
codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME); codecName = indexSettings.get(EngineConfig.INDEX_CODEC_SETTING, EngineConfig.DEFAULT_CODEC_NAME);
indexingBufferSize = DEFAULT_INDEX_BUFFER_SIZE; indexingBufferSize = DEFAULT_INDEX_BUFFER_SIZE;
@ -380,10 +381,6 @@ public final class EngineConfig {
return queryCachingPolicy; return queryCachingPolicy;
} }
public IndexSearcherWrappingService getWrappingService() {
return wrappingService;
}
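EngineConfig now holds the wrapper in a Lucene SetOnce, turning the single-assignment convention into a runtime guarantee. SetOnce is a small write-once cell; a self-contained illustration:

    import org.apache.lucene.util.SetOnce;

    public class SetOnceDemo {
        public static void main(String[] args) {
            SetOnce<String> wrapper = new SetOnce<>();
            wrapper.set("first");                 // first write succeeds
            System.out.println(wrapper.get());    // prints "first"
            try {
                wrapper.set("second");            // any later write fails
            } catch (SetOnce.AlreadySetException e) {
                System.out.println("already set: the value is write-once");
            }
        }
    }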
/** /**
* Returns the translog config for this engine * Returns the translog config for this engine
*/ */

View File

@ -1,47 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.engine;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
/**
* Extension point to add custom functionality at request time to the {@link DirectoryReader}
* and {@link IndexSearcher} managed by the {@link Engine}.
*/
public interface IndexSearcherWrapper {
/**
* @param reader The provided directory reader to be wrapped to add custom functionality
* @return a new directory reader wrapping the provided directory reader or if no wrapping was performed
* the provided directory reader
*/
DirectoryReader wrap(DirectoryReader reader);
/**
* @param engineConfig The engine config which can be used to get the query cache and query cache policy from
* when creating a new index searcher
* @param searcher The provided index searcher to be wrapped to add custom functionality
* @return a new index searcher wrapping the provided index searcher or if no wrapping was performed
* the provided index searcher
*/
IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws EngineException;
}

View File

@ -66,6 +66,8 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
import java.util.function.Supplier;
/** /**
* *
@ -303,7 +305,7 @@ public class InternalEngine extends Engine {
} }
@Override @Override
public GetResult get(Get get) throws EngineException { public GetResult get(Get get, Function<String, Searcher> searcherFactory) throws EngineException {
try (ReleasableLock lock = readLock.acquire()) { try (ReleasableLock lock = readLock.acquire()) {
ensureOpen(); ensureOpen();
if (get.realtime()) { if (get.realtime()) {
@ -324,7 +326,7 @@ public class InternalEngine extends Engine {
} }
// no version, get the version from the index, we know that we refresh on flush // no version, get the version from the index, we know that we refresh on flush
return getFromSearcher(get); return getFromSearcher(get, searcherFactory);
} }
} }

View File

@ -35,6 +35,7 @@ import org.elasticsearch.index.translog.Translog;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
import java.util.function.Function;
/** /**
* ShadowEngine is a specialized engine that only allows read-only operations * ShadowEngine is a specialized engine that only allows read-only operations
@ -168,9 +169,9 @@ public class ShadowEngine extends Engine {
} }
@Override @Override
public GetResult get(Get get) throws EngineException { public GetResult get(Get get, Function<String, Searcher> searcherFactory) throws EngineException {
// There is no translog, so we can get it directly from the searcher // There is no translog, so we can get it directly from the searcher
return getFromSearcher(get); return getFromSearcher(get, searcherFactory);
} }
@Override @Override

View File

@ -19,6 +19,7 @@
package org.elasticsearch.index.mapper.core; package org.elasticsearch.index.mapper.core;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.document.Field; import org.apache.lucene.document.Field;
@ -145,7 +146,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
if (valueAndBoost.value() == null) { if (valueAndBoost.value() == null) {
count = fieldType().nullValue(); count = fieldType().nullValue();
} else { } else {
count = countPositions(analyzer.analyzer().tokenStream(simpleName(), valueAndBoost.value())); count = countPositions(analyzer, simpleName(), valueAndBoost.value());
} }
addIntegerFields(context, fields, count, valueAndBoost.boost()); addIntegerFields(context, fields, count, valueAndBoost.boost());
} }
@ -156,12 +157,14 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
/** /**
* Count position increments in a token stream. Package private for testing. * Count position increments in a token stream. Package private for testing.
* @param tokenStream token stream to count * @param analyzer analyzer to create token stream
* @param fieldName field name to pass to analyzer
* @param fieldValue field value to pass to analyzer
* @return number of position increments in a token stream * @return number of position increments in a token stream
* @throws IOException if tokenStream throws it * @throws IOException if tokenStream throws it
*/ */
static int countPositions(TokenStream tokenStream) throws IOException { static int countPositions(Analyzer analyzer, String fieldName, String fieldValue) throws IOException {
try { try (TokenStream tokenStream = analyzer.tokenStream(fieldName, fieldValue)) {
int count = 0; int count = 0;
PositionIncrementAttribute position = tokenStream.addAttribute(PositionIncrementAttribute.class); PositionIncrementAttribute position = tokenStream.addAttribute(PositionIncrementAttribute.class);
tokenStream.reset(); tokenStream.reset();
@ -171,8 +174,6 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
tokenStream.end(); tokenStream.end();
count += position.getPositionIncrement(); count += position.getPositionIncrement();
return count; return count;
} finally {
tokenStream.close();
} }
} }
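The rewritten countPositions owns the entire TokenStream lifecycle: it builds the stream from the analyzer, and the try-with-resources block guarantees close() on every exit path, including exceptions from reset() or incrementToken(). The same pattern outside the mapper, sketched with Lucene's WhitespaceAnalyzer:

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
    import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
    import java.io.IOException;

    static int countPositions(Analyzer analyzer, String field, String value) throws IOException {
        // try-with-resources closes the stream on every exit path
        try (TokenStream ts = analyzer.tokenStream(field, value)) {
            PositionIncrementAttribute pos = ts.addAttribute(PositionIncrementAttribute.class);
            ts.reset();
            int count = 0;
            while (ts.incrementToken()) {
                count += pos.getPositionIncrement();
            }
            ts.end(); // account for a trailing position increment, as the mapper does
            count += pos.getPositionIncrement();
            return count;
        }
    }

With new WhitespaceAnalyzer() and the value "a b c", this returns 3.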

View File

@ -16,7 +16,7 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
package org.elasticsearch.index.percolator.stats; package org.elasticsearch.index.percolator;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;

View File

@ -19,6 +19,7 @@
package org.elasticsearch.index.percolator; package org.elasticsearch.index.percolator;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term; import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
@ -27,6 +28,8 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
@ -41,20 +44,18 @@ import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.DocumentTypeListener; import org.elasticsearch.index.mapper.DocumentTypeListener;
import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.index.percolator.stats.ShardPercolateService;
import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.percolator.PercolatorService;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
import java.util.Map; import java.util.Map;
import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
/** /**
@ -64,39 +65,35 @@ import java.util.concurrent.atomic.AtomicBoolean;
* Once a document type has been created, the real-time percolator will start to listen to write events and update * Once a document type has been created, the real-time percolator will start to listen to write events and update
* this registry with queries in real time. * this registry with queries in real time.
*/ */
public class PercolatorQueriesRegistry extends AbstractIndexShardComponent implements Closeable{ public final class PercolatorQueriesRegistry extends AbstractIndexShardComponent implements Closeable {
public final String MAP_UNMAPPED_FIELDS_AS_STRING = "index.percolator.map_unmapped_fields_as_string"; public final String MAP_UNMAPPED_FIELDS_AS_STRING = "index.percolator.map_unmapped_fields_as_string";
// This is a shard level service, but these below are index level service: // This is a shard level service, but these below are index level service:
private final IndexQueryParserService queryParserService; private final IndexQueryParserService queryParserService;
private final MapperService mapperService; private final MapperService mapperService;
private final IndicesLifecycle indicesLifecycle;
private final IndexFieldDataService indexFieldDataService; private final IndexFieldDataService indexFieldDataService;
private final ShardIndexingService indexingService; private final ShardIndexingService indexingService;
private final ShardPercolateService shardPercolateService;
private final ConcurrentMap<BytesRef, Query> percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency(); private final ConcurrentMap<BytesRef, Query> percolateQueries = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
private final ShardLifecycleListener shardLifecycleListener = new ShardLifecycleListener();
private final RealTimePercolatorOperationListener realTimePercolatorOperationListener = new RealTimePercolatorOperationListener(); private final RealTimePercolatorOperationListener realTimePercolatorOperationListener = new RealTimePercolatorOperationListener();
private final PercolateTypeListener percolateTypeListener = new PercolateTypeListener(); private final PercolateTypeListener percolateTypeListener = new PercolateTypeListener();
private final AtomicBoolean realTimePercolatorEnabled = new AtomicBoolean(false); private final AtomicBoolean realTimePercolatorEnabled = new AtomicBoolean(false);
private boolean mapUnmappedFieldsAsString; private boolean mapUnmappedFieldsAsString;
private final MeanMetric percolateMetric = new MeanMetric();
private final CounterMetric currentMetric = new CounterMetric();
private final CounterMetric numberOfQueries = new CounterMetric();
public PercolatorQueriesRegistry(ShardId shardId, @IndexSettings Settings indexSettings, IndexQueryParserService queryParserService, public PercolatorQueriesRegistry(ShardId shardId, @IndexSettings Settings indexSettings, IndexQueryParserService queryParserService,
ShardIndexingService indexingService, IndicesLifecycle indicesLifecycle, MapperService mapperService, ShardIndexingService indexingService, MapperService mapperService,
IndexFieldDataService indexFieldDataService, ShardPercolateService shardPercolateService) { IndexFieldDataService indexFieldDataService) {
super(shardId, indexSettings); super(shardId, indexSettings);
this.queryParserService = queryParserService; this.queryParserService = queryParserService;
this.mapperService = mapperService; this.mapperService = mapperService;
this.indicesLifecycle = indicesLifecycle;
this.indexingService = indexingService; this.indexingService = indexingService;
this.indexFieldDataService = indexFieldDataService; this.indexFieldDataService = indexFieldDataService;
this.shardPercolateService = shardPercolateService;
this.mapUnmappedFieldsAsString = indexSettings.getAsBoolean(MAP_UNMAPPED_FIELDS_AS_STRING, false); this.mapUnmappedFieldsAsString = indexSettings.getAsBoolean(MAP_UNMAPPED_FIELDS_AS_STRING, false);
indicesLifecycle.addListener(shardLifecycleListener);
mapperService.addTypeListener(percolateTypeListener); mapperService.addTypeListener(percolateTypeListener);
} }
@ -107,7 +104,6 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple
@Override @Override
public void close() { public void close() {
mapperService.removeTypeListener(percolateTypeListener); mapperService.removeTypeListener(percolateTypeListener);
indicesLifecycle.removeListener(shardLifecycleListener);
indexingService.removeListener(realTimePercolatorOperationListener); indexingService.removeListener(realTimePercolatorOperationListener);
clear(); clear();
} }
@ -116,30 +112,25 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple
percolateQueries.clear(); percolateQueries.clear();
} }
void enableRealTimePercolator() { public void enableRealTimePercolator() {
if (realTimePercolatorEnabled.compareAndSet(false, true)) { if (realTimePercolatorEnabled.compareAndSet(false, true)) {
indexingService.addListener(realTimePercolatorOperationListener); indexingService.addListener(realTimePercolatorOperationListener);
} }
} }
void disableRealTimePercolator() {
if (realTimePercolatorEnabled.compareAndSet(true, false)) {
indexingService.removeListener(realTimePercolatorOperationListener);
}
}
public void addPercolateQuery(String idAsString, BytesReference source) { public void addPercolateQuery(String idAsString, BytesReference source) {
Query newquery = parsePercolatorDocument(idAsString, source); Query newquery = parsePercolatorDocument(idAsString, source);
BytesRef id = new BytesRef(idAsString); BytesRef id = new BytesRef(idAsString);
Query previousQuery = percolateQueries.put(id, newquery); percolateQueries.put(id, newquery);
shardPercolateService.addedQuery(id, previousQuery, newquery); numberOfQueries.inc();
} }
public void removePercolateQuery(String idAsString) { public void removePercolateQuery(String idAsString) {
BytesRef id = new BytesRef(idAsString); BytesRef id = new BytesRef(idAsString);
Query query = percolateQueries.remove(id); Query query = percolateQueries.remove(id);
if (query != null) { if (query != null) {
shardPercolateService.removedQuery(id, query); numberOfQueries.dec();
} }
} }
@ -225,55 +216,27 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple
enableRealTimePercolator(); enableRealTimePercolator();
} }
} }
} }
private class ShardLifecycleListener extends IndicesLifecycle.Listener { public void loadQueries(IndexReader reader) {
logger.trace("loading percolator queries...");
@Override final int loadedQueries;
public void afterIndexShardCreated(IndexShard indexShard) { try {
if (hasPercolatorType(indexShard)) { Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME));
enableRealTimePercolator(); QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
IndexSearcher indexSearcher = new IndexSearcher(reader);
indexSearcher.setQueryCache(null);
indexSearcher.search(query, queryCollector);
Map<BytesRef, Query> queries = queryCollector.queries();
for (Map.Entry<BytesRef, Query> entry : queries.entrySet()) {
percolateQueries.put(entry.getKey(), entry.getValue());
numberOfQueries.inc();
} }
loadedQueries = queries.size();
} catch (Exception e) {
throw new PercolatorException(shardId.index(), "failed to load queries from percolator index", e);
} }
logger.debug("done loading [{}] percolator queries", loadedQueries);
@Override
public void beforeIndexShardPostRecovery(IndexShard indexShard) {
if (hasPercolatorType(indexShard)) {
// percolator index has started, fetch what we can from it and initialize the indices
// we have
logger.trace("loading percolator queries for [{}]...", shardId);
int loadedQueries = loadQueries(indexShard);
logger.debug("done loading [{}] percolator queries for [{}]", loadedQueries, shardId);
}
}
private boolean hasPercolatorType(IndexShard indexShard) {
ShardId otherShardId = indexShard.shardId();
return shardId.equals(otherShardId) && mapperService.hasMapping(PercolatorService.TYPE_NAME);
}
private int loadQueries(IndexShard shard) {
shard.refresh("percolator_load_queries");
// NOTE: we acquire the searcher via the engine directly here since this is executed right
// before the shard is marked as POST_RECOVERY
try (Engine.Searcher searcher = shard.engine().acquireSearcher("percolator_load_queries")) {
Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME));
QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService);
IndexSearcher indexSearcher = new IndexSearcher(searcher.reader());
indexSearcher.setQueryCache(null);
indexSearcher.search(query, queryCollector);
Map<BytesRef, Query> queries = queryCollector.queries();
for (Map.Entry<BytesRef, Query> entry : queries.entrySet()) {
Query previousQuery = percolateQueries.put(entry.getKey(), entry.getValue());
shardPercolateService.addedQuery(entry.getKey(), previousQuery, entry.getValue());
}
return queries.size();
} catch (Exception e) {
throw new PercolatorException(shardId.index(), "failed to load queries from percolator index", e);
}
}
} }
private class RealTimePercolatorOperationListener extends IndexingOperationListener { private class RealTimePercolatorOperationListener extends IndexingOperationListener {
@ -320,4 +283,35 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple
} }
} }
} }
public void prePercolate() {
currentMetric.inc();
}
public void postPercolate(long tookInNanos) {
currentMetric.dec();
percolateMetric.inc(tookInNanos);
}
/**
* @return The current metrics
*/
public PercolateStats stats() {
return new PercolateStats(percolateMetric.count(), TimeUnit.NANOSECONDS.toMillis(percolateMetric.sum()), currentMetric.count(), -1, numberOfQueries.count());
}
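Moving the metrics in from ShardPercolateService keeps their semantics: currentMetric counts in-flight percolations, percolateMetric accumulates invocation count and total took-time, and numberOfQueries tracks registered queries. How a caller is expected to bracket a percolate call (registry stands for any PercolatorQueriesRegistry instance):

    long start = System.nanoTime();
    registry.prePercolate();                               // currentMetric++
    try {
        // ... execute the percolate request ...
    } finally {
        registry.postPercolate(System.nanoTime() - start); // currentMetric--, record took-time
    }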
// Enable when a more efficient manner is found for estimating the size of a Lucene query.
/*private static long computeSizeInMemory(HashedBytesRef id, Query query) {
long size = (3 * RamUsageEstimator.NUM_BYTES_INT) + RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + id.bytes.bytes.length;
size += RamEstimator.sizeOf(query);
return size;
}
private static final class RamEstimator {
// we move this into it's own class to exclude it from the forbidden API checks
// it's fine to use here!
static long sizeOf(Query query) {
return RamUsageEstimator.sizeOf(query);
}
}*/
} }

View File

@ -1,93 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.percolator.stats;
import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.metrics.CounterMetric;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.shard.AbstractIndexShardComponent;
import org.elasticsearch.index.shard.ShardId;
import java.util.concurrent.TimeUnit;
/**
* Shard level percolator service that maintains percolator metrics:
* <ul>
* <li> total time spent in percolate api
* <li> the current number of percolate requests
* <li> number of registered percolate queries
* </ul>
*/
public class ShardPercolateService extends AbstractIndexShardComponent {
@Inject
public ShardPercolateService(ShardId shardId, @IndexSettings Settings indexSettings) {
super(shardId, indexSettings);
}
private final MeanMetric percolateMetric = new MeanMetric();
private final CounterMetric currentMetric = new CounterMetric();
private final CounterMetric numberOfQueries = new CounterMetric();
public void prePercolate() {
currentMetric.inc();
}
public void postPercolate(long tookInNanos) {
currentMetric.dec();
percolateMetric.inc(tookInNanos);
}
public void addedQuery(BytesRef id, Query previousQuery, Query newQuery) {
numberOfQueries.inc();
}
public void removedQuery(BytesRef id, Query query) {
numberOfQueries.dec();
}
/**
* @return The current metrics
*/
public PercolateStats stats() {
return new PercolateStats(percolateMetric.count(), TimeUnit.NANOSECONDS.toMillis(percolateMetric.sum()), currentMetric.count(), -1, numberOfQueries.count());
}
// Enable when a more efficient manner is found for estimating the size of a Lucene query.
/*private static long computeSizeInMemory(HashedBytesRef id, Query query) {
long size = (3 * RamUsageEstimator.NUM_BYTES_INT) + RamUsageEstimator.NUM_BYTES_OBJECT_REF + RamUsageEstimator.NUM_BYTES_OBJECT_HEADER + id.bytes.bytes.length;
size += RamEstimator.sizeOf(query);
return size;
}
private static final class RamEstimator {
// we move this into its own class to exclude it from the forbidden API checks
// it's fine to use here!
static long sizeOf(Query query) {
return RamUsageEstimator.sizeOf(query);
}
}*/
}

View File

@ -22,7 +22,6 @@ package org.elasticsearch.index.query;
import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.Query; import org.apache.lucene.search.Query;
import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.Queries;
@ -47,9 +46,19 @@ public class IdsQueryBuilder extends AbstractQueryBuilder<IdsQueryBuilder> {
static final IdsQueryBuilder PROTOTYPE = new IdsQueryBuilder(); static final IdsQueryBuilder PROTOTYPE = new IdsQueryBuilder();
/** /**
* Creates a new IdsQueryBuilder by optionally providing the types of the documents to look for * Creates a new IdsQueryBuilder without providing the types of the documents to look for
*/ */
public IdsQueryBuilder(@Nullable String... types) { public IdsQueryBuilder() {
this.types = new String[0];
}
/**
* Creates a new IdsQueryBuilder by providing the types of the documents to look for
*/
public IdsQueryBuilder(String... types) {
if (types == null) {
throw new IllegalArgumentException("[ids] types cannot be null");
}
this.types = types; this.types = types;
} }
@ -64,32 +73,13 @@ public class IdsQueryBuilder extends AbstractQueryBuilder<IdsQueryBuilder> {
* Adds ids to the query. * Adds ids to the query.
*/ */
public IdsQueryBuilder addIds(String... ids) { public IdsQueryBuilder addIds(String... ids) {
if (ids == null) {
throw new IllegalArgumentException("[ids] ids cannot be null");
}
Collections.addAll(this.ids, ids); Collections.addAll(this.ids, ids);
return this; return this;
} }
/**
* Adds ids to the query.
*/
public IdsQueryBuilder addIds(Collection<String> ids) {
this.ids.addAll(ids);
return this;
}
/**
* Adds ids to the filter.
*/
public IdsQueryBuilder ids(String... ids) {
return addIds(ids);
}
/**
* Adds ids to the filter.
*/
public IdsQueryBuilder ids(Collection<String> ids) {
return addIds(ids);
}
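The constructor split tightens the builder's contract: the no-arg form now stands for "all types", the varargs form rejects null outright, and addIds does the same. A short usage sketch:

    // Match ids across all types in the query context:
    IdsQueryBuilder allTypes = new IdsQueryBuilder().addIds("1", "2");

    // Restrict to a single type:
    IdsQueryBuilder typed = new IdsQueryBuilder("my_type").addIds("3");

    // Both of these now fail fast with IllegalArgumentException:
    // new IdsQueryBuilder((String[]) null);
    // new IdsQueryBuilder().addIds((String[]) null);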
/** /**
* Returns the ids for the query. * Returns the ids for the query.
*/ */
@ -100,13 +90,7 @@ public class IdsQueryBuilder extends AbstractQueryBuilder<IdsQueryBuilder> {
@Override @Override
protected void doXContent(XContentBuilder builder, Params params) throws IOException { protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME); builder.startObject(NAME);
if (types != null) { builder.array("types", types);
if (types.length == 1) {
builder.field("type", types[0]);
} else {
builder.array("types", types);
}
}
builder.startArray("values"); builder.startArray("values");
for (String value : ids) { for (String value : ids) {
builder.value(value); builder.value(value);
@ -128,7 +112,7 @@ public class IdsQueryBuilder extends AbstractQueryBuilder<IdsQueryBuilder> {
query = Queries.newMatchNoDocsQuery(); query = Queries.newMatchNoDocsQuery();
} else { } else {
Collection<String> typesForQuery; Collection<String> typesForQuery;
if (types == null || types.length == 0) { if (types.length == 0) {
typesForQuery = context.queryTypes(); typesForQuery = context.queryTypes();
} else if (types.length == 1 && MetaData.ALL.equals(types[0])) { } else if (types.length == 1 && MetaData.ALL.equals(types[0])) {
typesForQuery = context.mapperService().types(); typesForQuery = context.mapperService().types();
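A usage sketch of the stricter builder API after this change (index, type, and id values are hypothetical): the no-arg constructor now expresses "all types", and null arguments fail fast instead of being silently accepted.

    // matches the given ids across all types
    IdsQueryBuilder allTypes = new IdsQueryBuilder().addIds("1", "2");
    // matches only within an explicit type
    IdsQueryBuilder oneType = new IdsQueryBuilder("my_type").addIds("3");
    // both of these now throw IllegalArgumentException:
    // new IdsQueryBuilder((String[]) null);
    // new IdsQueryBuilder("my_type").addIds((String[]) null);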

View File: IndexQueryParserService.java

@ -197,15 +197,6 @@ public class IndexQueryParserService extends AbstractIndexComponent {
} }
} }
@Nullable
public Query parseInnerQuery(QueryShardContext context) throws IOException {
Query query = context.parseContext().parseInnerQueryBuilder().toQuery(context);
if (query == null) {
query = Queries.newMatchNoDocsQuery();
}
return query;
}
public QueryShardContext getShardContext() { public QueryShardContext getShardContext() {
return cache.get(); return cache.get();
} }
@ -258,16 +249,41 @@ public class IndexQueryParserService extends AbstractIndexComponent {
context.reset(parser); context.reset(parser);
try { try {
context.parseFieldMatcher(parseFieldMatcher); context.parseFieldMatcher(parseFieldMatcher);
Query query = context.parseContext().parseInnerQueryBuilder().toQuery(context); Query query = parseInnerQuery(context);
if (query == null) {
query = Queries.newMatchNoDocsQuery();
}
return new ParsedQuery(query, context.copyNamedQueries()); return new ParsedQuery(query, context.copyNamedQueries());
} finally { } finally {
context.reset(null); context.reset(null);
} }
} }
public Query parseInnerQuery(QueryShardContext context) throws IOException {
return toQuery(context.parseContext().parseInnerQueryBuilder(), context);
}
public ParsedQuery toQuery(QueryBuilder<?> queryBuilder) {
QueryShardContext context = cache.get();
context.reset();
context.parseFieldMatcher(parseFieldMatcher);
try {
Query query = toQuery(queryBuilder, context);
return new ParsedQuery(query, context.copyNamedQueries());
} catch (QueryShardException | ParsingException e) {
throw e;
} catch (Exception e) {
throw new QueryShardException(context, "failed to create query: {}", e, queryBuilder);
} finally {
context.reset();
}
}
private static Query toQuery(QueryBuilder<?> queryBuilder, QueryShardContext context) throws IOException {
Query query = queryBuilder.toQuery(context);
if (query == null) {
query = Queries.newMatchNoDocsQuery();
}
return query;
}
public ParseFieldMatcher parseFieldMatcher() { public ParseFieldMatcher parseFieldMatcher() {
return parseFieldMatcher; return parseFieldMatcher;
} }
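A hedged sketch of the new toQuery entry point: it builds a ParsedQuery straight from a QueryBuilder, normalizes a null inner query to match-no-docs, and rethrows unexpected failures as QueryShardException. The queryParserService variable is assumed to be an injected IndexQueryParserService.

    ParsedQuery parsed = queryParserService.toQuery(QueryBuilders.idsQuery("my_type").addIds("1"));
    Query luceneQuery = parsed.query(); // never null: a null inner query becomes Queries.newMatchNoDocsQuery()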

View File: QueryBuilders.java

@ -19,7 +19,6 @@
package org.elasticsearch.index.query; package org.elasticsearch.index.query;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.ShapeRelation;
@ -109,12 +108,19 @@ public abstract class QueryBuilders {
return new DisMaxQueryBuilder(); return new DisMaxQueryBuilder();
} }
/**
* Constructs a query that will match only specific ids within all types.
*/
public static IdsQueryBuilder idsQuery() {
return new IdsQueryBuilder();
}
/** /**
* Constructs a query that will match only specific ids within types. * Constructs a query that will match only specific ids within types.
* *
* @param types The mapping/doc type * @param types The mapping/doc type
*/ */
public static IdsQueryBuilder idsQuery(@Nullable String... types) { public static IdsQueryBuilder idsQuery(String... types) {
return new IdsQueryBuilder(types); return new IdsQueryBuilder(types);
} }
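The nullable varargs overload is gone; callers pick one of the two factories explicitly, for example:

    IdsQueryBuilder acrossAllTypes = QueryBuilders.idsQuery();    // no types given: resolved against the context's query types
    IdsQueryBuilder scoped = QueryBuilders.idsQuery("my_type");   // explicit types; passing null now throws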

View File: IndexSearcherWrapper.java (moved from org.elasticsearch.index.engine, rewritten from IndexSearcherWrappingService)

@ -17,59 +17,47 @@
* under the License. * under the License.
*/ */
package org.elasticsearch.index.engine; package org.elasticsearch.index.shard;
import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.IndexSearcher;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.Engine.Searcher; import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.EngineException;
import java.util.Set; import java.io.IOException;
/** /**
* Service responsible for wrapping the {@link DirectoryReader} and {@link IndexSearcher} of a {@link Searcher} via the * Extension point to add custom functionality at request time to the {@link DirectoryReader}
* configured {@link IndexSearcherWrapper} instance. This allows custom functionally to be added the {@link Searcher} * and {@link IndexSearcher} managed by the {@link Engine}.
* before being used to do an operation (search, get, field stats etc.)
*/ */
// TODO: This needs extension point is a bit hacky now, because the IndexSearch from the engine can only be wrapped once, public interface IndexSearcherWrapper {
// if we allowed the IndexSearcher to be wrapped multiple times then a custom IndexSearcherWrapper needs have good
// control over its location in the wrapping chain
public final class IndexSearcherWrappingService {
private final IndexSearcherWrapper wrapper; /**
* @param reader The provided directory reader to be wrapped to add custom functionality
* @return a new directory reader wrapping the provided directory reader or if no wrapping was performed
* the provided directory reader
*/
DirectoryReader wrap(DirectoryReader reader) throws IOException;
// for unit tests: /**
IndexSearcherWrappingService() { * @param engineConfig The engine config which can be used to get the query cache and query cache policy from
this.wrapper = null; * when creating a new index searcher
} * @param searcher The provided index searcher to be wrapped to add custom functionality
* @return a new index searcher wrapping the provided index searcher or if no wrapping was performed
@Inject * the provided index searcher
// Use a Set parameter here, because constructor parameter can't be optional */
// and I prefer to keep the `wrapper` field final. IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws IOException;
public IndexSearcherWrappingService(Set<IndexSearcherWrapper> wrappers) {
if (wrappers.size() > 1) {
throw new IllegalStateException("wrapping of the index searcher by more than one wrappers is forbidden, found the following wrappers [" + wrappers + "]");
}
if (wrappers.isEmpty()) {
this.wrapper = null;
} else {
this.wrapper = wrappers.iterator().next();
}
}
/** /**
* If there are configured {@link IndexSearcherWrapper} instances, the {@link IndexSearcher} of the provided engine searcher * If there are configured {@link IndexSearcherWrapper} instances, the {@link IndexSearcher} of the provided engine searcher
* gets wrapped and a new {@link Searcher} instance is returned, otherwise the provided {@link Searcher} is returned. * gets wrapped and a new {@link Engine.Searcher} instance is returned, otherwise the provided {@link Engine.Searcher} is returned.
* *
* This is invoked each time a {@link Searcher} is requested to do an operation (for example, search). * This is invoked each time a {@link Engine.Searcher} is requested to do an operation (for example, search).
*/ */
public Searcher wrap(EngineConfig engineConfig, final Searcher engineSearcher) throws EngineException { default Engine.Searcher wrap(EngineConfig engineConfig, Engine.Searcher engineSearcher) throws IOException {
if (wrapper == null) { DirectoryReader reader = wrap((DirectoryReader) engineSearcher.reader());
return engineSearcher;
}
DirectoryReader reader = wrapper.wrap((DirectoryReader) engineSearcher.reader());
IndexSearcher innerIndexSearcher = new IndexSearcher(reader); IndexSearcher innerIndexSearcher = new IndexSearcher(reader);
innerIndexSearcher.setQueryCache(engineConfig.getQueryCache()); innerIndexSearcher.setQueryCache(engineConfig.getQueryCache());
innerIndexSearcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy()); innerIndexSearcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy());
@ -77,12 +65,11 @@ public final class IndexSearcherWrappingService {
// TODO: Right now IndexSearcher isn't wrapper friendly, when it becomes wrapper friendly we should revise this extension point // TODO: Right now IndexSearcher isn't wrapper friendly, when it becomes wrapper friendly we should revise this extension point
// For example if IndexSearcher#rewrite() is overridden, then IndexSearcher#createNormalizedWeight also needs to be overridden // For example if IndexSearcher#rewrite() is overridden, then IndexSearcher#createNormalizedWeight also needs to be overridden
// This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped multiple times // This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped multiple times
IndexSearcher indexSearcher = wrapper.wrap(engineConfig, innerIndexSearcher); IndexSearcher indexSearcher = wrap(engineConfig, innerIndexSearcher);
if (reader == engineSearcher.reader() && indexSearcher == innerIndexSearcher) { if (reader == engineSearcher.reader() && indexSearcher == innerIndexSearcher) {
return engineSearcher; return engineSearcher;
} else { } else {
return new Engine.Searcher(engineSearcher.source(), indexSearcher) { return new Engine.Searcher(engineSearcher.source(), indexSearcher) {
@Override @Override
public void close() throws ElasticsearchException { public void close() throws ElasticsearchException {
engineSearcher.close(); engineSearcher.close();
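With the wrapping service gone, an extension implements the interface directly. A minimal sketch (hypothetical class name): returning the arguments unchanged makes the default wrap(...) hand back the original Engine.Searcher, so a no-op wrapper costs nothing.

    import java.io.IOException;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.search.IndexSearcher;
    import org.elasticsearch.index.engine.EngineConfig;
    import org.elasticsearch.index.shard.IndexSearcherWrapper;

    public class NoopSearcherWrapper implements IndexSearcherWrapper {
        @Override
        public DirectoryReader wrap(DirectoryReader reader) throws IOException {
            return reader; // no reader-level wrapping
        }

        @Override
        public IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws IOException {
            return searcher; // no searcher-level wrapping either
        }
    }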

View File: IndexShard.java

@ -20,10 +20,7 @@
package org.elasticsearch.index.shard; package org.elasticsearch.index.shard;
import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.codecs.PostingsFormat;
import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.*;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
import org.apache.lucene.index.SnapshotDeletionPolicy;
import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy; import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.AlreadyClosedException;
@ -36,6 +33,7 @@ import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest; import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
import org.elasticsearch.action.termvectors.TermVectorsRequest; import org.elasticsearch.action.termvectors.TermVectorsRequest;
import org.elasticsearch.action.termvectors.TermVectorsResponse; import org.elasticsearch.action.termvectors.TermVectorsResponse;
import org.elasticsearch.bootstrap.Elasticsearch;
import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
@ -51,11 +49,11 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.gateway.MetaDataStateFormat; import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.IndexServicesProvider;
import org.elasticsearch.index.VersionType; import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.aliases.IndexAliasesService; import org.elasticsearch.index.aliases.IndexAliasesService;
import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.cache.IndexCache;
@ -75,8 +73,8 @@ import org.elasticsearch.index.indexing.IndexingStats;
import org.elasticsearch.index.indexing.ShardIndexingService; import org.elasticsearch.index.indexing.ShardIndexingService;
import org.elasticsearch.index.mapper.*; import org.elasticsearch.index.mapper.*;
import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.PercolateStats;
import org.elasticsearch.index.percolator.PercolatorQueriesRegistry; import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
import org.elasticsearch.index.percolator.stats.ShardPercolateService;
import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats; import org.elasticsearch.index.refresh.RefreshStats;
@ -99,12 +97,12 @@ import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.index.translog.TranslogWriter; import org.elasticsearch.index.translog.TranslogWriter;
import org.elasticsearch.index.warmer.ShardIndexWarmerService; import org.elasticsearch.index.warmer.ShardIndexWarmerService;
import org.elasticsearch.index.warmer.WarmerStats; import org.elasticsearch.index.warmer.WarmerStats;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.InternalIndicesLifecycle; import org.elasticsearch.indices.InternalIndicesLifecycle;
import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.indices.recovery.RecoveryFailedException; import org.elasticsearch.indices.recovery.RecoveryFailedException;
import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat; import org.elasticsearch.search.suggest.completion.Completion090PostingsFormat;
import org.elasticsearch.search.suggest.completion.CompletionStats; import org.elasticsearch.search.suggest.completion.CompletionStats;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
@ -137,7 +135,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
private final ShardRequestCache shardQueryCache; private final ShardRequestCache shardQueryCache;
private final ShardFieldData shardFieldData; private final ShardFieldData shardFieldData;
private final PercolatorQueriesRegistry percolatorQueriesRegistry; private final PercolatorQueriesRegistry percolatorQueriesRegistry;
private final ShardPercolateService shardPercolateService;
private final TermVectorsService termVectorsService; private final TermVectorsService termVectorsService;
private final IndexFieldDataService indexFieldDataService; private final IndexFieldDataService indexFieldDataService;
private final ShardSuggestMetric shardSuggestMetric = new ShardSuggestMetric(); private final ShardSuggestMetric shardSuggestMetric = new ShardSuggestMetric();
@ -161,7 +158,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
protected volatile IndexShardState state; protected volatile IndexShardState state;
protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>(); protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();
protected final EngineFactory engineFactory; protected final EngineFactory engineFactory;
private final IndexSearcherWrappingService wrappingService;
@Nullable @Nullable
private RecoveryState recoveryState; private RecoveryState recoveryState;
@ -190,42 +186,36 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
private final IndexShardOperationCounter indexShardOperationCounter; private final IndexShardOperationCounter indexShardOperationCounter;
private EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY); private final EnumSet<IndexShardState> readAllowedStates = EnumSet.of(IndexShardState.STARTED, IndexShardState.RELOCATED, IndexShardState.POST_RECOVERY);
private final IndexSearcherWrapper searcherWrapper;
@Inject @Inject
public IndexShard(ShardId shardId, @IndexSettings Settings indexSettings, IndicesLifecycle indicesLifecycle, Store store, public IndexShard(ShardId shardId, @IndexSettings Settings indexSettings, ShardPath path, Store store, IndexServicesProvider provider) {
ThreadPool threadPool, MapperService mapperService, IndexQueryParserService queryParserService, IndexCache indexCache, IndexAliasesService indexAliasesService,
IndicesQueryCache indicesQueryCache, CodecService codecService,
TermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService,
@Nullable IndicesWarmer warmer, SimilarityService similarityService, EngineFactory factory,
ShardPath path, BigArrays bigArrays, IndexSearcherWrappingService wrappingService) {
super(shardId, indexSettings); super(shardId, indexSettings);
this.codecService = codecService; this.codecService = provider.getCodecService();
this.warmer = warmer; this.warmer = provider.getWarmer();
this.deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); this.deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
this.similarityService = similarityService; this.similarityService = provider.getSimilarityService();
this.wrappingService = wrappingService;
Objects.requireNonNull(store, "Store must be provided to the index shard"); Objects.requireNonNull(store, "Store must be provided to the index shard");
this.engineFactory = factory; this.engineFactory = provider.getFactory();
this.indicesLifecycle = (InternalIndicesLifecycle) indicesLifecycle; this.indicesLifecycle = (InternalIndicesLifecycle) provider.getIndicesLifecycle();
this.store = store; this.store = store;
this.mergeSchedulerConfig = new MergeSchedulerConfig(indexSettings); this.mergeSchedulerConfig = new MergeSchedulerConfig(indexSettings);
this.threadPool = threadPool; this.threadPool = provider.getThreadPool();
this.mapperService = mapperService; this.mapperService = provider.getMapperService();
this.queryParserService = queryParserService; this.queryParserService = provider.getQueryParserService();
this.indexCache = indexCache; this.indexCache = provider.getIndexCache();
this.indexAliasesService = indexAliasesService; this.indexAliasesService = provider.getIndexAliasesService();
this.indexingService = new ShardIndexingService(shardId, indexSettings); this.indexingService = new ShardIndexingService(shardId, indexSettings);
this.getService = new ShardGetService(this, mapperService); this.getService = new ShardGetService(this, mapperService);
this.termVectorsService = termVectorsService; this.termVectorsService = provider.getTermVectorsService();
this.searchService = new ShardSearchStats(indexSettings); this.searchService = new ShardSearchStats(indexSettings);
this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings); this.shardWarmerService = new ShardIndexWarmerService(shardId, indexSettings);
this.indicesQueryCache = indicesQueryCache; this.indicesQueryCache = provider.getIndicesQueryCache();
this.shardQueryCache = new ShardRequestCache(shardId, indexSettings); this.shardQueryCache = new ShardRequestCache(shardId, indexSettings);
this.shardFieldData = new ShardFieldData(); this.shardFieldData = new ShardFieldData();
this.shardPercolateService = new ShardPercolateService(shardId, indexSettings); this.indexFieldDataService = provider.getIndexFieldDataService();
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryParserService, indexingService, indicesLifecycle, mapperService, indexFieldDataService, shardPercolateService);
this.indexFieldDataService = indexFieldDataService;
this.shardBitsetFilterCache = new ShardBitsetFilterCache(shardId, indexSettings); this.shardBitsetFilterCache = new ShardBitsetFilterCache(shardId, indexSettings);
state = IndexShardState.CREATED; state = IndexShardState.CREATED;
this.refreshInterval = indexSettings.getAsTime(INDEX_REFRESH_INTERVAL, EngineConfig.DEFAULT_REFRESH_INTERVAL); this.refreshInterval = indexSettings.getAsTime(INDEX_REFRESH_INTERVAL, EngineConfig.DEFAULT_REFRESH_INTERVAL);
@ -238,7 +228,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
this.checkIndexOnStartup = indexSettings.get("index.shard.check_on_startup", "false"); this.checkIndexOnStartup = indexSettings.get("index.shard.check_on_startup", "false");
this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, getFromSettings(logger, indexSettings, Translog.Durabilty.REQUEST), this.translogConfig = new TranslogConfig(shardId, shardPath().resolveTranslog(), indexSettings, getFromSettings(logger, indexSettings, Translog.Durabilty.REQUEST),
bigArrays, threadPool); provider.getBigArrays(), threadPool);
final QueryCachingPolicy cachingPolicy; final QueryCachingPolicy cachingPolicy;
// the query cache is a node-level thing, however we want the most popular filters // the query cache is a node-level thing, however we want the most popular filters
// to be computed on a per-shard basis // to be computed on a per-shard basis
@ -252,6 +242,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
this.flushThresholdSize = indexSettings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB)); this.flushThresholdSize = indexSettings.getAsBytesSize(INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE, new ByteSizeValue(512, ByteSizeUnit.MB));
this.disableFlush = indexSettings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, false); this.disableFlush = indexSettings.getAsBoolean(INDEX_TRANSLOG_DISABLE_FLUSH, false);
this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId); this.indexShardOperationCounter = new IndexShardOperationCounter(logger, shardId);
this.searcherWrapper = provider.getIndexSearcherWrapper();
this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryParserService, indexingService, mapperService, indexFieldDataService);
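// real-time percolation is only enabled for shards that actually map the percolator type: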
if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) {
percolatorQueriesRegistry.enableRealTimePercolator();
}
} }
public Store store() { public Store store() {
@ -344,7 +339,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
if (newRouting.state() == ShardRoutingState.STARTED || newRouting.state() == ShardRoutingState.RELOCATING) { if (newRouting.state() == ShardRoutingState.STARTED || newRouting.state() == ShardRoutingState.RELOCATING) {
// we want to refresh *before* we move to internal STARTED state // we want to refresh *before* we move to internal STARTED state
try { try {
engine().refresh("cluster_state_started"); getEngine().refresh("cluster_state_started");
} catch (Throwable t) { } catch (Throwable t) {
logger.debug("failed to refresh due to move to cluster wide started", t); logger.debug("failed to refresh due to move to cluster wide started", t);
} }
@ -453,7 +448,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("index [{}][{}]{}", create.type(), create.id(), create.docs()); logger.trace("index [{}][{}]{}", create.type(), create.id(), create.docs());
} }
engine().create(create); getEngine().create(create);
create.endTime(System.nanoTime()); create.endTime(System.nanoTime());
} catch (Throwable ex) { } catch (Throwable ex) {
indexingService.postCreate(create, ex); indexingService.postCreate(create, ex);
@ -492,7 +487,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs()); logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs());
} }
created = engine().index(index); created = getEngine().index(index);
index.endTime(System.nanoTime()); index.endTime(System.nanoTime());
} catch (Throwable ex) { } catch (Throwable ex) {
indexingService.postIndex(index, ex); indexingService.postIndex(index, ex);
@ -515,7 +510,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("delete [{}]", delete.uid().text()); logger.trace("delete [{}]", delete.uid().text());
} }
engine().delete(delete); getEngine().delete(delete);
delete.endTime(System.nanoTime()); delete.endTime(System.nanoTime());
} catch (Throwable ex) { } catch (Throwable ex) {
indexingService.postDelete(delete, ex); indexingService.postDelete(delete, ex);
@ -526,7 +521,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
public Engine.GetResult get(Engine.Get get) { public Engine.GetResult get(Engine.Get get) {
readAllowed(); readAllowed();
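// realtime gets now resolve through acquireSearcher, so the configured IndexSearcherWrapper applies to gets as well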
return engine().get(get); return getEngine().get(get, this::acquireSearcher);
} }
public void refresh(String source) { public void refresh(String source) {
@ -535,7 +530,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
logger.trace("refresh with source: {}", source); logger.trace("refresh with source: {}", source);
} }
long time = System.nanoTime(); long time = System.nanoTime();
engine().refresh(source); getEngine().refresh(source);
refreshMetric.inc(System.nanoTime() - time); refreshMetric.inc(System.nanoTime() - time);
} }
@ -561,7 +556,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
*/ */
@Nullable @Nullable
public CommitStats commitStats() { public CommitStats commitStats() {
Engine engine = engineUnsafe(); Engine engine = getEngineOrNull();
return engine == null ? null : engine.commitStats(); return engine == null ? null : engine.commitStats();
} }
@ -588,7 +583,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
} }
public MergeStats mergeStats() { public MergeStats mergeStats() {
final Engine engine = engineUnsafe(); final Engine engine = getEngineOrNull();
if (engine == null) { if (engine == null) {
return new MergeStats(); return new MergeStats();
} }
@ -596,7 +591,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
} }
public SegmentsStats segmentStats() { public SegmentsStats segmentStats() {
SegmentsStats segmentsStats = engine().segmentsStats(); SegmentsStats segmentsStats = getEngine().segmentsStats();
segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes()); segmentsStats.addBitsetMemoryInBytes(shardBitsetFilterCache.getMemorySizeInBytes());
return segmentsStats; return segmentsStats;
} }
@ -621,12 +616,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
return percolatorQueriesRegistry; return percolatorQueriesRegistry;
} }
public ShardPercolateService shardPercolateService() {
return shardPercolateService;
}
public TranslogStats translogStats() { public TranslogStats translogStats() {
return engine().getTranslog().stats(); return getEngine().getTranslog().stats();
} }
public SuggestStats suggestStats() { public SuggestStats suggestStats() {
@ -651,7 +642,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) { public Engine.SyncedFlushResult syncFlush(String syncId, Engine.CommitId expectedCommitId) {
verifyStartedOrRecovering(); verifyStartedOrRecovering();
logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]]", syncId, expectedCommitId); logger.trace("trying to sync flush. sync id [{}]. expected commit id [{}]]", syncId, expectedCommitId);
return engine().syncFlush(syncId, expectedCommitId); return getEngine().syncFlush(syncId, expectedCommitId);
} }
public Engine.CommitId flush(FlushRequest request) throws ElasticsearchException { public Engine.CommitId flush(FlushRequest request) throws ElasticsearchException {
@ -666,7 +657,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
verifyStartedOrRecovering(); verifyStartedOrRecovering();
long time = System.nanoTime(); long time = System.nanoTime();
Engine.CommitId commitId = engine().flush(force, waitIfOngoing); Engine.CommitId commitId = getEngine().flush(force, waitIfOngoing);
flushMetric.inc(System.nanoTime() - time); flushMetric.inc(System.nanoTime() - time);
return commitId; return commitId;
@ -677,7 +668,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
if (logger.isTraceEnabled()) { if (logger.isTraceEnabled()) {
logger.trace("optimize with {}", optimize); logger.trace("optimize with {}", optimize);
} }
engine().forceMerge(optimize.flush(), optimize.maxNumSegments(), optimize.onlyExpungeDeletes(), false, false); getEngine().forceMerge(optimize.flush(), optimize.maxNumSegments(), optimize.onlyExpungeDeletes(), false, false);
} }
/** /**
@ -690,7 +681,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
} }
org.apache.lucene.util.Version previousVersion = minimumCompatibleVersion(); org.apache.lucene.util.Version previousVersion = minimumCompatibleVersion();
// we just want to upgrade the segments, not actually optimize to a single segment // we just want to upgrade the segments, not actually optimize to a single segment
engine().forceMerge(true, // we need to flush at the end to make sure the upgrade is durable getEngine().forceMerge(true, // we need to flush at the end to make sure the upgrade is durable
Integer.MAX_VALUE, // we just want to upgrade the segments, not actually optimize to a single segment Integer.MAX_VALUE, // we just want to upgrade the segments, not actually optimize to a single segment
false, true, upgrade.upgradeOnlyAncientSegments()); false, true, upgrade.upgradeOnlyAncientSegments());
org.apache.lucene.util.Version version = minimumCompatibleVersion(); org.apache.lucene.util.Version version = minimumCompatibleVersion();
@ -703,7 +694,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
public org.apache.lucene.util.Version minimumCompatibleVersion() { public org.apache.lucene.util.Version minimumCompatibleVersion() {
org.apache.lucene.util.Version luceneVersion = null; org.apache.lucene.util.Version luceneVersion = null;
for (Segment segment : engine().segments(false)) { for (Segment segment : getEngine().segments(false)) {
if (luceneVersion == null || luceneVersion.onOrAfter(segment.getVersion())) { if (luceneVersion == null || luceneVersion.onOrAfter(segment.getVersion())) {
luceneVersion = segment.getVersion(); luceneVersion = segment.getVersion();
} }
@ -721,7 +712,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
IndexShardState state = this.state; // one time volatile read IndexShardState state = this.state; // one time volatile read
// we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine // we allow snapshot on closed index shard, since we want to do one after we close the shard and before we close the engine
if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) { if (state == IndexShardState.STARTED || state == IndexShardState.RELOCATED || state == IndexShardState.CLOSED) {
return engine().snapshotIndex(flushFirst); return getEngine().snapshotIndex(flushFirst);
} else { } else {
throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed"); throw new IllegalIndexShardStateException(shardId, state, "snapshot is not allowed");
} }
@ -742,12 +733,17 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
*/ */
public void failShard(String reason, @Nullable Throwable e) { public void failShard(String reason, @Nullable Throwable e) {
// fail the engine. This will cause this shard to also be removed from the node's index service. // fail the engine. This will cause this shard to also be removed from the node's index service.
engine().failEngine(reason, e); getEngine().failEngine(reason, e);
} }
public Engine.Searcher acquireSearcher(String source) { public Engine.Searcher acquireSearcher(String source) {
readAllowed(); readAllowed();
return engine().acquireSearcher(source); Engine engine = getEngine();
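// wrapping moved here from the removed IndexSearcherWrappingService; a null wrapper means no extension is installed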
try {
return searcherWrapper == null ? engine.acquireSearcher(source) : searcherWrapper.wrap(engineConfig, engine.acquireSearcher(source));
} catch (IOException ex) {
throw new ElasticsearchException("failed to wrap searcher", ex);
}
} }
public void close(String reason, boolean flushEngine) throws IOException { public void close(String reason, boolean flushEngine) throws IOException {
@ -774,8 +770,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
} }
} }
public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException { public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
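// the shard now loads percolator queries itself during post-recovery, replacing the beforeIndexShardPostRecovery lifecycle hook removed further down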
indicesLifecycle.beforeIndexShardPostRecovery(this); if (mapperService.hasMapping(PercolatorService.TYPE_NAME)) {
refresh("percolator_load_queries");
try (Engine.Searcher searcher = getEngine().acquireSearcher("percolator_load_queries")) {
this.percolatorQueriesRegistry.loadQueries(searcher.reader());
}
}
synchronized (mutex) { synchronized (mutex) {
if (state == IndexShardState.CLOSED) { if (state == IndexShardState.CLOSED) {
throw new IndexShardClosedException(shardId); throw new IndexShardClosedException(shardId);
@ -789,7 +791,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
recoveryState.setStage(RecoveryState.Stage.DONE); recoveryState.setStage(RecoveryState.Stage.DONE);
changeState(IndexShardState.POST_RECOVERY, reason); changeState(IndexShardState.POST_RECOVERY, reason);
} }
indicesLifecycle.afterIndexShardPostRecovery(this);
return this; return this;
} }
@ -813,7 +814,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
if (state != IndexShardState.RECOVERING) { if (state != IndexShardState.RECOVERING) {
throw new IndexShardNotRecoveringException(shardId, state); throw new IndexShardNotRecoveringException(shardId, state);
} }
return engineConfig.getTranslogRecoveryPerformer().performBatchRecovery(engine(), operations); return engineConfig.getTranslogRecoveryPerformer().performBatchRecovery(getEngine(), operations);
} }
/** /**
@ -852,7 +853,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
* a remote peer. * a remote peer.
*/ */
public void skipTranslogRecovery() throws IOException { public void skipTranslogRecovery() throws IOException {
assert engineUnsafe() == null : "engine was already created"; assert getEngineOrNull() == null : "engine was already created";
internalPerformTranslogRecovery(true, true); internalPerformTranslogRecovery(true, true);
assert recoveryState.getTranslog().recoveredOperations() == 0; assert recoveryState.getTranslog().recoveredOperations() == 0;
} }
@ -892,7 +893,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
*/ */
public void finalizeRecovery() { public void finalizeRecovery() {
recoveryState().setStage(RecoveryState.Stage.FINALIZE); recoveryState().setStage(RecoveryState.Stage.FINALIZE);
engine().refresh("recovery_finalization"); getEngine().refresh("recovery_finalization");
startScheduledTasksIfNeeded(); startScheduledTasksIfNeeded();
engineConfig.setEnableGcDeletes(true); engineConfig.setEnableGcDeletes(true);
} }
@ -982,7 +983,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
config.setIndexingBufferSize(shardIndexingBufferSize); config.setIndexingBufferSize(shardIndexingBufferSize);
Engine engine = engineUnsafe(); Engine engine = getEngineOrNull();
if (engine == null) { if (engine == null) {
logger.debug("updateBufferSize: engine is closed; skipping"); logger.debug("updateBufferSize: engine is closed; skipping");
return; return;
@ -1057,7 +1058,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
*/ */
boolean shouldFlush() { boolean shouldFlush() {
if (disableFlush == false) { if (disableFlush == false) {
Engine engine = engineUnsafe(); Engine engine = getEngineOrNull();
if (engine != null) { if (engine != null) {
try { try {
Translog translog = engine.getTranslog(); Translog translog = engine.getTranslog();
@ -1171,15 +1172,37 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
searchService.onRefreshSettings(settings); searchService.onRefreshSettings(settings);
indexingService.onRefreshSettings(settings); indexingService.onRefreshSettings(settings);
if (change) { if (change) {
engine().onSettingsChanged(); getEngine().onSettingsChanged();
} }
} }
public Translog.View acquireTranslogView() {
Engine engine = getEngine();
assert engine.getTranslog() != null : "translog must not be null";
return engine.getTranslog().newView();
}
public List<Segment> segments(boolean verbose) {
return getEngine().segments(verbose);
}
public void flushAndCloseEngine() throws IOException {
getEngine().flushAndClose();
}
public Translog getTranslog() {
return getEngine().getTranslog();
}
public PercolateStats percolateStats() {
return percolatorQueriesRegistry.stats();
}
class EngineRefresher implements Runnable { class EngineRefresher implements Runnable {
@Override @Override
public void run() { public void run() {
// we check before if a refresh is needed, if not, we reschedule, otherwise, we fork, refresh, and then reschedule // we check before if a refresh is needed, if not, we reschedule, otherwise, we fork, refresh, and then reschedule
if (!engine().refreshNeeded()) { if (!getEngine().refreshNeeded()) {
reschedule(); reschedule();
return; return;
} }
@ -1187,7 +1210,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
@Override @Override
public void run() { public void run() {
try { try {
if (engine().refreshNeeded()) { if (getEngine().refreshNeeded()) {
refresh("schedule"); refresh("schedule");
} }
} catch (EngineClosedException e) { } catch (EngineClosedException e) {
@ -1300,8 +1323,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
recoveryState.getVerifyIndex().checkIndexTime(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - timeNS))); recoveryState.getVerifyIndex().checkIndexTime(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - timeNS)));
} }
public Engine engine() { Engine getEngine() {
Engine engine = engineUnsafe(); Engine engine = getEngineOrNull();
if (engine == null) { if (engine == null) {
throw new EngineClosedException(shardId); throw new EngineClosedException(shardId);
} }
@ -1310,7 +1333,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
/** NOTE: returns null if engine is not yet started (e.g. recovery phase 1, copying over index files, is still running), or if engine is /** NOTE: returns null if engine is not yet started (e.g. recovery phase 1, copying over index files, is still running), or if engine is
* closed. */ * closed. */
protected Engine engineUnsafe() { protected Engine getEngineOrNull() {
return this.currentEngineReference.get(); return this.currentEngineReference.get();
} }
@ -1403,7 +1426,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
}; };
return new EngineConfig(shardId, return new EngineConfig(shardId,
threadPool, indexingService, indexSettings, warmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig, threadPool, indexingService, indexSettings, warmer, store, deletionPolicy, mergePolicyConfig.getMergePolicy(), mergeSchedulerConfig,
mapperService.indexAnalyzer(), similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, wrappingService, translogConfig); mapperService.indexAnalyzer(), similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig);
} }
private static class IndexShardOperationCounter extends AbstractRefCounted { private static class IndexShardOperationCounter extends AbstractRefCounted {
@ -1444,7 +1467,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
*/ */
public void sync(Translog.Location location) { public void sync(Translog.Location location) {
try { try {
final Engine engine = engine(); final Engine engine = getEngine();
engine.getTranslog().ensureSynced(location); engine.getTranslog().ensureSynced(location);
} catch (EngineClosedException ex) { } catch (EngineClosedException ex) {
// that's fine since we already synced everything on engine close - this also is conform with the methods documentation // that's fine since we already synced everything on engine close - this also is conform with the methods documentation
@ -1515,4 +1538,5 @@ public class IndexShard extends AbstractIndexShardComponent implements IndexSett
} }
return false; return false;
} }
} }
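Taken together, callers now go through small accessors instead of reaching for engine() directly. A hedged usage sketch of the surface added above (shard is an IndexShard; Translog.View being Closeable is an assumption):

    try (Translog.View view = shard.acquireTranslogView()) {   // assumption: Translog.View is Closeable
        // stream operations from the view, e.g. during peer recovery
    }
    List<Segment> segments = shard.segments(true);             // verbose segment listing via the engine
    PercolateStats percolateStats = shard.percolateStats();    // stats now come from the queries registry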

View File: IndexShardModule.java (deleted)

@ -1,76 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.shard;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.IndexSearcherWrapper;
import org.elasticsearch.index.engine.IndexSearcherWrappingService;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.InternalEngineFactory;
/**
* The {@code IndexShardModule} module is responsible for binding the correct
* shard id, index shard, engine factory, and warming service for a newly
* created shard.
*/
public class IndexShardModule extends AbstractModule {
private final ShardId shardId;
private final Settings settings;
private final boolean primary;
// pkg private so tests can mock
Class<? extends EngineFactory> engineFactoryImpl = InternalEngineFactory.class;
public IndexShardModule(ShardId shardId, boolean primary, Settings settings) {
this.settings = settings;
this.shardId = shardId;
this.primary = primary;
if (settings.get("index.translog.type") != null) {
throw new IllegalStateException("a custom translog type is no longer supported. got [" + settings.get("index.translog.type") + "]");
}
}
/** Return true if a shadow engine should be used */
protected boolean useShadowEngine() {
return primary == false && IndexMetaData.isIndexUsingShadowReplicas(settings);
}
@Override
protected void configure() {
bind(ShardId.class).toInstance(shardId);
if (useShadowEngine()) {
bind(IndexShard.class).to(ShadowIndexShard.class).asEagerSingleton();
} else {
bind(IndexShard.class).asEagerSingleton();
}
bind(EngineFactory.class).to(engineFactoryImpl);
bind(IndexSearcherWrappingService.class).asEagerSingleton();
// this injects an empty set in IndexSearcherWrappingService, otherwise guice can't construct IndexSearcherWrappingService
Multibinder<IndexSearcherWrapper> multibinder
= Multibinder.newSetBinder(binder(), IndexSearcherWrapper.class);
}
}

View File: ShadowIndexShard.java

@ -18,32 +18,14 @@
*/ */
package org.elasticsearch.index.shard; package org.elasticsearch.index.shard;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexServicesProvider;
import org.elasticsearch.index.aliases.IndexAliasesService;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.codec.CodecService;
import org.elasticsearch.index.engine.IndexSearcherWrappingService;
import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.query.IndexQueryParserService;
import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.termvectors.TermVectorsService;
import org.elasticsearch.indices.IndicesLifecycle;
import org.elasticsearch.indices.IndicesWarmer;
import org.elasticsearch.indices.cache.query.IndicesQueryCache;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException; import java.io.IOException;
@ -55,23 +37,8 @@ import java.io.IOException;
*/ */
public final class ShadowIndexShard extends IndexShard { public final class ShadowIndexShard extends IndexShard {
@Inject public ShadowIndexShard(ShardId shardId, @IndexSettings Settings indexSettings, ShardPath path, Store store, IndexServicesProvider provider) throws IOException {
public ShadowIndexShard(ShardId shardId, @IndexSettings Settings indexSettings, super(shardId, indexSettings, path, store, provider);
IndicesLifecycle indicesLifecycle, Store store,
ThreadPool threadPool, MapperService mapperService,
IndexQueryParserService queryParserService, IndexCache indexCache,
IndexAliasesService indexAliasesService, IndicesQueryCache indicesQueryCache,
CodecService codecService, TermVectorsService termVectorsService, IndexFieldDataService indexFieldDataService,
@Nullable IndicesWarmer warmer,
SimilarityService similarityService,
EngineFactory factory,
ShardPath path, BigArrays bigArrays, IndexSearcherWrappingService wrappingService) throws IOException {
super(shardId, indexSettings, indicesLifecycle, store,
threadPool, mapperService, queryParserService, indexCache, indexAliasesService,
indicesQueryCache, codecService,
termVectorsService, indexFieldDataService,
warmer, similarityService,
factory, path, bigArrays, wrappingService);
} }
/** /**

View File: BlobStoreIndexShardRepository.java

@ -30,6 +30,7 @@ import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RateLimiter; import org.apache.lucene.store.RateLimiter;
import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.BytesRefBuilder;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterService;
@ -108,6 +109,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
private RateLimitingInputStream.Listener snapshotThrottleListener; private RateLimitingInputStream.Listener snapshotThrottleListener;
private RateLimitingInputStream.Listener restoreThrottleListener;
private boolean compress; private boolean compress;
private final ParseFieldMatcher parseFieldMatcher; private final ParseFieldMatcher parseFieldMatcher;
@ -162,6 +165,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
this.restoreRateLimiter = restoreRateLimiter; this.restoreRateLimiter = restoreRateLimiter;
this.rateLimiterListener = rateLimiterListener; this.rateLimiterListener = rateLimiterListener;
this.snapshotThrottleListener = nanos -> rateLimiterListener.onSnapshotPause(nanos); this.snapshotThrottleListener = nanos -> rateLimiterListener.onSnapshotPause(nanos);
this.restoreThrottleListener = nanos -> rateLimiterListener.onRestorePause(nanos);
this.compress = compress; this.compress = compress;
indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher, isCompress()); indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher, isCompress());
indexShardSnapshotLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher); indexShardSnapshotLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher);
@ -501,7 +505,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) { public SnapshotContext(SnapshotId snapshotId, ShardId shardId, IndexShardSnapshotStatus snapshotStatus) {
super(snapshotId, Version.CURRENT, shardId); super(snapshotId, Version.CURRENT, shardId);
IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
store = indexService.shard(shardId.id()).store(); store = indexService.getShardOrNull(shardId.id()).store();
this.snapshotStatus = snapshotStatus; this.snapshotStatus = snapshotStatus;
} }
@ -785,7 +789,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
*/ */
public RestoreContext(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) { public RestoreContext(SnapshotId snapshotId, Version version, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) {
super(snapshotId, version, shardId, snapshotShardId); super(snapshotId, version, shardId, snapshotShardId);
store = indicesService.indexServiceSafe(shardId.getIndex()).shard(shardId.id()).store(); store = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id()).store();
this.recoveryState = recoveryState; this.recoveryState = recoveryState;
} }
@ -906,16 +910,20 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
*/ */
private void restoreFile(final FileInfo fileInfo) throws IOException { private void restoreFile(final FileInfo fileInfo) throws IOException {
boolean success = false; boolean success = false;
try (InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
try (InputStream partSliceStream = new PartSliceStream(blobContainer, fileInfo)) {
final InputStream stream;
if (restoreRateLimiter == null) {
stream = partSliceStream;
} else {
stream = new RateLimitingInputStream(partSliceStream, restoreRateLimiter, restoreThrottleListener);
}
try (final IndexOutput indexOutput = store.createVerifyingOutput(fileInfo.physicalName(), fileInfo.metadata(), IOContext.DEFAULT)) { try (final IndexOutput indexOutput = store.createVerifyingOutput(fileInfo.physicalName(), fileInfo.metadata(), IOContext.DEFAULT)) {
final byte[] buffer = new byte[BUFFER_SIZE]; final byte[] buffer = new byte[BUFFER_SIZE];
int length; int length;
while ((length = stream.read(buffer)) > 0) { while ((length = stream.read(buffer)) > 0) {
indexOutput.writeBytes(buffer, 0, length); indexOutput.writeBytes(buffer, 0, length);
recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.name(), length); recoveryState.getIndex().addRecoveredBytesToFile(fileInfo.name(), length);
if (restoreRateLimiter != null) {
rateLimiterListener.onRestorePause(restoreRateLimiter.pause(length));
}
} }
Store.verify(indexOutput); Store.verify(indexOutput);
indexOutput.close(); indexOutput.close();
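Restore throttling now happens by wrapping the part-slice stream instead of pausing inline after each read. A minimal self-contained sketch of that pattern (a hypothetical class, not the ES RateLimitingInputStream; the listener mirrors restoreThrottleListener above, and only bulk reads are throttled here):

    import java.io.FilterInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import org.apache.lucene.store.RateLimiter;

    // pauses after each bulk read and reports the nanoseconds spent throttled
    class ThrottledInputStream extends FilterInputStream {
        interface PauseListener { void onPause(long nanos); }

        private final RateLimiter limiter;
        private final PauseListener listener;

        ThrottledInputStream(InputStream in, RateLimiter limiter, PauseListener listener) {
            super(in);
            this.limiter = limiter;
            this.listener = listener;
        }

        @Override
        public int read(byte[] b, int off, int len) throws IOException {
            int n = super.read(b, off, len);
            if (n > 0) {
                listener.onPause(limiter.pause(n)); // RateLimiter.pause returns the nanos slept
            }
            return n;
        }
    }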

View File: IndexStore.java

@ -27,6 +27,7 @@ import org.elasticsearch.index.AbstractIndexComponent;
import org.elasticsearch.index.Index; import org.elasticsearch.index.Index;
import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.settings.IndexSettings;
import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.settings.IndexSettingsService;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.indices.store.IndicesStore;
import java.io.Closeable; import java.io.Closeable;
@ -112,7 +113,7 @@ public class IndexStore extends AbstractIndexComponent implements Closeable {
/** /**
* The shard store class that should be used for each shard. * The shard store class that should be used for each shard.
*/ */
public Class<? extends DirectoryService> shardDirectory() { public DirectoryService newDirectoryService(ShardPath path) {
return FsDirectoryService.class; return new FsDirectoryService(indexSettings, this, path);
} }
} }
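The Guice class binding gives way to a plain factory call. Assuming DirectoryService exposes a newDirectory() factory (as FsDirectoryService does), the call site reduces to:

    DirectoryService directoryService = indexStore.newDirectoryService(shardPath); // shardPath: the shard's ShardPath
    Directory directory = directoryService.newDirectory();                         // assumption about the DirectoryService API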

View File: Store.java

@@ -1318,12 +1318,16 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
     @Override
     public void writeByte(byte b) throws IOException {
         final long writtenBytes = this.writtenBytes++;
-        if (writtenBytes == checksumPosition) {
-            readAndCompareChecksum();
-        } else if (writtenBytes > checksumPosition) { // we are writing parts of the checksum....
+        if (writtenBytes >= checksumPosition) { // we are writing parts of the checksum....
+            if (writtenBytes == checksumPosition) {
+                readAndCompareChecksum();
+            }
             final int index = Math.toIntExact(writtenBytes - checksumPosition);
             if (index < footerChecksum.length) {
                 footerChecksum[index] = b;
+                if (index == footerChecksum.length-1) {
+                    verify(); // we have recorded the entire checksum
+                }
             } else {
                 verify(); // fail if we write more than expected
                 throw new AssertionError("write past EOF expected length: " + metadata.length() + " writtenBytes: " + writtenBytes);
@@ -1344,16 +1348,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
     @Override
     public void writeBytes(byte[] b, int offset, int length) throws IOException {
         if (writtenBytes + length > checksumPosition) {
-            if (actualChecksum == null) {
-                assert writtenBytes <= checksumPosition;
-                final int bytesToWrite = (int) (checksumPosition - writtenBytes);
-                out.writeBytes(b, offset, bytesToWrite);
-                readAndCompareChecksum();
-                offset += bytesToWrite;
-                length -= bytesToWrite;
-                writtenBytes += bytesToWrite;
-            }
-            for (int i = 0; i < length; i++) {
+            for (int i = 0; i < length; i++) { // don't optimize writing the last block of bytes
                 writeByte(b[offset+i]);
             }
         } else {
@@ -1361,7 +1356,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
             writtenBytes += length;
         }
     }
 }
 /**
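A simplified model of what the rewritten writeByte does: checksum the body as it streams past, capture the trailing footer bytes, and verify the moment the last footer byte lands rather than on a later write. This sketch assumes a bare CRC32 over the body and an 8-byte big-endian footer; the real Lucene footer also carries a magic value and algorithm id, and the real class additionally compares against the checksum expected from the store metadata:

    import java.util.zip.CRC32;

    class FooterVerifier {
        private final long checksumPosition; // total length minus footer length
        private final byte[] footer;         // footer bytes as the writer sends them
        private final CRC32 crc = new CRC32();
        private long written;

        FooterVerifier(long totalLength, int footerLength) {
            this.checksumPosition = totalLength - footerLength;
            this.footer = new byte[footerLength];
        }

        void writeByte(byte b) {
            long pos = written++;
            if (pos >= checksumPosition) { // we are inside the footer
                int index = Math.toIntExact(pos - checksumPosition);
                if (index >= footer.length) {
                    throw new IllegalStateException("write past expected length at " + pos);
                }
                footer[index] = b;
                if (index == footer.length - 1) {
                    verify(); // the whole footer has been recorded
                }
            } else {
                crc.update(b); // still in the body
            }
        }

        private void verify() {
            long expected = 0;
            for (byte b : footer) { // big-endian footer bytes -> long
                expected = (expected << 8) | (b & 0xFF);
            }
            if (expected != crc.getValue()) {
                throw new IllegalStateException("checksum mismatch: expected=" + expected + " actual=" + crc.getValue());
            }
        }
    }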

View File

@@ -97,17 +97,6 @@ public interface IndicesLifecycle {
     }
-    /**
-     * Called right after the shard is moved into POST_RECOVERY mode
-     */
-    public void afterIndexShardPostRecovery(IndexShard indexShard) {}
-    /**
-     * Called right before the shard is moved into POST_RECOVERY mode.
-     * The shard is ready to be used but not yet marked as POST_RECOVERY.
-     */
-    public void beforeIndexShardPostRecovery(IndexShard indexShard) {}
     /**
      * Called after the index shard has been started.
      */

View File

@@ -51,18 +51,15 @@ import org.elasticsearch.index.IndexNameModule;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.LocalNodeIdModule;
-import org.elasticsearch.index.aliases.IndexAliasesServiceModule;
 import org.elasticsearch.index.analysis.AnalysisModule;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.cache.IndexCache;
 import org.elasticsearch.index.cache.IndexCacheModule;
-import org.elasticsearch.index.fielddata.IndexFieldDataModule;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.get.GetStats;
 import org.elasticsearch.index.indexing.IndexingStats;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MapperServiceModule;
 import org.elasticsearch.index.merge.MergeStats;
 import org.elasticsearch.index.query.IndexQueryParserService;
 import org.elasticsearch.index.recovery.RecoveryStats;
@@ -343,11 +340,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
         modules.add(new AnalysisModule(indexSettings, indicesAnalysisService));
         modules.add(new SimilarityModule(indexSettings));
         modules.add(new IndexCacheModule(indexSettings));
-        modules.add(new IndexFieldDataModule(indexSettings));
-        modules.add(new MapperServiceModule());
-        modules.add(new IndexAliasesServiceModule());
-        modules.add(new IndexModule(indexSettings));
+        modules.add(new IndexModule());
         pluginsService.processModules(modules);
         Injector indexInjector;
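The module list shrinks because the per-index bindings were folded into a single IndexModule. A hedged sketch of that consolidation in plain Guice style; Elasticsearch ships its own org.elasticsearch.common.inject fork, and the service classes below are placeholders:

    import com.google.inject.AbstractModule;

    // Placeholder services standing in for field data, mapping and aliases.
    class FieldDataServiceSketch {}
    class MapperServiceSketch {}
    class AliasesServiceSketch {}

    // One module now owns bindings that previously lived in IndexFieldDataModule,
    // MapperServiceModule and IndexAliasesServiceModule.
    class CombinedIndexModule extends AbstractModule {
        @Override
        protected void configure() {
            bind(FieldDataServiceSketch.class).asEagerSingleton();
            bind(MapperServiceSketch.class).asEagerSingleton();
            bind(AliasesServiceSketch.class).asEagerSingleton();
        }
    }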

View File

@@ -87,7 +87,7 @@ public final class IndicesWarmer extends AbstractComponent {
         if (indexService == null) {
             return;
         }
-        final IndexShard indexShard = indexService.shard(context.shardId().id());
+        final IndexShard indexShard = indexService.getShardOrNull(context.shardId().id());
         if (indexShard == null) {
             return;
         }
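The renames in this and the following files converge on one accessor pair: getShardOrNull for callers prepared to handle absence, and getShard (formerly shardSafe) for callers that expect the shard to exist. A minimal sketch of the pair; the shard map and exception type are simplified:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class IndexServiceSketch {
        private final Map<Integer, Object> shards = new ConcurrentHashMap<>();

        /** Returns the shard, or null if it is not allocated on this node. */
        Object getShardOrNull(int shardId) {
            return shards.get(shardId);
        }

        /** Returns the shard, or throws if it is absent. */
        Object getShard(int shardId) {
            Object shard = getShardOrNull(shardId);
            if (shard == null) {
                throw new IllegalStateException("no such shard [" + shardId + "]");
            }
            return shard;
        }
    }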

View File

@@ -121,28 +121,6 @@ public class InternalIndicesLifecycle extends AbstractComponent implements Indic
         }
     }
-    public void beforeIndexShardPostRecovery(IndexShard indexShard) {
-        for (Listener listener : listeners) {
-            try {
-                listener.beforeIndexShardPostRecovery(indexShard);
-            } catch (Throwable t) {
-                logger.warn("{} failed to invoke before shard post recovery callback", t, indexShard.shardId());
-                throw t;
-            }
-        }
-    }
-    public void afterIndexShardPostRecovery(IndexShard indexShard) {
-        for (Listener listener : listeners) {
-            try {
-                listener.afterIndexShardPostRecovery(indexShard);
-            } catch (Throwable t) {
-                logger.warn("{} failed to invoke after shard post recovery callback", t, indexShard.shardId());
-                throw t;
-            }
-        }
-    }
     public void afterIndexShardStarted(IndexShard indexShard) {
         for (Listener listener : listeners) {

View File

@@ -38,7 +38,7 @@ import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.get.GetStats;
 import org.elasticsearch.index.indexing.IndexingStats;
 import org.elasticsearch.index.merge.MergeStats;
-import org.elasticsearch.index.percolator.stats.PercolateStats;
+import org.elasticsearch.index.percolator.PercolateStats;
 import org.elasticsearch.index.recovery.RecoveryStats;
 import org.elasticsearch.index.refresh.RefreshStats;
 import org.elasticsearch.index.search.stats.SearchStats;

View File

@@ -327,7 +327,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
                 // already deleted on us, ignore it
                 continue;
             }
-            IndexSettingsService indexSettingsService = indexService.injector().getInstance(IndexSettingsService.class);
+            IndexSettingsService indexSettingsService = indexService.settingsService();
             indexSettingsService.refreshSettings(indexMetaData.settings());
         }
     }
@@ -505,7 +505,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
                 continue;
             }
-            IndexShard indexShard = indexService.shard(shardId);
+            IndexShard indexShard = indexService.getShardOrNull(shardId);
             if (indexShard != null) {
                 ShardRouting currentRoutingEntry = indexShard.routingEntry();
                 // if the current and global routing are initializing, but are still not the same, its a different "shard" being allocated
@@ -591,7 +591,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
         final int shardId = shardRouting.id();
         if (indexService.hasShard(shardId)) {
-            IndexShard indexShard = indexService.shardSafe(shardId);
+            IndexShard indexShard = indexService.getShard(shardId);
             if (indexShard.state() == IndexShardState.STARTED || indexShard.state() == IndexShardState.POST_RECOVERY) {
                 // the master thinks we are initializing, but we are already started or on POST_RECOVERY and waiting
                 // for master to confirm a shard started message (either master failover, or a cluster event before
@@ -647,7 +647,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
                 return;
             }
         }
-        final IndexShard indexShard = indexService.shardSafe(shardId);
+        final IndexShard indexShard = indexService.getShard(shardId);
         if (indexShard.ignoreRecoveryAttempt()) {
             // we are already recovering (we can get to this state since the cluster event can happen several
@@ -835,7 +835,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
             ShardRouting shardRouting = null;
             final IndexService indexService = indicesService.indexService(shardId.index().name());
             if (indexService != null) {
-                IndexShard indexShard = indexService.shard(shardId.id());
+                IndexShard indexShard = indexService.getShardOrNull(shardId.id());
                 if (indexShard != null) {
                     shardRouting = indexShard.routingEntry();
                 }

View File

@@ -398,7 +398,7 @@ public class SyncedFlushService extends AbstractComponent {
     }
     private PreSyncedFlushResponse performPreSyncedFlush(PreSyncedFlushRequest request) {
-        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
+        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id());
         FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true);
         logger.trace("{} performing pre sync flush", request.shardId());
         Engine.CommitId commitId = indexShard.flush(flushRequest);
@@ -408,7 +408,7 @@ public class SyncedFlushService extends AbstractComponent {
     private SyncedFlushResponse performSyncedFlush(SyncedFlushRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
+        IndexShard indexShard = indexService.getShard(request.shardId().id());
         logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", request.shardId(), request.syncId(), request.expectedCommitId());
         Engine.SyncedFlushResult result = indexShard.syncFlush(request.syncId(), request.expectedCommitId());
         logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result);
@@ -426,7 +426,7 @@ public class SyncedFlushService extends AbstractComponent {
     private InFlightOpsResponse performInFlightOps(InFlightOpsRequest request) {
         IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
+        IndexShard indexShard = indexService.getShard(request.shardId().id());
         if (indexShard.routingEntry().primary() == false) {
             throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard");
         }

View File

@@ -234,7 +234,7 @@ public class IndexingMemoryController extends AbstractLifecycleComponent<Indexin
     protected IndexShard getShard(ShardId shardId) {
         IndexService indexService = indicesService.indexService(shardId.index().name());
         if (indexService != null) {
-            IndexShard indexShard = indexService.shard(shardId.id());
+            IndexShard indexShard = indexService.getShardOrNull(shardId.id());
             return indexShard;
         }
         return null;
@@ -264,7 +264,7 @@
         }
         final Translog translog;
         try {
-            translog = indexShard.engine().getTranslog();
+            translog = indexShard.getTranslog();
         } catch (EngineClosedException e) {
             // not ready yet to be checked for activity
             return null;

View File

@@ -89,7 +89,7 @@ public class RecoverySource extends AbstractComponent {
     private RecoveryResponse recover(final StartRecoveryRequest request) {
         final IndexService indexService = indicesService.indexServiceSafe(request.shardId().index().name());
-        final IndexShard shard = indexService.shardSafe(request.shardId().id());
+        final IndexShard shard = indexService.getShard(request.shardId().id());
         // starting recovery from that our (the source) shard state is marking the shard to be in recovery mode as well, otherwise
         // the index operations will not be routed to it properly

View File

@@ -120,9 +120,7 @@ public class RecoverySourceHandler {
      * performs the recovery from the local engine to the target
      */
     public RecoveryResponse recoverToTarget() {
-        final Engine engine = shard.engine();
-        assert engine.getTranslog() != null : "translog must not be null";
-        try (Translog.View translogView = engine.getTranslog().newView()) {
+        try (Translog.View translogView = shard.acquireTranslogView()) {
             logger.trace("captured translog id [{}] for recovery", translogView.minTranslogGeneration());
             final IndexCommit phase1Snapshot;
             try {
@@ -179,7 +177,7 @@ public class RecoverySourceHandler {
         try {
             recoverySourceMetadata = store.getMetadata(snapshot);
         } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
-            shard.engine().failEngine("recovery", ex);
+            shard.failShard("recovery", ex);
             throw ex;
         }
         for (String name : snapshot.getFileNames()) {
@@ -287,7 +285,7 @@ public class RecoverySourceHandler {
                 for (StoreFileMetaData md : metadata) {
                     logger.debug("{} checking integrity for file {} after remove corruption exception", shard.shardId(), md);
                     if (store.checkIntegrityNoException(md) == false) { // we are corrupted on the primary -- fail!
-                        shard.engine().failEngine("recovery", corruptIndexException);
+                        shard.failShard("recovery", corruptIndexException);
                         logger.warn("{} Corrupted file detected {} checksum mismatch", shard.shardId(), md);
                         throw corruptIndexException;
                     }
@@ -641,7 +639,7 @@ public class RecoverySourceHandler {
     }
     protected void failEngine(IOException cause) {
-        shard.engine().failEngine("recovery", cause);
+        shard.failShard("recovery", cause);
     }
     Future<Void>[] asyncSendFiles(Store store, StoreFileMetaData[] files, Function<StoreFileMetaData, OutputStream> outputStreamFactory) {
@@ -674,7 +672,6 @@ public class RecoverySourceHandler {
                 try (final OutputStream outputStream = outputStreamFactory.apply(md);
                      final IndexInput indexInput = store.directory().openInput(md.name(), IOContext.READONCE)) {
                     Streams.copy(new InputStreamIndexInput(indexInput, md.length()), outputStream);
-                    Store.verify(indexInput);
                 }
                 return null;
             });
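recoverToTarget() now obtains its translog view through a single AutoCloseable shard method instead of reaching through the engine. A sketch of the shape of that API with simplified stand-in types; the real view pins translog generations so they cannot be trimmed while recovery reads them:

    class TranslogSketch {
        private int openViews; // the real code pins translog generations instead

        synchronized View newView() {
            openViews++;
            return new View();
        }

        class View implements AutoCloseable {
            @Override
            public void close() {
                synchronized (TranslogSketch.this) {
                    openViews--; // releases whatever the view was holding open
                }
            }
        }
    }

    class ShardSketch {
        private final TranslogSketch translog = new TranslogSketch();

        // One method on the shard hides the engine/translog plumbing entirely, so
        // callers can write: try (TranslogSketch.View v = shard.acquireTranslogView()) { ... }
        TranslogSketch.View acquireTranslogView() {
            return translog.newView();
        }
    }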

View File

@@ -52,7 +52,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
                 // if we relocate we need to close the engine in order to open a new
                 // IndexWriter on the other end of the relocation
                 engineClosed = true;
-                shard.engine().flushAndClose();
+                shard.flushAndCloseEngine();
             } catch (IOException e) {
                 logger.warn("close engine failed", e);
                 shard.failShard("failed to close engine (phase1)", e);

View File

@@ -395,7 +395,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
         ShardId shardId = request.shardId;
         IndexService indexService = indicesService.indexService(shardId.index().getName());
         if (indexService != null && indexService.indexUUID().equals(request.indexUUID)) {
-            return indexService.shard(shardId.id());
+            return indexService.getShardOrNull(shardId.id());
         }
         return null;
     }

View File

@@ -152,7 +152,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
         try {
             IndexService indexService = indicesService.indexService(shardId.index().name());
             if (indexService != null) {
-                IndexShard indexShard = indexService.shard(shardId.id());
+                IndexShard indexShard = indexService.getShardOrNull(shardId.id());
                 if (indexShard != null) {
                     final Store store = indexShard.store();
                     store.incRef();

View File

@@ -88,10 +88,11 @@ class MultiDocumentPercolatorIndex implements PercolatorIndex {
             try {
                 // TODO: instead of passing null here, we can have a CTL<Map<String,TokenStream>> and pass previous,
                 // like the indexer does
-                TokenStream tokenStream = field.tokenStream(analyzer, null);
-                if (tokenStream != null) {
-                    memoryIndex.addField(field.name(), tokenStream, field.boost());
+                try (TokenStream tokenStream = field.tokenStream(analyzer, null)) {
+                    if (tokenStream != null) {
+                        memoryIndex.addField(field.name(), tokenStream, field.boost());
+                    }
                 }
             } catch (IOException e) {
                 throw new ElasticsearchException("Failed to create token stream", e);
             }

View File

@@ -50,6 +50,7 @@ import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
+import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
 import org.elasticsearch.index.query.IndexQueryParserService;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.shard.IndexShard;
@@ -89,6 +90,7 @@ import java.util.concurrent.ConcurrentMap;
  */
 public class PercolateContext extends SearchContext {
+    private final PercolatorQueriesRegistry percolateQueryRegistry;
     public boolean limit;
     private int size;
     public boolean doSort;
@@ -102,7 +104,6 @@ public class PercolateContext extends SearchContext {
     private final PageCacheRecycler pageCacheRecycler;
     private final BigArrays bigArrays;
     private final ScriptService scriptService;
-    private final ConcurrentMap<BytesRef, Query> percolateQueries;
     private final int numberOfShards;
     private final Query aliasFilter;
     private final long originNanoTime = System.nanoTime();
@@ -133,7 +134,7 @@ public class PercolateContext extends SearchContext {
         this.indexService = indexService;
         this.fieldDataService = indexService.fieldData();
         this.searchShardTarget = searchShardTarget;
-        this.percolateQueries = indexShard.percolateRegistry().percolateQueries();
+        this.percolateQueryRegistry = indexShard.percolateRegistry();
         this.types = new String[]{request.documentType()};
         this.pageCacheRecycler = pageCacheRecycler;
         this.bigArrays = bigArrays.withCircuitBreaking();
@@ -179,7 +180,7 @@ public class PercolateContext extends SearchContext {
     }
     public ConcurrentMap<BytesRef, Query> percolateQueries() {
-        return percolateQueries;
+        return percolateQueryRegistry.percolateQueries();
     }
     public Query percolateQuery() {

View File

@@ -71,7 +71,7 @@ import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.mapper.internal.UidFieldMapper;
-import org.elasticsearch.index.percolator.stats.ShardPercolateService;
+import org.elasticsearch.index.percolator.PercolatorQueriesRegistry;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesService;
@@ -86,7 +86,6 @@ import org.elasticsearch.search.aggregations.AggregationPhase;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
 import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
 import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
 import org.elasticsearch.search.highlight.HighlightField;
 import org.elasticsearch.search.highlight.HighlightPhase;
@@ -177,11 +176,10 @@ public class PercolatorService extends AbstractComponent {
     public PercolateShardResponse percolate(PercolateShardRequest request) {
         IndexService percolateIndexService = indicesService.indexServiceSafe(request.shardId().getIndex());
-        IndexShard indexShard = percolateIndexService.shardSafe(request.shardId().id());
+        IndexShard indexShard = percolateIndexService.getShard(request.shardId().id());
         indexShard.readAllowed(); // check if we can read the shard...
-        ShardPercolateService shardPercolateService = indexShard.shardPercolateService();
-        shardPercolateService.prePercolate();
+        PercolatorQueriesRegistry percolateQueryRegistry = indexShard.percolateRegistry();
+        percolateQueryRegistry.prePercolate();
         long startTime = System.nanoTime();
         // TODO: The filteringAliases should be looked up at the coordinating node and serialized with all shard request,
@@ -255,7 +253,7 @@ public class PercolatorService extends AbstractComponent {
         } finally {
             SearchContext.removeCurrent();
             context.close();
-            shardPercolateService.postPercolate(System.nanoTime() - startTime);
+            percolateQueryRegistry.postPercolate(System.nanoTime() - startTime);
         }
     }
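percolate() brackets the work with prePercolate() and a postPercolate(System.nanoTime() - startTime) in a finally block, so failed percolations are measured too. A sketch of that accounting pattern with a hypothetical stats holder:

    import java.util.concurrent.Callable;
    import java.util.concurrent.atomic.AtomicLong;

    class PercolateStatsSketch {
        private final AtomicLong current = new AtomicLong();    // in-flight percolations
        private final AtomicLong totalNanos = new AtomicLong(); // cumulative time

        void prePercolate() {
            current.incrementAndGet();
        }

        void postPercolate(long tookNanos) {
            current.decrementAndGet();
            totalNanos.addAndGet(tookNanos);
        }

        // The finally block guarantees the counters balance even on failure.
        <T> T time(Callable<T> work) throws Exception {
            prePercolate();
            long start = System.nanoTime();
            try {
                return work.call();
            } finally {
                postPercolate(System.nanoTime() - start);
            }
        }
    }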

View File

@@ -56,10 +56,11 @@ class SingleDocumentPercolatorIndex implements PercolatorIndex {
                 Analyzer analyzer = context.mapperService().documentMapper(parsedDocument.type()).mappers().indexAnalyzer();
                 // TODO: instead of passing null here, we can have a CTL<Map<String,TokenStream>> and pass previous,
                 // like the indexer does
-                TokenStream tokenStream = field.tokenStream(analyzer, null);
-                if (tokenStream != null) {
-                    memoryIndex.addField(field.name(), tokenStream, field.boost());
+                try (TokenStream tokenStream = field.tokenStream(analyzer, null)) {
+                    if (tokenStream != null) {
+                        memoryIndex.addField(field.name(), tokenStream, field.boost());
+                    }
                 }
             } catch (Exception e) {
                 throw new ElasticsearchException("Failed to create token stream for [" + field.name() + "]", e);
             }

View File

@@ -73,20 +73,6 @@ public abstract class Plugin {
         return Collections.emptyList();
     }
-    /**
-     * Per index shard module.
-     */
-    public Collection<Module> shardModules(Settings indexSettings) {
-        return Collections.emptyList();
-    }
-    /**
-     * Per index shard service that will be automatically closed.
-     */
-    public Collection<Class<? extends Closeable>> shardServices() {
-        return Collections.emptyList();
-    }
     /**
      * Additional node settings loaded by the plugin. Note that settings that are explicit in the nodes settings can't be
      * overwritten with the additional settings. These settings are added if they don't exist.

View File

@@ -250,22 +250,6 @@ public class PluginsService extends AbstractComponent {
         return services;
     }
-    public Collection<Module> shardModules(Settings indexSettings) {
-        List<Module> modules = new ArrayList<>();
-        for (Tuple<PluginInfo, Plugin> plugin : plugins) {
-            modules.addAll(plugin.v2().shardModules(indexSettings));
-        }
-        return modules;
-    }
-    public Collection<Class<? extends Closeable>> shardServices() {
-        List<Class<? extends Closeable>> services = new ArrayList<>();
-        for (Tuple<PluginInfo, Plugin> plugin : plugins) {
-            services.addAll(plugin.v2().shardServices());
-        }
-        return services;
-    }
     /**
      * Get information about plugins (jvm and site plugins).
     */

View File

@@ -43,7 +43,7 @@ import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.get.GetStats;
 import org.elasticsearch.index.indexing.IndexingStats;
 import org.elasticsearch.index.merge.MergeStats;
-import org.elasticsearch.index.percolator.stats.PercolateStats;
+import org.elasticsearch.index.percolator.PercolateStats;
 import org.elasticsearch.index.refresh.RefreshStats;
 import org.elasticsearch.index.search.stats.SearchStats;
 import org.elasticsearch.index.suggest.stats.SuggestStats;

View File

@@ -559,7 +559,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
     final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) {
         IndexService indexService = indicesService.indexServiceSafe(request.index());
-        IndexShard indexShard = indexService.shardSafe(request.shardId());
+        IndexShard indexShard = indexService.getShard(request.shardId());
         SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().id(), request.index(), request.shardId());

View File

@@ -33,6 +33,7 @@ import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
 import org.apache.lucene.search.highlight.TextFragment;
 import org.apache.lucene.util.BytesRefHash;
 import org.apache.lucene.util.CollectionUtil;
+import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.text.StringText;
 import org.elasticsearch.common.text.Text;
@@ -109,15 +110,16 @@ public class PlainHighlighter implements Highlighter {
             for (Object textToHighlight : textsToHighlight) {
                 String text = textToHighlight.toString();
-                TokenStream tokenStream = analyzer.tokenStream(mapper.fieldType().names().indexName(), text);
+                try (TokenStream tokenStream = analyzer.tokenStream(mapper.fieldType().names().indexName(), text)) {
                     if (!tokenStream.hasAttribute(CharTermAttribute.class) || !tokenStream.hasAttribute(OffsetAttribute.class)) {
                         // can't perform highlighting if the stream has no terms (binary token stream) or no offsets
                         continue;
                     }
                     TextFragment[] bestTextFragments = entry.getBestTextFragments(tokenStream, text, false, numberOfFragments);
                     for (TextFragment bestTextFragment : bestTextFragments) {
                         if (bestTextFragment != null && bestTextFragment.getScore() > 0) {
                             fragsList.add(bestTextFragment);
+                        }
                     }
                 }
             }
@@ -165,7 +167,7 @@ public class PlainHighlighter implements Highlighter {
             String fieldContents = textsToHighlight.get(0).toString();
             int end;
             try {
-                end = findGoodEndForNoHighlightExcerpt(noMatchSize, analyzer.tokenStream(mapper.fieldType().names().indexName(), fieldContents));
+                end = findGoodEndForNoHighlightExcerpt(noMatchSize, analyzer, mapper.fieldType().names().indexName(), fieldContents);
             } catch (Exception e) {
                 throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e);
             }
@@ -181,8 +183,8 @@ public class PlainHighlighter implements Highlighter {
         return true;
     }
-    private static int findGoodEndForNoHighlightExcerpt(int noMatchSize, TokenStream tokenStream) throws IOException {
-        try {
+    private static int findGoodEndForNoHighlightExcerpt(int noMatchSize, Analyzer analyzer, String fieldName, String contents) throws IOException {
+        try (TokenStream tokenStream = analyzer.tokenStream(fieldName, contents)) {
             if (!tokenStream.hasAttribute(OffsetAttribute.class)) {
                 // Can't split on term boundaries without offsets
                 return -1;
@@ -200,11 +202,9 @@ public class PlainHighlighter implements Highlighter {
             }
             end = attr.endOffset();
         }
+            tokenStream.end();
             // We've exhausted the token stream so we should just highlight everything.
             return end;
-        } finally {
-            tokenStream.end();
-            tokenStream.close();
         }
     }
 }
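findGoodEndForNoHighlightExcerpt now receives the analyzer and builds the TokenStream itself, so try-with-resources owns the stream and end() runs once after the last token. A compact sketch of that consumption pattern; the cut-off logic is simplified relative to the real method:

    import java.io.IOException;
    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

    class ExcerptEndSketch {
        static int findEnd(Analyzer analyzer, String field, String text, int minSize) throws IOException {
            try (TokenStream ts = analyzer.tokenStream(field, text)) {
                if (!ts.hasAttribute(OffsetAttribute.class)) {
                    return -1; // cannot split on term boundaries without offsets
                }
                OffsetAttribute attr = ts.addAttribute(OffsetAttribute.class);
                ts.reset();
                int end = -1;
                while (ts.incrementToken()) {
                    if (attr.endOffset() >= minSize) {
                        break; // far enough: cut at the previous token boundary
                    }
                    end = attr.endOffset();
                }
                ts.end(); // finalize offsets before close() runs
                return end;
            }
        }
    }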

View File

@@ -28,6 +28,7 @@ import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.CharsRefBuilder;
+import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.automaton.LevenshteinAutomata;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.ParseFieldMatcher;
@@ -116,22 +117,34 @@ public final class SuggestUtils {
     }
     public static int analyze(Analyzer analyzer, CharsRef toAnalyze, String field, TokenConsumer consumer) throws IOException {
-        TokenStream ts = analyzer.tokenStream(
-            field, new FastCharArrayReader(toAnalyze.chars, toAnalyze.offset, toAnalyze.length)
-        );
-        return analyze(ts, consumer);
+        try (TokenStream ts = analyzer.tokenStream(
+                 field, new FastCharArrayReader(toAnalyze.chars, toAnalyze.offset, toAnalyze.length))) {
+            return analyze(ts, consumer);
+        }
     }
+    /** NOTE: this method closes the TokenStream, even on exception, which is awkward
+     *  because really the caller who called {@link Analyzer#tokenStream} should close it,
+     *  but when trying that there are recursion issues when we try to use the same
+     *  TokenStream twice in the same recursion... */
     public static int analyze(TokenStream stream, TokenConsumer consumer) throws IOException {
-        stream.reset();
-        consumer.reset(stream);
         int numTokens = 0;
-        while (stream.incrementToken()) {
-            consumer.nextToken();
-            numTokens++;
+        boolean success = false;
+        try {
+            stream.reset();
+            consumer.reset(stream);
+            while (stream.incrementToken()) {
+                consumer.nextToken();
+                numTokens++;
+            }
+            consumer.end();
+            success = true; // must be set on the happy path, or the normal close below never runs
+        } finally {
+            if (success) {
+                stream.close();
+            } else {
+                IOUtils.closeWhileHandlingException(stream);
+            }
         }
-        consumer.end();
-        stream.close();
         return numTokens;
     }
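The reworked analyze uses the standard success-flag close idiom: close normally on the happy path, but close while suppressing secondary exceptions on failure so the original exception propagates (which is what IOUtils.closeWhileHandlingException provides). Note the flag has to be set after the work completes for the normal branch to ever run. A generic sketch:

    import java.io.Closeable;
    import java.io.IOException;

    class GuardedCloseSketch {
        static void consume(Closeable resource, Runnable work) throws IOException {
            boolean success = false;
            try {
                work.run();
                success = true; // only reached if work completed normally
            } finally {
                if (success) {
                    resource.close(); // may throw; nothing to mask here
                } else {
                    try {
                        resource.close();
                    } catch (IOException | RuntimeException suppressed) {
                        // swallowed so the original failure is the one thrown
                    }
                }
            }
        }
    }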

View File

@@ -100,9 +100,7 @@ public final class CompletionTokenStream extends TokenStream {
     @Override
     public void close() throws IOException {
-        if (posInc == -1) {
-            input.close();
-        }
+        input.close();
     }
     public static interface ToFiniteStrings {

View File

@@ -92,12 +92,13 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> {
         if (gens.size() > 0 && suggestTerms != null) {
             final NoisyChannelSpellChecker checker = new NoisyChannelSpellChecker(realWordErrorLikelihood, suggestion.getRequireUnigram(), suggestion.getTokenLimit());
             final BytesRef separator = suggestion.separator();
-            TokenStream stream = checker.tokenStream(suggestion.getAnalyzer(), suggestion.getText(), spare, suggestion.getField());
             WordScorer wordScorer = suggestion.model().newScorer(indexReader, suggestTerms, suggestField, realWordErrorLikelihood, separator);
-            Result checkerResult = checker.getCorrections(stream, new MultiCandidateGeneratorWrapper(suggestion.getShardSize(),
-                gens.toArray(new CandidateGenerator[gens.size()])), suggestion.maxErrors(),
-                suggestion.getShardSize(), wordScorer, suggestion.confidence(), suggestion.gramSize());
+            Result checkerResult;
+            try (TokenStream stream = checker.tokenStream(suggestion.getAnalyzer(), suggestion.getText(), spare, suggestion.getField())) {
+                checkerResult = checker.getCorrections(stream, new MultiCandidateGeneratorWrapper(suggestion.getShardSize(),
+                    gens.toArray(new CandidateGenerator[gens.size()])), suggestion.maxErrors(),
+                    suggestion.getShardSize(), wordScorer, suggestion.confidence(), suggestion.gramSize());
+            }
             PhraseSuggestion.Entry resultEntry = buildResultEntry(suggestion, spare, checkerResult.cutoffScore);
             response.addTerm(resultEntry);

View File

@@ -293,7 +293,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent<SnapshotSh
             for (final Map.Entry<ShardId, IndexShardSnapshotStatus> shardEntry : entry.getValue().entrySet()) {
                 final ShardId shardId = shardEntry.getKey();
                 try {
-                    final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).shard(shardId.id());
+                    final IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).getShardOrNull(shardId.id());
                     executor.execute(new AbstractRunnable() {
                         @Override
                         public void doRun() {

View File

@@ -69,8 +69,8 @@ grant codeBase "${es.security.plugin.lang-groovy}" {
   permission java.lang.RuntimePermission "accessClassInPackage.sun.reflect";
   // needed by GroovyScriptEngineService to close its classloader (why?)
   permission java.lang.RuntimePermission "closeClassLoader";
-  // Allow executing groovy scripts with codesource of /groovy/script
-  permission groovy.security.GroovyCodeSourcePermission "/groovy/script";
+  // Allow executing groovy scripts with codesource of /untrusted
+  permission groovy.security.GroovyCodeSourcePermission "/untrusted";
 };
 grant codeBase "${es.security.plugin.lang-javascript}" {

View File

@@ -18,8 +18,8 @@
  */
 /*
- * Limited security policy for groovy scripts.
- * This is what is needed for its invokeDynamic functionality to work.
+ * Limited security policy for scripts.
+ * This is what is needed for invokeDynamic functionality to work.
  */
 grant {

View File

@@ -158,7 +158,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase {
             IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node);
             IndexService indexShards = indexServices.indexServiceSafe(index);
             for (Integer shardId : indexShards.shardIds()) {
-                IndexShard shard = indexShards.shardSafe(shardId);
+                IndexShard shard = indexShards.getShard(shardId);
                 if (randomBoolean()) {
                     shard.failShard("test", new CorruptIndexException("test corrupted", ""));
                     Set<String> nodes = corruptedShardIDMap.get(shardId);

View File

@@ -65,7 +65,7 @@ public class UpgradeReallyOldIndexIT extends StaticIndexBackwardCompatibilityIT
         for (IndicesService services : internalCluster().getInstances(IndicesService.class)) {
             IndexService indexService = services.indexService(index);
             if (indexService != null) {
-                assertEquals(version, indexService.shard(0).minimumCompatibleVersion());
+                assertEquals(version, indexService.getShardOrNull(0).minimumCompatibleVersion());
             }
         }

View File

@@ -24,7 +24,9 @@ import org.elasticsearch.test.ESTestCase;
 import java.io.FilePermission;
 import java.security.AccessControlContext;
 import java.security.AccessController;
+import java.security.AllPermission;
 import java.security.CodeSource;
+import java.security.Permission;
 import java.security.PermissionCollection;
 import java.security.Permissions;
 import java.security.PrivilegedAction;
@@ -48,8 +50,13 @@ public class ESPolicyTests extends ESTestCase {
      */
     public void testNullCodeSource() throws Exception {
         assumeTrue("test cannot run with security manager", System.getSecurityManager() == null);
+        // create a policy with AllPermission
+        Permission all = new AllPermission();
+        PermissionCollection allCollection = all.newPermissionCollection();
+        allCollection.add(all);
+        ESPolicy policy = new ESPolicy(allCollection);
+        // restrict ourselves to NoPermission
         PermissionCollection noPermissions = new Permissions();
-        ESPolicy policy = new ESPolicy(noPermissions);
         assertFalse(policy.implies(new ProtectionDomain(null, noPermissions), new FilePermission("foo", "read")));
     }

View File

@@ -35,7 +35,6 @@ import java.security.ProtectionDomain;
 import java.security.cert.Certificate;
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.Objects;
 import java.util.Set;
 /**
@@ -99,18 +98,24 @@ final class MockPluginPolicy extends Policy {
         excludedSources.add(RandomizedRunner.class.getProtectionDomain().getCodeSource());
         // junit library
         excludedSources.add(Assert.class.getProtectionDomain().getCodeSource());
-        // groovy scripts
-        excludedSources.add(new CodeSource(new URL("file:/groovy/script"), (Certificate[])null));
+        // scripts
+        excludedSources.add(new CodeSource(new URL("file:" + BootstrapInfo.UNTRUSTED_CODEBASE), (Certificate[])null));
         Loggers.getLogger(getClass()).debug("Apply permissions [{}] excluding codebases [{}]", extraPermissions, excludedSources);
     }
     @Override
     public boolean implies(ProtectionDomain domain, Permission permission) {
+        CodeSource codeSource = domain.getCodeSource();
+        // codesource can be null when reducing privileges via doPrivileged()
+        if (codeSource == null) {
+            return false;
+        }
         if (standardPolicy.implies(domain, permission)) {
             return true;
-        } else if (excludedSources.contains(domain.getCodeSource()) == false &&
-                   Objects.toString(domain.getCodeSource()).contains("test-classes") == false) {
+        } else if (excludedSources.contains(codeSource) == false &&
+                   codeSource.toString().contains("test-classes") == false) {
             return extraPermissions.implies(permission);
         } else {
             return false;
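Both security changes hinge on the same fact: a ProtectionDomain with a null CodeSource is what a frame gets when privileges are reduced via AccessController.doPrivileged, so a policy can treat it as "grant nothing extra". A minimal sketch of such a policy:

    import java.security.CodeSource;
    import java.security.Permission;
    import java.security.Permissions;
    import java.security.Policy;
    import java.security.ProtectionDomain;

    class DenyNullCodeSourcePolicy extends Policy {
        private final Permissions extra = new Permissions(); // extra grants, if any

        @Override
        public boolean implies(ProtectionDomain domain, Permission permission) {
            CodeSource codeSource = domain.getCodeSource();
            if (codeSource == null) {
                return false; // reduced-privilege frames get no extra grants
            }
            return extra.implies(permission);
        }
    }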

View File

@@ -181,7 +181,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase {
                 DiscoveryNode discoveryNode = state.getNodes().get(nodeId);
                 IndicesService indicesService = internalTestCluster.getInstance(IndicesService.class, discoveryNode.getName());
                 IndexService indexService = indicesService.indexService(shard.index());
-                IndexShard indexShard = indexService.shard(shard.id());
+                IndexShard indexShard = indexService.getShardOrNull(shard.id());
                 assertEquals(indexShard.shardPath().getRootDataPath().toString(), dataPath);
             }

View File

@@ -925,6 +925,138 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase {
         assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().relocatingNodeId(), equalTo("node2"));
     }
public void testForSingleDataNode() {
Settings diskSettings = settingsBuilder()
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true)
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "60%")
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "70%").build();
ImmutableOpenMap.Builder<String, DiskUsage> usagesBuilder = ImmutableOpenMap.builder();
usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 100)); // 0% used
usagesBuilder.put("node2", new DiskUsage("node2", "n2", "/dev/null", 100, 20)); // 80% used
usagesBuilder.put("node3", new DiskUsage("node3", "n3", "/dev/null", 100, 100)); // 0% used
ImmutableOpenMap<String, DiskUsage> usages = usagesBuilder.build();
// We have an index with 2 primary shards, each taking 40 bytes. Each node has 100 bytes available
ImmutableOpenMap.Builder<String, Long> shardSizes = ImmutableOpenMap.builder();
shardSizes.put("[test][0][p]", 40L);
shardSizes.put("[test][1][p]", 40L);
final ClusterInfo clusterInfo = new DevNullClusterInfo(usages, usages, shardSizes.build());
DiskThresholdDecider diskThresholdDecider = new DiskThresholdDecider(diskSettings);
MetaData metaData = MetaData.builder()
.put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(2).numberOfReplicas(0))
.build();
RoutingTable routingTable = RoutingTable.builder()
.addAsNew(metaData.index("test"))
.build();
logger.info("--> adding one master node, one data node");
Map<String, String> masterNodeAttributes = new HashMap<>();
masterNodeAttributes.put("master", "true");
masterNodeAttributes.put("data", "false");
Map<String, String> dataNodeAttributes = new HashMap<>();
dataNodeAttributes.put("master", "false");
dataNodeAttributes.put("data", "true");
DiscoveryNode discoveryNode1 = new DiscoveryNode("", "node1", new LocalTransportAddress("1"), masterNodeAttributes, Version.CURRENT);
DiscoveryNode discoveryNode2 = new DiscoveryNode("", "node2", new LocalTransportAddress("2"), dataNodeAttributes, Version.CURRENT);
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(discoveryNode1).put(discoveryNode2).build();
ClusterState baseClusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
.metaData(metaData)
.routingTable(routingTable)
.nodes(discoveryNodes)
.build();
// Two shards consume 80% of the disk space on the data node, but since it is the only data node the shards should remain.
ShardRouting firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, null, true, ShardRoutingState.STARTED, 1);
ShardRouting secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", null, null, true, ShardRoutingState.STARTED, 1);
RoutingNode firstRoutingNode = new RoutingNode("node2", discoveryNode2, Arrays.asList(firstRouting, secondRouting));
RoutingTable.Builder builder = RoutingTable.builder().add(
IndexRoutingTable.builder("test")
.addIndexShard(new IndexShardRoutingTable.Builder(new ShardId("test", 0))
.addShard(firstRouting)
.build()
)
.addIndexShard(new IndexShardRoutingTable.Builder(new ShardId("test", 1))
.addShard(secondRouting)
.build()
)
);
ClusterState clusterState = ClusterState.builder(baseClusterState).routingTable(builder.build()).build();
RoutingAllocation routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), discoveryNodes, clusterInfo);
Decision decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
// Two shards should start happily
assertThat(decision.type(), equalTo(Decision.Type.YES));
ClusterInfoService cis = new ClusterInfoService() {
@Override
public ClusterInfo getClusterInfo() {
logger.info("--> calling fake getClusterInfo");
return clusterInfo;
}
@Override
public void addListener(Listener listener) {
}
};
AllocationDeciders deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList(
new SameShardAllocationDecider(Settings.EMPTY), diskThresholdDecider
)));
AllocationService strategy = new AllocationService(settingsBuilder()
.put("cluster.routing.allocation.concurrent_recoveries", 10)
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
.build(), deciders, makeShardsAllocators(), cis);
RoutingAllocation.Result result = strategy.reroute(clusterState);
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().currentNodeId(), equalTo("node2"));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().relocatingNodeId(), nullValue());
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().state(), equalTo(STARTED));
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().currentNodeId(), equalTo("node2"));
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().relocatingNodeId(), nullValue());
// Add another datanode, it should relocate.
logger.info("--> adding node3");
DiscoveryNode discoveryNode3 = new DiscoveryNode("", "node3", new LocalTransportAddress("3"), dataNodeAttributes, Version.CURRENT);
ClusterState updateClusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder(clusterState.nodes())
.put(discoveryNode3)).build();
firstRouting = TestShardRouting.newShardRouting("test", 0, "node2", null, null, true, ShardRoutingState.STARTED, 1);
secondRouting = TestShardRouting.newShardRouting("test", 1, "node2", "node3", null, true, ShardRoutingState.RELOCATING, 1);
firstRoutingNode = new RoutingNode("node2", discoveryNode2, Arrays.asList(firstRouting, secondRouting));
builder = RoutingTable.builder().add(
IndexRoutingTable.builder("test")
.addIndexShard(new IndexShardRoutingTable.Builder(new ShardId("test", 0))
.addShard(firstRouting)
.build()
)
.addIndexShard(new IndexShardRoutingTable.Builder(new ShardId("test", 1))
.addShard(secondRouting)
.build()
)
);
clusterState = ClusterState.builder(updateClusterState).routingTable(builder.build()).build();
routingAllocation = new RoutingAllocation(null, new RoutingNodes(clusterState), discoveryNodes, clusterInfo);
decision = diskThresholdDecider.canRemain(firstRouting, firstRoutingNode, routingAllocation);
assertThat(decision.type(), equalTo(Decision.Type.YES));
result = strategy.reroute(clusterState);
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().state(), equalTo(STARTED));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().currentNodeId(), equalTo("node2"));
assertThat(result.routingTable().index("test").getShards().get(0).primaryShard().relocatingNodeId(), nullValue());
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().state(), equalTo(RELOCATING));
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().currentNodeId(), equalTo("node2"));
assertThat(result.routingTable().index("test").getShards().get(1).primaryShard().relocatingNodeId(), equalTo("node3"));
}
    public void logShardStates(ClusterState state) {
        RoutingNodes rn = state.getRoutingNodes();
        logger.info("--> counts: total: {}, unassigned: {}, initializing: {}, relocating: {}, started: {}",

View File

@ -19,26 +19,24 @@
 package org.elasticsearch.cluster.routing.operation.hash.murmur3;
 
-import com.carrotsearch.randomizedtesting.generators.RandomInts;
-import com.carrotsearch.randomizedtesting.generators.RandomStrings;
-import com.google.common.hash.HashFunction;
-import com.google.common.hash.Hashing;
 import org.elasticsearch.cluster.routing.Murmur3HashFunction;
 import org.elasticsearch.test.ESTestCase;
 
 public class Murmur3HashFunctionTests extends ESTestCase {
 
-    public void test() {
-        // Make sure that we agree with guava
-        Murmur3HashFunction murmur3 = new Murmur3HashFunction();
-        HashFunction guavaMurmur3 = Hashing.murmur3_32();
-        for (int i = 0; i < 100; ++i) {
-            final String id = RandomStrings.randomRealisticUnicodeOfCodepointLength(getRandom(), RandomInts.randomIntBetween(getRandom(), 1, 20));
-            //final String id = "0";
-            final int hash1 = guavaMurmur3.newHasher().putUnencodedChars(id).hash().asInt();
-            final int hash2 = murmur3.hash(id);
-            assertEquals(hash1, hash2);
-        }
-    }
+    private static Murmur3HashFunction HASH = new Murmur3HashFunction();
+
+    public void testKnownValues() {
+        assertHash(0x5a0cb7c3, "hell");
+        assertHash(0xd7c31989, "hello");
+        assertHash(0x22ab2984, "hello w");
+        assertHash(0xdf0ca123, "hello wo");
+        assertHash(0xe7744d61, "hello wor");
+        assertHash(0xe07db09c, "The quick brown fox jumps over the lazy dog");
+        assertHash(0x4e63d2ad, "The quick brown fox jumps over the lazy cog");
+    }
+
+    private static void assertHash(int expected, String stringInput) {
+        assertEquals(expected, HASH.hash(stringInput));
+    }
 }
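
Hard-coding the expected hashes pins Murmur3HashFunction's output independently of Guava, which the rewritten test no longer needs on its classpath. A minimal sketch for regenerating the constants, using the same Guava call chain the removed test used (the class name and main method are illustrative):

    import com.google.common.hash.Hashing;

    public class PrintMurmur3Expectations {
        public static void main(String[] args) {
            for (String s : new String[] { "hell", "hello", "The quick brown fox jumps over the lazy dog" }) {
                // same call chain as the removed guava comparison test
                int h = Hashing.murmur3_32().newHasher().putUnencodedChars(s).hash().asInt();
                System.out.printf("assertHash(0x%08x, \"%s\");%n", h, s);
            }
        }
    }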

View File

@ -0,0 +1,81 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.hash;
import org.elasticsearch.test.ESTestCase;
import org.junit.Test;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import static org.junit.Assert.*;
public class MessageDigestsTests extends ESTestCase {
private void assertHash(String expected, String test, MessageDigest messageDigest) {
String actual = MessageDigests.toHexString(messageDigest.digest(test.getBytes(StandardCharsets.UTF_8)));
assertEquals(expected, actual);
}
@Test
public void testMd5() throws Exception {
assertHash("d41d8cd98f00b204e9800998ecf8427e", "", MessageDigests.md5());
assertHash("900150983cd24fb0d6963f7d28e17f72", "abc", MessageDigests.md5());
assertHash("8215ef0796a20bcaaae116d3876c664a", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.md5());
assertHash("7707d6ae4e027c70eea2a935c2296f21", new String(new char[1000000]).replace("\0", "a"), MessageDigests.md5());
assertHash("9e107d9d372bb6826bd81d3542a419d6", "The quick brown fox jumps over the lazy dog", MessageDigests.md5());
assertHash("1055d3e698d289f2af8663725127bd4b", "The quick brown fox jumps over the lazy cog", MessageDigests.md5());
}
@Test
public void testSha1() throws Exception {
assertHash("da39a3ee5e6b4b0d3255bfef95601890afd80709", "", MessageDigests.sha1());
assertHash("a9993e364706816aba3e25717850c26c9cd0d89d", "abc", MessageDigests.sha1());
assertHash("84983e441c3bd26ebaae4aa1f95129e5e54670f1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.sha1());
assertHash("34aa973cd4c4daa4f61eeb2bdbad27316534016f", new String(new char[1000000]).replace("\0", "a"), MessageDigests.sha1());
assertHash("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12", "The quick brown fox jumps over the lazy dog", MessageDigests.sha1());
assertHash("de9f2c7fd25e1b3afad3e85a0bd17d9b100db4b3", "The quick brown fox jumps over the lazy cog", MessageDigests.sha1());
}
@Test
public void testSha256() throws Exception {
assertHash("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "", MessageDigests.sha256());
assertHash("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", "abc", MessageDigests.sha256());
assertHash("248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1", "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq", MessageDigests.sha256());
assertHash("cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0", new String(new char[1000000]).replace("\0", "a"), MessageDigests.sha256());
assertHash("d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592", "The quick brown fox jumps over the lazy dog", MessageDigests.sha256());
assertHash("e4c4d8f3bf76b692de791a173e05321150f7a345b46484fe427f6acc7ecc81be", "The quick brown fox jumps over the lazy cog", MessageDigests.sha256());
}
@Test
public void testToHexString() throws Exception {
for (int i = 0; i < 1024; i++) {
BigInteger expected = BigInteger.probablePrime(256, random());
byte[] bytes = expected.toByteArray();
String hex = MessageDigests.toHexString(bytes);
String zeros = new String(new char[2 * bytes.length]).replace("\0", "0");
String expectedAsString = expected.toString(16);
String expectedHex = zeros.substring(expectedAsString.length()) + expectedAsString;
assertEquals(expectedHex, hex);
BigInteger actual = new BigInteger(hex, 16);
assertEquals(expected, actual);
}
}
}
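
The round-trip in testToHexString only holds if toHexString emits exactly two lowercase hex digits per byte, preserving leading zeros. A minimal sketch of an encoder consistent with that contract (an assumption for illustration, not necessarily the MessageDigests implementation):

    public static String toHexString(byte[] bytes) {
        StringBuilder sb = new StringBuilder(2 * bytes.length);
        for (byte b : bytes) {
            sb.append(Character.forDigit((b >> 4) & 0xf, 16)); // high nibble first
            sb.append(Character.forDigit(b & 0xf, 16));        // then low nibble
        }
        return sb.toString();
    }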

View File

@ -19,37 +19,34 @@
 package org.elasticsearch.common.hashing;
 
-import com.google.common.hash.HashCode;
-import com.google.common.hash.Hashing;
 import org.elasticsearch.common.hash.MurmurHash3;
 import org.elasticsearch.test.ESTestCase;
 
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.LongBuffer;
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.StandardCharsets;
 
 public class MurmurHash3Tests extends ESTestCase {
 
-    public void testHash128() {
-        final int iters = scaledRandomIntBetween(100, 5000);
-        for (int i = 0; i < iters; ++i) {
-            final int seed = randomInt();
-            final int offset = randomInt(20);
-            final int len = randomInt(randomBoolean() ? 20 : 200);
-            final byte[] bytes = new byte[len + offset + randomInt(3)];
-            getRandom().nextBytes(bytes);
-            HashCode h1 = Hashing.murmur3_128(seed).hashBytes(bytes, offset, len);
-            MurmurHash3.Hash128 h2 = MurmurHash3.hash128(bytes, offset, len, seed, new MurmurHash3.Hash128());
-            assertEquals(h1, h2);
-        }
-    }
-
-    private void assertEquals(HashCode h1, MurmurHash3.Hash128 h2) {
-        final LongBuffer longs = ByteBuffer.wrap(h1.asBytes()).order(ByteOrder.LITTLE_ENDIAN).asLongBuffer();
-        assertEquals(2, longs.limit());
-        assertEquals(h1.asLong(), h2.h1);
-        assertEquals(longs.get(), h2.h1);
-        assertEquals(longs.get(), h2.h2);
-    }
+    public void testKnownValues() throws UnsupportedEncodingException {
+        assertHash(0x629942693e10f867L, 0x92db0b82baeb5347L, "hell", 0);
+        assertHash(0xa78ddff5adae8d10L, 0x128900ef20900135L, "hello", 1);
+        assertHash(0x8a486b23f422e826L, 0xf962a2c58947765fL, "hello ", 2);
+        assertHash(0x2ea59f466f6bed8cL, 0xc610990acc428a17L, "hello w", 3);
+        assertHash(0x79f6305a386c572cL, 0x46305aed3483b94eL, "hello wo", 4);
+        assertHash(0xc2219d213ec1f1b5L, 0xa1d8e2e0a52785bdL, "hello wor", 5);
+        assertHash(0xe34bbc7bbc071b6cL, 0x7a433ca9c49a9347L, "The quick brown fox jumps over the lazy dog", 0);
+        assertHash(0x658ca970ff85269aL, 0x43fee3eaa68e5c3eL, "The quick brown fox jumps over the lazy cog", 0);
+    }
+
+    private static void assertHash(long lower, long upper, String inputString, long seed) {
+        byte[] bytes = inputString.getBytes(StandardCharsets.UTF_8);
+        MurmurHash3.Hash128 expected = new MurmurHash3.Hash128();
+        expected.h1 = lower;
+        expected.h2 = upper;
+        assertHash(expected, MurmurHash3.hash128(bytes, 0, bytes.length, seed, new MurmurHash3.Hash128()));
+    }
+
+    private static void assertHash(MurmurHash3.Hash128 expected, MurmurHash3.Hash128 actual) {
+        assertEquals(expected.h1, actual.h1);
+        assertEquals(expected.h2, actual.h2);
+    }
 }
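
As with the 32-bit test above, these 128-bit constants can be cross-checked against Guava, which the removed randomized test compared against; per the old assertions, Guava's digest decodes into h1 then h2 as little-endian longs. A regeneration sketch (class name and main method are illustrative):

    import com.google.common.hash.Hashing;

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;
    import java.nio.LongBuffer;
    import java.nio.charset.StandardCharsets;

    public class PrintMurmurHash3Expectations {
        public static void main(String[] args) {
            byte[] bytes = "hello".getBytes(StandardCharsets.UTF_8);
            LongBuffer longs = ByteBuffer.wrap(Hashing.murmur3_128(1).hashBytes(bytes).asBytes())
                    .order(ByteOrder.LITTLE_ENDIAN).asLongBuffer();
            // should print the lower (h1) and upper (h2) constants for ("hello", seed 1)
            System.out.printf("0x%016xL, 0x%016xL%n", longs.get(), longs.get());
        }
    }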

View File

@ -60,6 +60,22 @@ public abstract class ModuleTestCase extends ESTestCase {
fail("Did not find any binding to " + to.getName() + ". Found these bindings:\n" + s);
}
// /** Configures the module and asserts "instance" is bound to "to". */
// public void assertInstanceBinding(Module module, Class to, Object instance) {
// List<Element> elements = Elements.getElements(module);
// for (Element element : elements) {
// if (element instanceof ProviderInstanceBinding) {
// assertEquals(instance, ((ProviderInstanceBinding) element).getProviderInstance().get());
// return;
// }
// }
// StringBuilder s = new StringBuilder();
// for (Element element : elements) {
// s.append(element + "\n");
// }
// fail("Did not find any binding to " + to.getName() + ". Found these bindings:\n" + s);
// }
/**
 * Attempts to configure the module, and asserts an {@link IllegalArgumentException} is
 * caught, containing the given messages
@ -164,6 +180,10 @@ public abstract class ModuleTestCase extends ESTestCase {
return;
}
}
} else if (element instanceof ProviderInstanceBinding) {
ProviderInstanceBinding binding = (ProviderInstanceBinding) element;
assertTrue(tester.test(to.cast(binding.getProviderInstance().get())));
return;
}
}
StringBuilder s = new StringBuilder();

View File

@ -21,20 +21,21 @@ package org.elasticsearch.common.logging.log4j;
 import org.apache.log4j.Appender;
 import org.apache.log4j.Logger;
+import org.elasticsearch.common.cli.CliToolTestCase;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.node.internal.InternalSettingsPreparer;
 import org.elasticsearch.test.ESTestCase;
-import org.hamcrest.Matchers;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
 
-import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.notNullValue;
+import static org.hamcrest.Matchers.*;
 
 /**
  *
@ -148,7 +149,34 @@ public class LoggingConfigurationTests extends ESTestCase {
         LogConfigurator.resolveConfig(environment, builder);
         Settings logSettings = builder.build();
-        assertThat(logSettings.get("yml"), Matchers.nullValue());
+        assertThat(logSettings.get("yml"), nullValue());
     }
 
+    // tests that custom settings are not overwritten by settings in the config file
+    @Test
+    public void testResolveOrder() throws Exception {
+        Path tmpDir = createTempDir();
+        Path loggingConf = tmpDir.resolve(loggingConfiguration("yaml"));
+        Files.write(loggingConf, "logger.test: INFO, file\n".getBytes(StandardCharsets.UTF_8));
+        Files.write(loggingConf, "appender.file.type: file\n".getBytes(StandardCharsets.UTF_8), StandardOpenOption.APPEND);
+        Environment environment = InternalSettingsPreparer.prepareEnvironment(
+                Settings.builder()
+                        .put("path.conf", tmpDir.toAbsolutePath())
+                        .put("path.home", createTempDir().toString())
+                        .put("logger.test", "TRACE, console")
+                        .put("appender.console.type", "console")
+                        .put("appender.console.layout.type", "consolePattern")
+                        .put("appender.console.layout.conversionPattern", "[%d{ISO8601}][%-5p][%-25c] %m%n")
+                        .build(), new CliToolTestCase.MockTerminal());
+        LogConfigurator.configure(environment.settings());
+        // args should overwrite whatever is in the config
+        ESLogger esLogger = Log4jESLoggerFactory.getLogger("test");
+        Logger logger = ((Log4jESLogger) esLogger).logger();
+        Appender appender = logger.getAppender("console");
+        assertThat(appender, notNullValue());
+        assertTrue(logger.isTraceEnabled());
+        appender = logger.getAppender("file");
+        assertThat(appender, nullValue());
+    }
 
     private static String loggingConfiguration(String suffix) {
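
For reference, the two Files.write calls in testResolveOrder leave this YAML in the generated config file (assembled from the literal strings in the test):

    logger.test: INFO, file
    appender.file.type: file

so the assertions verify that the explicit "logger.test: TRACE, console" settings handed to prepareEnvironment take precedence over the file.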

View File

@ -0,0 +1,66 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.elasticsearch.common.inject.ModuleTestCase;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.engine.EngineFactory;
import org.elasticsearch.index.engine.InternalEngineFactory;
import org.elasticsearch.index.shard.IndexSearcherWrapper;
import org.elasticsearch.test.engine.MockEngineFactory;
public class IndexModuleTests extends ModuleTestCase {
public void testWrapperIsBound() {
IndexModule module = new IndexModule();
assertInstanceBinding(module, IndexSearcherWrapper.class,(x) -> x == null);
module.indexSearcherWrapper = Wrapper.class;
assertBinding(module, IndexSearcherWrapper.class, Wrapper.class);
}
public void testEngineFactoryBound() {
IndexModule module = new IndexModule();
assertBinding(module, EngineFactory.class, InternalEngineFactory.class);
module.engineFactoryImpl = MockEngineFactory.class;
assertBinding(module, EngineFactory.class, MockEngineFactory.class);
}
public void testOtherServiceBound() {
IndexModule module = new IndexModule();
assertBinding(module, IndexService.class, IndexService.class);
assertBinding(module, IndexServicesProvider.class, IndexServicesProvider.class);
}
public static final class Wrapper implements IndexSearcherWrapper {
@Override
public DirectoryReader wrap(DirectoryReader reader) {
return null;
}
@Override
public IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws EngineException {
return null;
}
}
}
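
The Wrapper above is deliberately a no-op so the binding assertions have a concrete class to check. A hedged sketch of what a functional implementation of the same interface could look like (the counter is an illustrative addition, not from this commit):

    import java.util.concurrent.atomic.AtomicLong;

    public static final class CountingWrapper implements IndexSearcherWrapper {
        final AtomicLong searcherWraps = new AtomicLong();

        @Override
        public DirectoryReader wrap(DirectoryReader reader) {
            return reader; // hand the reader back unchanged
        }

        @Override
        public IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws EngineException {
            searcherWraps.incrementAndGet(); // record each searcher acquisition
            return searcher;
        }
    }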

Some files were not shown because too many files have changed in this diff.