Changed the underlying DLS implementation
Instead of wrapping the IndexSearcher and applying the role query during the query rewrite, the role query is now applied in a custom filtered reader that exposes only the matching documents via its live docs. The big advantage is that DLS is applied to all document based APIs instead of just the _search and _percolate APIs. To better deal with the cost of converting the role query to a bitset, the bitsets are cached in the bitset filter cache, and if the role query bitset is sparse, the role query and the main query are executed in a leapfrog manner to speed up query execution. If the role query bitset isn't sparse, we fall back to the live docs.

Closes elastic/elasticsearch#537

Original commit: elastic/x-pack-elasticsearch@330b96e1f2
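For reference, the core of the live-docs approach is sketched below in Java. It mirrors the `DocumentSubsetReader#getLiveDocs()` implementation added in this change; the standalone helper class and method name are only illustrative and are not part of the commit.

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.Bits;

// Sketch: combine the cached role-query bitset with the segment's real live docs,
// so that document based APIs only "see" documents that match the role query.
final class RoleQueryLiveDocs {

    static Bits liveDocs(LeafReader reader, BitSet roleQueryBits) {
        final Bits actualLiveDocs = reader.getLiveDocs();
        if (roleQueryBits == null) {
            // nothing matches the role query, so every document is hidden
            return new Bits.MatchNoBits(reader.maxDoc());
        } else if (actualLiveDocs == null) {
            // no deletes in this segment: the role-query bitset alone acts as the live docs
            return roleQueryBits;
        } else {
            // hide documents that are either deleted or outside the role query
            return new Bits() {
                @Override
                public boolean get(int index) {
                    return roleQueryBits.get(index) && actualLiveDocs.get(index);
                }

                @Override
                public int length() {
                    return roleQueryBits.length();
                }
            };
        }
    }
}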
This commit is contained in:
parent
c1fc6e5e62
commit
547b6346f6
@@ -113,7 +113,7 @@ When field level security is enabled for an index:

==== Document Level Security

Enabling document level security restricts which documents can be accessed from any Elasticsearch query API.
Enabling document level security restricts which documents can be accessed from any document based API.
To enable document level security, you use a query to specify the documents that each role can access in the `roles.yml` file.
You specify the document query with the `query` option. The document query is associated with a particular index or index pattern and
operates in conjunction with the privileges specified for the indices.

@@ -155,5 +155,16 @@ customer_care:
  indices:
    '*':
      privileges: read
      query: '{"term" : {"field2" : "value2"}}'
--------------------------------------------------
      query: '{"term" : {"department_id" : "12"}}'
--------------------------------------------------

===== Limitations

When document level security is enabled for an index:

* The get, multi get, term vectors and multi term vectors APIs aren't executed in real time. The realtime option for these APIs is forcefully set to false.
* Document level security isn't applied to APIs that aren't document based. For example, this is the case for the field stats API.
* Document level security doesn't affect the global index statistics that relevancy scoring uses. This means that scores are computed without taking the role query into account.
Note that documents that don't match the role query are never returned.
* The `has_child` and `has_parent` queries aren't supported as a role query in the `roles.yml` file.
The `has_child` and `has_parent` queries can be used in the search API with document level security enabled.
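To make the configuration above concrete, a complete role definition with a document level security query would look roughly like the following. The role name, index pattern, and query are taken from the examples in this change, but the exact indentation in `roles.yml` may differ.

customer_care:
  indices:
    '*':
      privileges: read
      query: '{"term" : {"department_id" : "12"}}'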
@@ -8,6 +8,7 @@ package org.elasticsearch.shield.action.interceptor;
|
|||
import org.elasticsearch.action.CompositeIndicesRequest;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.shield.User;
|
||||
import org.elasticsearch.shield.authz.accesscontrol.IndicesAccessControl;
|
||||
|
@@ -34,7 +35,7 @@ public abstract class FieldSecurityRequestInterceptor<Request> extends AbstractC
|
|||
} else if (request instanceof IndicesRequest) {
|
||||
indicesRequests = Collections.singletonList((IndicesRequest) request);
|
||||
} else {
|
||||
return;
|
||||
throw new IllegalArgumentException(LoggerMessageFormat.format("Expected a request of type [{}] or [{}] but got [{}] instead", CompositeIndicesRequest.class, IndicesRequest.class, request.getClass()));
|
||||
}
|
||||
IndicesAccessControl indicesAccessControl = ((TransportRequest) request).getFromContext(InternalAuthorizationService.INDICES_PERMISSIONS_KEY);
|
||||
for (IndicesRequest indicesRequest : indicesRequests) {
@@ -5,16 +5,26 @@
|
|||
*/
|
||||
package org.elasticsearch.shield.action.interceptor;
|
||||
|
||||
import org.elasticsearch.action.CompositeIndicesRequest;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.RealtimeRequest;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.shield.User;
|
||||
import org.elasticsearch.shield.authz.InternalAuthorizationService;
|
||||
import org.elasticsearch.shield.authz.accesscontrol.IndicesAccessControl;
|
||||
import org.elasticsearch.transport.TransportRequest;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* If field level security is enabled this interceptor disables the realtime feature of get, multi get, termsvector and
|
||||
* If field level or document level security is enabled this interceptor disables the realtime feature of get, multi get, termsvector and
|
||||
* multi termsvector requests.
|
||||
*/
|
||||
public class RealtimeRequestInterceptor extends FieldSecurityRequestInterceptor<RealtimeRequest> {
|
||||
public class RealtimeRequestInterceptor extends AbstractComponent implements RequestInterceptor<RealtimeRequest> {
|
||||
|
||||
@Inject
|
||||
public RealtimeRequestInterceptor(Settings settings) {
|
||||
|
@@ -22,8 +32,28 @@ public class RealtimeRequestInterceptor extends FieldSecurityRequestInterceptor<
|
|||
}
|
||||
|
||||
@Override
|
||||
public void disableFeatures(RealtimeRequest request) {
|
||||
request.realtime(false);
|
||||
public void intercept(RealtimeRequest request, User user) {
|
||||
List<? extends IndicesRequest> indicesRequests;
|
||||
if (request instanceof CompositeIndicesRequest) {
|
||||
indicesRequests = ((CompositeIndicesRequest) request).subRequests();
|
||||
} else if (request instanceof IndicesRequest) {
|
||||
indicesRequests = Collections.singletonList((IndicesRequest) request);
|
||||
} else {
|
||||
throw new IllegalArgumentException(LoggerMessageFormat.format("Expected a request of type [{}] or [{}] but got [{}] instead", CompositeIndicesRequest.class, IndicesRequest.class, request.getClass()));
|
||||
}
|
||||
IndicesAccessControl indicesAccessControl = ((TransportRequest) request).getFromContext(InternalAuthorizationService.INDICES_PERMISSIONS_KEY);
|
||||
for (IndicesRequest indicesRequest : indicesRequests) {
|
||||
for (String index : indicesRequest.indices()) {
|
||||
IndicesAccessControl.IndexAccessControl indexAccessControl = indicesAccessControl.getIndexPermissions(index);
|
||||
if (indexAccessControl != null && (indexAccessControl.getFields() != null || indexAccessControl.getQueries() != null)) {
|
||||
logger.debug("intercepted request for index [{}] with field level or document level security enabled, forcefully disabling realtime", index);
|
||||
request.realtime(false);
|
||||
return;
|
||||
} else {
|
||||
logger.trace("intercepted request for index [{}] with field level security and document level not enabled, doing nothing", index);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
@@ -0,0 +1,151 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.shield.authz.accesscontrol;
|
||||
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.FilterDirectoryReader;
|
||||
import org.apache.lucene.index.FilterLeafReader;
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.util.*;
|
||||
import org.apache.lucene.util.BitSet;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
|
||||
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* A reader that, via {@link #getLiveDocs()}, only exposes documents that match the provided role query.
|
||||
*/
|
||||
public final class DocumentSubsetReader extends FilterLeafReader {
|
||||
|
||||
public static DirectoryReader wrap(DirectoryReader in, BitsetFilterCache bitsetFilterCache, Query roleQuery) throws IOException {
|
||||
return new DocumentSubsetDirectoryReader(in, bitsetFilterCache, roleQuery);
|
||||
}
|
||||
|
||||
final static class DocumentSubsetDirectoryReader extends FilterDirectoryReader {
|
||||
|
||||
private final Query roleQuery;
|
||||
private final BitsetFilterCache bitsetFilterCache;
|
||||
|
||||
DocumentSubsetDirectoryReader(final DirectoryReader in, final BitsetFilterCache bitsetFilterCache, final Query roleQuery) throws IOException {
|
||||
super(in, new SubReaderWrapper() {
|
||||
@Override
|
||||
public LeafReader wrap(LeafReader reader) {
|
||||
try {
|
||||
return new DocumentSubsetReader(reader, bitsetFilterCache, roleQuery);
|
||||
} catch (Exception e) {
|
||||
throw ExceptionsHelper.convertToElastic(e);
|
||||
}
|
||||
}
|
||||
});
|
||||
this.bitsetFilterCache = bitsetFilterCache;
|
||||
this.roleQuery = roleQuery;
|
||||
|
||||
verifyNoOtherDocumentSubsetDirectoryReaderIsWrapped(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
|
||||
return new DocumentSubsetDirectoryReader(in, bitsetFilterCache, roleQuery);
|
||||
}
|
||||
|
||||
private static void verifyNoOtherDocumentSubsetDirectoryReaderIsWrapped(DirectoryReader reader) {
|
||||
if (reader instanceof FilterDirectoryReader) {
|
||||
FilterDirectoryReader filterDirectoryReader = (FilterDirectoryReader) reader;
|
||||
if (filterDirectoryReader instanceof DocumentSubsetDirectoryReader) {
|
||||
throw new IllegalArgumentException(LoggerMessageFormat.format("Can't wrap [{}] twice", DocumentSubsetDirectoryReader.class));
|
||||
} else {
|
||||
verifyNoOtherDocumentSubsetDirectoryReaderIsWrapped(filterDirectoryReader.getDelegate());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
private final BitSet roleQueryBits;
|
||||
private volatile int numDocs = -1;
|
||||
|
||||
private DocumentSubsetReader(final LeafReader in, BitsetFilterCache bitsetFilterCache, final Query roleQuery) throws Exception {
|
||||
super(in);
|
||||
this.roleQueryBits = bitsetFilterCache.getBitSetProducer(roleQuery).getBitSet(in.getContext());
|
||||
}
|
||||
|
||||
@Override
|
||||
public Bits getLiveDocs() {
|
||||
final Bits actualLiveDocs = in.getLiveDocs();
|
||||
if (roleQueryBits == null) {
|
||||
// If we would return a <code>null</code> liveDocs here then that would mean that no docs are marked as deleted,
// but that isn't the case: no docs match the role query, and therefore all docs are marked as deleted
|
||||
return new Bits.MatchNoBits(in.maxDoc());
|
||||
} else if (actualLiveDocs == null) {
|
||||
return roleQueryBits;
|
||||
} else {
|
||||
// apply deletes when needed:
|
||||
return new Bits() {
|
||||
|
||||
@Override
|
||||
public boolean get(int index) {
|
||||
return roleQueryBits.get(index) && actualLiveDocs.get(index);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int length() {
|
||||
return roleQueryBits.length();
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int numDocs() {
|
||||
// The reason to implement this method is that numDocs should be equal to the number of set bits in liveDocs (it would be weird otherwise).
// For the Shield DLS use case this gets invoked in the QueryPhase class (in core ES) if a match_all query is used as the main query,
// and this is also invoked in tests.
|
||||
if (numDocs == -1) {
|
||||
final Bits liveDocs = in.getLiveDocs();
|
||||
if (roleQueryBits == null) {
|
||||
numDocs = 0;
|
||||
} else if (liveDocs == null) {
|
||||
numDocs = roleQueryBits.cardinality();
|
||||
} else {
|
||||
// this is slow, but necessary in order to be correct:
|
||||
try {
|
||||
DocIdSetIterator iterator = new FilteredDocIdSetIterator(new BitSetIterator(roleQueryBits, roleQueryBits.approximateCardinality())) {
|
||||
@Override
|
||||
protected boolean match(int doc) {
|
||||
return liveDocs.get(doc);
|
||||
}
|
||||
};
|
||||
int counter = 0;
|
||||
for (int docId = iterator.nextDoc(); docId < DocIdSetIterator.NO_MORE_DOCS; docId = iterator.nextDoc()) {
|
||||
counter++;
|
||||
}
|
||||
numDocs = counter;
|
||||
} catch (IOException e) {
|
||||
throw ExceptionsHelper.convertToElastic(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
return numDocs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasDeletions() {
|
||||
// we always return liveDocs and hide docs:
|
||||
return true;
|
||||
}
|
||||
|
||||
BitSet getRoleQueryBits() {
|
||||
return roleQueryBits;
|
||||
}
|
||||
|
||||
Bits getWrappedLiveDocs() {
|
||||
return in.getLiveDocs();
|
||||
}
|
||||
|
||||
}
@@ -12,6 +12,7 @@ import org.apache.lucene.util.FilterIterator;
|
|||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
|
@@ -58,6 +59,7 @@ public final class FieldSubsetReader extends FilterLeafReader {
|
|||
}
|
||||
});
|
||||
this.fieldNames = fieldNames;
|
||||
verifyNoOtherFieldSubsetDirectoryReaderIsWrapped(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -68,6 +70,17 @@ public final class FieldSubsetReader extends FilterLeafReader {
|
|||
public Set<String> getFieldNames() {
|
||||
return fieldNames;
|
||||
}
|
||||
|
||||
private static void verifyNoOtherFieldSubsetDirectoryReaderIsWrapped(DirectoryReader reader) {
|
||||
if (reader instanceof FilterDirectoryReader) {
|
||||
FilterDirectoryReader filterDirectoryReader = (FilterDirectoryReader) reader;
|
||||
if (filterDirectoryReader instanceof FieldSubsetDirectoryReader) {
|
||||
throw new IllegalArgumentException(LoggerMessageFormat.format("Can't wrap [{}] twice", FieldSubsetDirectoryReader.class));
|
||||
} else {
|
||||
verifyNoOtherFieldSubsetDirectoryReaderIsWrapped(filterDirectoryReader.getDelegate());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** List of filtered fields */
@@ -6,12 +6,16 @@
|
|||
package org.elasticsearch.shield.authz.accesscontrol;
|
||||
|
||||
import org.apache.lucene.index.DirectoryReader;
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.util.*;
|
||||
import org.apache.lucene.util.BitSet;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.logging.support.LoggerMessageFormat;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
|
||||
import org.elasticsearch.index.engine.EngineConfig;
|
||||
import org.elasticsearch.index.engine.EngineException;
|
||||
import org.elasticsearch.index.engine.IndexSearcherWrapper;
|
||||
|
@@ -27,26 +31,23 @@ import org.elasticsearch.index.shard.ShardId;
|
|||
import org.elasticsearch.index.shard.ShardUtils;
|
||||
import org.elasticsearch.indices.IndicesLifecycle;
|
||||
import org.elasticsearch.shield.authz.InternalAuthorizationService;
|
||||
import org.elasticsearch.shield.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader;
|
||||
import org.elasticsearch.shield.support.Exceptions;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
import java.util.*;
|
||||
|
||||
import static org.apache.lucene.search.BooleanClause.Occur.FILTER;
|
||||
import static org.apache.lucene.search.BooleanClause.Occur.MUST;
|
||||
|
||||
/**
|
||||
* An {@link IndexSearcherWrapper} implementation that is used for field and document level security.
|
||||
*
|
||||
* <p/>
|
||||
* Based on the {@link RequestContext} this class will enable field and/or document level security.
|
||||
*
|
||||
* <p/>
|
||||
* Field level security is enabled by wrapping the original {@link DirectoryReader} in a {@link FieldSubsetReader}
|
||||
* in the {@link #wrap(DirectoryReader)} method.
|
||||
*
|
||||
* Document level security is enabled by replacing the original {@link IndexSearcher} with a {@link ShieldIndexSearcherWrapper.ShieldIndexSearcher}
|
||||
* <p/>
|
||||
* Document level security is enabled by wrapping the original {@link DirectoryReader} in a {@link DocumentSubsetReader}
|
||||
* instance.
|
||||
*/
|
||||
public final class ShieldIndexSearcherWrapper extends AbstractIndexShardComponent implements IndexSearcherWrapper {
|
||||
|
@@ -54,14 +55,16 @@ public final class ShieldIndexSearcherWrapper extends AbstractIndexShardComponen
|
|||
private final MapperService mapperService;
|
||||
private final Set<String> allowedMetaFields;
|
||||
private final IndexQueryParserService parserService;
|
||||
private final BitsetFilterCache bitsetFilterCache;
|
||||
|
||||
private volatile boolean shardStarted = false;
|
||||
|
||||
@Inject
|
||||
public ShieldIndexSearcherWrapper(ShardId shardId, @IndexSettings Settings indexSettings, IndexQueryParserService parserService, IndicesLifecycle indicesLifecycle, MapperService mapperService) {
|
||||
public ShieldIndexSearcherWrapper(ShardId shardId, @IndexSettings Settings indexSettings, IndexQueryParserService parserService, IndicesLifecycle indicesLifecycle, MapperService mapperService, BitsetFilterCache bitsetFilterCache) {
|
||||
super(shardId, indexSettings);
|
||||
this.mapperService = mapperService;
|
||||
this.parserService = parserService;
|
||||
this.bitsetFilterCache = bitsetFilterCache;
|
||||
indicesLifecycle.addListener(new ShardLifecycleListener());
|
||||
|
||||
Set<String> allowedMetaFields = new HashSet<>();
|
||||
|
@@ -100,18 +103,31 @@ public final class ShieldIndexSearcherWrapper extends AbstractIndexShardComponen
|
|||
}
|
||||
|
||||
IndicesAccessControl.IndexAccessControl permissions = indicesAccessControl.getIndexPermissions(shardId.getIndex());
|
||||
// Either no permissions have been defined for an index or no fields have been configured for a role permission
|
||||
if (permissions == null || permissions.getFields() == null) {
|
||||
// No permissions have been defined for an index, so don't intercept the index reader for access control
|
||||
if (permissions == null) {
|
||||
return reader;
|
||||
}
|
||||
|
||||
// now add the allowed fields based on the currently granted permissions:
|
||||
Set<String> allowedFields = new HashSet<>(allowedMetaFields);
|
||||
for (String field : permissions.getFields()) {
|
||||
allowedFields.addAll(mapperService.simpleMatchToIndexNames(field));
|
||||
if (permissions.getQueries() != null) {
|
||||
BooleanQuery.Builder roleQuery = new BooleanQuery.Builder();
|
||||
for (BytesReference bytesReference : permissions.getQueries()) {
|
||||
ParsedQuery parsedQuery = parserService.parse(bytesReference);
|
||||
roleQuery.add(parsedQuery.query(), FILTER);
|
||||
}
|
||||
reader = DocumentSubsetReader.wrap(reader, bitsetFilterCache, roleQuery.build());
|
||||
}
|
||||
resolveParentChildJoinFields(allowedFields);
|
||||
return FieldSubsetReader.wrap(reader, allowedFields);
|
||||
|
||||
if (permissions.getFields() != null) {
|
||||
// now add the allowed fields based on the currently granted permissions:
|
||||
Set<String> allowedFields = new HashSet<>(allowedMetaFields);
|
||||
for (String field : permissions.getFields()) {
|
||||
allowedFields.addAll(mapperService.simpleMatchToIndexNames(field));
|
||||
}
|
||||
resolveParentChildJoinFields(allowedFields);
|
||||
reader = FieldSubsetReader.wrap(reader, allowedFields);
|
||||
}
|
||||
|
||||
return reader;
|
||||
} catch (IOException e) {
|
||||
logger.error("Unable to apply field level security");
|
||||
throw ExceptionsHelper.convertToElastic(e);
|
||||
|
@@ -120,53 +136,61 @@ public final class ShieldIndexSearcherWrapper extends AbstractIndexShardComponen
|
|||
|
||||
@Override
|
||||
public IndexSearcher wrap(EngineConfig engineConfig, IndexSearcher searcher) throws EngineException {
|
||||
RequestContext context = RequestContext.current();
|
||||
if (context == null) {
|
||||
if (shardStarted == false) {
|
||||
// The shard this index searcher wrapper has been created for hasn't started yet.
// We may load some initial data, such as previously stored percolator queries, and run recovery,
// so for this reason we should provide access to all documents:
|
||||
return searcher;
|
||||
} else {
|
||||
logger.debug("couldn't locate the current request, document level security hides all documents");
|
||||
return new ShieldIndexSearcher(engineConfig, searcher, new MatchNoDocsQuery());
|
||||
}
|
||||
}
|
||||
final DirectoryReader directoryReader = (DirectoryReader) searcher.getIndexReader();
|
||||
if (directoryReader instanceof DocumentSubsetDirectoryReader) {
|
||||
// The reasons why we return a custom searcher:
// 1) if the role query is sparse then a large part of the main query can be skipped
// 2) if the role query doesn't match any docs in a segment, then that segment can be skipped
|
||||
IndexSearcher indexSearcher = new IndexSearcher(directoryReader) {
|
||||
|
||||
ShardId shardId = ShardUtils.extractShardId(searcher.getIndexReader());
|
||||
if (shardId == null) {
|
||||
throw new IllegalStateException(LoggerMessageFormat.format("couldn't extract shardId from reader [{}]", searcher.getIndexReader()));
|
||||
}
|
||||
IndicesAccessControl indicesAccessControl = context.getRequest().getFromContext(InternalAuthorizationService.INDICES_PERMISSIONS_KEY);
|
||||
if (indicesAccessControl == null) {
|
||||
throw Exceptions.authorizationError("no indices permissions found");
|
||||
}
|
||||
@Override
|
||||
protected void search(List<LeafReaderContext> leaves, Weight weight, Collector collector) throws IOException {
|
||||
for (LeafReaderContext ctx : leaves) { // search each subreader
|
||||
final LeafCollector leafCollector;
|
||||
try {
|
||||
leafCollector = collector.getLeafCollector(ctx);
|
||||
} catch (CollectionTerminatedException e) {
|
||||
// there is no doc of interest in this reader context
|
||||
// continue with the following leaf
|
||||
continue;
|
||||
}
|
||||
// The reader is always of type DocumentSubsetReader when we get here:
|
||||
DocumentSubsetReader reader = (DocumentSubsetReader) ctx.reader();
|
||||
|
||||
IndicesAccessControl.IndexAccessControl permissions = indicesAccessControl.getIndexPermissions(shardId.getIndex());
|
||||
if (permissions == null) {
|
||||
return searcher;
|
||||
} else if (permissions.getQueries() == null) {
|
||||
return searcher;
|
||||
}
|
||||
BitSet roleQueryBits = reader.getRoleQueryBits();
|
||||
if (roleQueryBits == null) {
|
||||
// nothing matches with the role query, so skip this segment:
|
||||
continue;
|
||||
}
|
||||
|
||||
final Query roleQuery;
|
||||
switch (permissions.getQueries().size()) {
|
||||
case 0:
|
||||
roleQuery = new MatchNoDocsQuery();
|
||||
break;
|
||||
case 1:
|
||||
roleQuery = parserService.parse(permissions.getQueries().iterator().next()).query();
|
||||
break;
|
||||
default:
|
||||
BooleanQuery bq = new BooleanQuery();
|
||||
for (BytesReference bytesReference : permissions.getQueries()) {
|
||||
ParsedQuery parsedQuery = parserService.parse(bytesReference);
|
||||
bq.add(parsedQuery.query(), MUST);
|
||||
Scorer scorer = weight.scorer(ctx);
|
||||
if (scorer != null) {
|
||||
try {
|
||||
// if the role query result set is sparse then we should use the SparseFixedBitSet for advancing:
|
||||
if (roleQueryBits instanceof SparseFixedBitSet) {
|
||||
SparseFixedBitSet sparseFixedBitSet = (SparseFixedBitSet) roleQueryBits;
|
||||
Bits realLiveDocs = reader.getWrappedLiveDocs();
|
||||
intersectScorerAndRoleBits(scorer, sparseFixedBitSet, leafCollector, realLiveDocs);
|
||||
} else {
|
||||
BulkScorer bulkScorer = weight.bulkScorer(ctx);
|
||||
Bits liveDocs = reader.getLiveDocs();
|
||||
bulkScorer.score(leafCollector, liveDocs);
|
||||
}
|
||||
} catch (CollectionTerminatedException e) {
|
||||
// collection was terminated prematurely
|
||||
// continue with the following leaf
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
roleQuery = bq;
|
||||
break;
|
||||
};
|
||||
indexSearcher.setQueryCache(engineConfig.getQueryCache());
|
||||
indexSearcher.setQueryCachingPolicy(engineConfig.getQueryCachingPolicy());
|
||||
indexSearcher.setSimilarity(engineConfig.getSimilarity());
|
||||
return indexSearcher;
|
||||
}
|
||||
return new ShieldIndexSearcher(engineConfig, searcher, roleQuery);
|
||||
return searcher;
|
||||
}
|
||||
|
||||
public Set<String> getAllowedMetaFields() {
|
||||
|
@@ -183,73 +207,6 @@ public final class ShieldIndexSearcherWrapper extends AbstractIndexShardComponen
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* An {@link IndexSearcher} implementation that applies the role query for document level security during the
|
||||
* query rewrite and disabled the query cache if required when field level security is enabled.
|
||||
*/
|
||||
static final class ShieldIndexSearcher extends IndexSearcher {
|
||||
|
||||
private final Query roleQuery;
|
||||
|
||||
private ShieldIndexSearcher(EngineConfig engineConfig, IndexSearcher in, Query roleQuery) {
|
||||
super(in.getIndexReader());
|
||||
setSimilarity(in.getSimilarity(true));
|
||||
setQueryCache(engineConfig.getQueryCache());
|
||||
setQueryCachingPolicy(engineConfig.getQueryCachingPolicy());
|
||||
try {
|
||||
this.roleQuery = super.rewrite(roleQuery);
|
||||
} catch (IOException e) {
|
||||
throw new IllegalStateException("Could not rewrite role query", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Query rewrite(Query query) throws IOException {
|
||||
query = super.rewrite(query);
|
||||
query = wrap(query);
|
||||
query = super.rewrite(query);
|
||||
return query;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "ShieldIndexSearcher(" + super.toString() + ")";
|
||||
}
|
||||
|
||||
private boolean isFilteredBy(Query query, Query filter) {
|
||||
if (query instanceof ConstantScoreQuery) {
|
||||
return isFilteredBy(((ConstantScoreQuery) query).getQuery(), filter);
|
||||
} else if (query instanceof BooleanQuery) {
|
||||
BooleanQuery bq = (BooleanQuery) query;
|
||||
for (BooleanClause clause : bq) {
|
||||
if (clause.isRequired() && isFilteredBy(clause.getQuery(), filter)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
} else {
|
||||
Query queryClone = query.clone();
|
||||
queryClone.setBoost(1);
|
||||
Query filterClone = filter.clone();
|
||||
filterClone.setBoost(1f);
|
||||
return queryClone.equals(filterClone);
|
||||
}
|
||||
}
|
||||
|
||||
private Query wrap(Query original) throws IOException {
|
||||
if (isFilteredBy(original, roleQuery)) {
|
||||
// this is necessary in order to make rewrite "stable",
|
||||
// ie calling it several times on the same query keeps
|
||||
// on returning the same query instance
|
||||
return original;
|
||||
}
|
||||
return new BooleanQuery.Builder()
|
||||
.add(original, MUST)
|
||||
.add(roleQuery, FILTER)
|
||||
.build();
|
||||
}
|
||||
}
|
||||
|
||||
private class ShardLifecycleListener extends IndicesLifecycle.Listener {
|
||||
|
||||
@Override
|
||||
|
@@ -260,4 +217,14 @@ public final class ShieldIndexSearcherWrapper extends AbstractIndexShardComponen
|
|||
}
|
||||
}
|
||||
|
||||
static void intersectScorerAndRoleBits(Scorer scorer, SparseFixedBitSet roleBits, LeafCollector collector, Bits acceptDocs) throws IOException {
|
||||
// ConjunctionDISI uses the DocIdSetIterator#cost() to order the iterators, so if roleBits has the lowest cardinality it should be used first:
|
||||
DocIdSetIterator iterator = ConjunctionDISI.intersect(Arrays.asList(new BitSetIterator(roleBits, roleBits.approximateCardinality()), scorer));
|
||||
for (int docId = iterator.nextDoc(); docId < DocIdSetIterator.NO_MORE_DOCS; docId = iterator.nextDoc()) {
|
||||
if (acceptDocs == null || acceptDocs.get(docId)) {
|
||||
collector.collect(docId);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
@@ -61,8 +61,8 @@ public class DocumentAndFieldLevelSecurityTests extends ShieldIntegTestCase {
|
|||
" indices:\n" +
|
||||
" '*':\n" +
|
||||
" privileges: ALL\n" +
|
||||
" fields: field2\n" +
|
||||
" query: '{\"term\" : {\"field1\" : \"value1\"}}'\n";
|
||||
" fields: field1\n" +
|
||||
" query: '{\"term\" : {\"field2\" : \"value2\"}}'\n";
|
||||
}
|
||||
|
||||
public void testSimpleQuery() throws Exception {
|
||||
|
@@ -98,7 +98,10 @@ public class DocumentAndFieldLevelSecurityTests extends ShieldIntegTestCase {
|
|||
.setSettings(Settings.builder().put(IndexCacheModule.QUERY_CACHE_EVERYTHING, true))
|
||||
.addMapping("type1", "field1", "type=string", "field2", "type=string")
|
||||
);
|
||||
client().prepareIndex("test", "type1", "1").setSource("field1", "value1", "field2", "value2")
|
||||
client().prepareIndex("test", "type1", "1").setSource("field1", "value1")
|
||||
.setRefresh(true)
|
||||
.get();
|
||||
client().prepareIndex("test", "type1", "2").setSource("field2", "value2")
|
||||
.setRefresh(true)
|
||||
.get();
|
||||
|
||||
|
@@ -109,10 +112,25 @@ public class DocumentAndFieldLevelSecurityTests extends ShieldIntegTestCase {
|
|||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))
|
||||
.get();
|
||||
assertHitCount(response, 1);
|
||||
assertThat(response.getHits().getAt(0).getId(), equalTo("1"));
|
||||
assertThat(response.getHits().getAt(0).sourceAsMap().size(), equalTo(1));
|
||||
assertThat(response.getHits().getAt(0).sourceAsMap().get("field1"), equalTo("value1"));
|
||||
response = client().prepareSearch("test")
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))
|
||||
.get();
|
||||
assertHitCount(response, 1);
|
||||
assertThat(response.getHits().getAt(0).getId(), equalTo("2"));
|
||||
assertThat(response.getHits().getAt(0).sourceAsMap().size(), equalTo(1));
|
||||
assertThat(response.getHits().getAt(0).sourceAsMap().get("field2"), equalTo("value2"));
|
||||
|
||||
// This is a bit weird: the document level permission (all docs with field2:value2) doesn't match the field level permission (field1),
// which results in document 2 being returned but with no fields visible:
|
||||
response = client().prepareSearch("test")
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user3", USERS_PASSWD))
|
||||
.get();
|
||||
assertHitCount(response, 0);
|
||||
assertHitCount(response, 1);
|
||||
assertThat(response.getHits().getAt(0).getId(), equalTo("2"));
|
||||
assertThat(response.getHits().getAt(0).sourceAsMap().size(), equalTo(0));
|
||||
}
|
||||
}
@@ -5,9 +5,17 @@
|
|||
*/
|
||||
package org.elasticsearch.integration;
|
||||
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.elasticsearch.action.get.GetResponse;
|
||||
import org.elasticsearch.action.get.MultiGetResponse;
|
||||
import org.elasticsearch.action.percolate.PercolateResponse;
|
||||
import org.elasticsearch.action.percolate.PercolateSourceBuilder;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.support.QuerySourceBuilder;
|
||||
import org.elasticsearch.action.termvectors.MultiTermVectorsResponse;
|
||||
import org.elasticsearch.action.termvectors.TermVectorsRequest;
|
||||
import org.elasticsearch.action.termvectors.TermVectorsResponse;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.search.aggregations.AggregationBuilders;
|
||||
import org.elasticsearch.search.aggregations.bucket.children.Children;
|
||||
import org.elasticsearch.search.aggregations.bucket.global.Global;
|
||||
|
@@ -22,6 +30,7 @@ import static org.elasticsearch.shield.authc.support.UsernamePasswordToken.BASIC
|
|||
import static org.elasticsearch.shield.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
/**
|
||||
*/
|
||||
|
@@ -75,16 +84,190 @@ public class DocumentLevelSecurityTests extends ShieldIntegTestCase {
|
|||
|
||||
SearchResponse response = client().prepareSearch("test")
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))
|
||||
.setQuery(randomBoolean() ? QueryBuilders.termQuery("field1", "value1") : QueryBuilders.matchAllQuery())
|
||||
.get();
|
||||
assertHitCount(response, 1);
|
||||
assertSearchHits(response, "1");
|
||||
response = client().prepareSearch("test")
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))
|
||||
.setQuery(randomBoolean() ? QueryBuilders.termQuery("field2", "value2") : QueryBuilders.matchAllQuery())
|
||||
.get();
|
||||
assertHitCount(response, 1);
|
||||
assertSearchHits(response, "2");
|
||||
}
|
||||
|
||||
public void testGetApi() throws Exception {
|
||||
assertAcked(client().admin().indices().prepareCreate("test")
|
||||
.addMapping("type1", "field1", "type=string", "field2", "type=string")
|
||||
);
|
||||
|
||||
client().prepareIndex("test", "type1", "1").setSource("field1", "value1")
|
||||
.get();
|
||||
client().prepareIndex("test", "type1", "2").setSource("field2", "value2")
|
||||
.get();
|
||||
|
||||
Boolean realtime = randomFrom(true, false, null);
|
||||
GetResponse response = client().prepareGet("test", "type1", "1")
|
||||
.setRealtime(realtime)
|
||||
.setRefresh(true)
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.isExists(), is(true));
|
||||
assertThat(response.getId(), equalTo("1"));
|
||||
response = client().prepareGet("test", "type1", "2")
|
||||
.setRealtime(realtime)
|
||||
.setRefresh(true)
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.isExists(), is(true));
|
||||
assertThat(response.getId(), equalTo("2"));
|
||||
|
||||
response = client().prepareGet("test", "type1", "1")
|
||||
.setRealtime(realtime)
|
||||
.setRefresh(true)
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.isExists(), is(false));
|
||||
response = client().prepareGet("test", "type1", "2")
|
||||
.setRealtime(realtime)
|
||||
.setRefresh(true)
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.isExists(), is(false));
|
||||
}
|
||||
|
||||
public void testMGetApi() throws Exception {
|
||||
assertAcked(client().admin().indices().prepareCreate("test")
|
||||
.addMapping("type1", "field1", "type=string", "field2", "type=string")
|
||||
);
|
||||
|
||||
client().prepareIndex("test", "type1", "1").setSource("field1", "value1")
|
||||
.get();
|
||||
client().prepareIndex("test", "type1", "2").setSource("field2", "value2")
|
||||
.get();
|
||||
|
||||
Boolean realtime = randomFrom(true, false, null);
|
||||
MultiGetResponse response = client().prepareMultiGet()
|
||||
.add("test", "type1", "1")
|
||||
.setRealtime(realtime)
|
||||
.setRefresh(true)
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.getResponses()[0].isFailed(), is(false));
|
||||
assertThat(response.getResponses()[0].getResponse().isExists(), is(true));
|
||||
assertThat(response.getResponses()[0].getResponse().getId(), equalTo("1"));
|
||||
|
||||
response = client().prepareMultiGet()
|
||||
.add("test", "type1", "2")
|
||||
.setRealtime(realtime)
|
||||
.setRefresh(true)
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.getResponses()[0].isFailed(), is(false));
|
||||
assertThat(response.getResponses()[0].getResponse().isExists(), is(true));
|
||||
assertThat(response.getResponses()[0].getResponse().getId(), equalTo("2"));
|
||||
|
||||
response = client().prepareMultiGet()
|
||||
.add("test", "type1", "1")
|
||||
.setRealtime(realtime)
|
||||
.setRefresh(true)
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.getResponses()[0].isFailed(), is(false));
|
||||
assertThat(response.getResponses()[0].getResponse().isExists(), is(false));
|
||||
|
||||
response = client().prepareMultiGet()
|
||||
.add("test", "type1", "2")
|
||||
.setRealtime(realtime)
|
||||
.setRefresh(true)
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.getResponses()[0].isFailed(), is(false));
|
||||
assertThat(response.getResponses()[0].getResponse().isExists(), is(false));
|
||||
}
|
||||
|
||||
public void testTVApi() throws Exception {
|
||||
assertAcked(client().admin().indices().prepareCreate("test")
|
||||
.addMapping("type1", "field1", "type=string,term_vector=with_positions_offsets_payloads", "field2", "type=string,term_vector=with_positions_offsets_payloads")
|
||||
);
|
||||
client().prepareIndex("test", "type1", "1").setSource("field1", "value1")
|
||||
.setRefresh(true)
|
||||
.get();
|
||||
client().prepareIndex("test", "type1", "2").setSource("field2", "value2")
|
||||
.setRefresh(true)
|
||||
.get();
|
||||
|
||||
Boolean realtime = randomFrom(true, false, null);
|
||||
TermVectorsResponse response = client().prepareTermVectors("test", "type1", "1")
|
||||
.setRealtime(realtime)
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.isExists(), is(true));
|
||||
assertThat(response.getId(), is("1"));
|
||||
|
||||
response = client().prepareTermVectors("test", "type1", "2")
|
||||
.setRealtime(realtime)
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.isExists(), is(true));
|
||||
assertThat(response.getId(), is("2"));
|
||||
|
||||
response = client().prepareTermVectors("test", "type1", "1")
|
||||
.setRealtime(realtime)
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.isExists(), is(false));
|
||||
|
||||
response = client().prepareTermVectors("test", "type1", "2")
|
||||
.setRealtime(realtime)
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.isExists(), is(false));
|
||||
}
|
||||
|
||||
public void testMTVApi() throws Exception {
|
||||
assertAcked(client().admin().indices().prepareCreate("test")
|
||||
.addMapping("type1", "field1", "type=string,term_vector=with_positions_offsets_payloads", "field2", "type=string,term_vector=with_positions_offsets_payloads")
|
||||
);
|
||||
client().prepareIndex("test", "type1", "1").setSource("field1", "value1")
|
||||
.setRefresh(true)
|
||||
.get();
|
||||
client().prepareIndex("test", "type1", "2").setSource("field2", "value2")
|
||||
.setRefresh(true)
|
||||
.get();
|
||||
|
||||
Boolean realtime = randomFrom(true, false, null);
|
||||
MultiTermVectorsResponse response = client().prepareMultiTermVectors()
|
||||
.add(new TermVectorsRequest("test", "type1", "1").realtime(realtime))
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.getResponses().length, equalTo(1));
|
||||
assertThat(response.getResponses()[0].getResponse().isExists(), is(true));
|
||||
assertThat(response.getResponses()[0].getResponse().getId(), is("1"));
|
||||
|
||||
response = client().prepareMultiTermVectors()
|
||||
.add(new TermVectorsRequest("test", "type1", "2").realtime(realtime))
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.getResponses().length, equalTo(1));
|
||||
assertThat(response.getResponses()[0].getResponse().isExists(), is(true));
|
||||
assertThat(response.getResponses()[0].getResponse().getId(), is("2"));
|
||||
|
||||
response = client().prepareMultiTermVectors()
|
||||
.add(new TermVectorsRequest("test", "type1", "1").realtime(realtime))
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user2", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.getResponses().length, equalTo(1));
|
||||
assertThat(response.getResponses()[0].getResponse().isExists(), is(false));
|
||||
|
||||
response = client().prepareMultiTermVectors()
|
||||
.add(new TermVectorsRequest("test", "type1", "2").realtime(realtime))
|
||||
.putHeader(BASIC_AUTH_HEADER, basicAuthHeaderValue("user1", USERS_PASSWD))
|
||||
.get();
|
||||
assertThat(response.getResponses().length, equalTo(1));
|
||||
assertThat(response.getResponses()[0].getResponse().isExists(), is(false));
|
||||
}
|
||||
|
||||
public void testGlobalAggregation() throws Exception {
|
||||
assertAcked(client().admin().indices().prepareCreate("test")
|
||||
.addMapping("type1", "field1", "type=string", "field2", "type=string")
@@ -0,0 +1,170 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.shield.authz.accesscontrol;
|
||||
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.index.*;
|
||||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.search.join.BitDocIdSetFilter;
|
||||
import org.apache.lucene.search.join.BitSetProducer;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.BitDocIdSet;
|
||||
import org.apache.lucene.util.BitSet;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.SparseFixedBitSet;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Matchers;
|
||||
import org.mockito.invocation.InvocationOnMock;
|
||||
import org.mockito.stubbing.Answer;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.Callable;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class DocumentSubsetReaderTests extends ESTestCase {
|
||||
|
||||
private Directory directory;
|
||||
private BitsetFilterCache bitsetFilterCache;
|
||||
|
||||
@Before
|
||||
public void before() {
|
||||
directory = newDirectory();
|
||||
bitsetFilterCache = mock(BitsetFilterCache.class);
|
||||
when(bitsetFilterCache.getBitSetProducer(Matchers.any(Query.class))).then(invocationOnMock -> {
|
||||
final Query query = (Query) invocationOnMock.getArguments()[0];
|
||||
return (BitSetProducer) context -> {
|
||||
IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
|
||||
IndexSearcher searcher = new IndexSearcher(topLevelContext);
|
||||
searcher.setQueryCache(null);
|
||||
Weight weight = searcher.createNormalizedWeight(query, false);
|
||||
DocIdSetIterator it = weight.scorer(context);
|
||||
return BitSet.of(it, context.reader().maxDoc());
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
@After
|
||||
public void after() throws Exception {
|
||||
directory.close();
|
||||
}
|
||||
|
||||
public void testSearch() throws Exception {
|
||||
IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig());
|
||||
|
||||
Document document = new Document();
|
||||
document.add(new StringField("field", "value1", Field.Store.NO));
|
||||
iw.addDocument(document);
|
||||
|
||||
document = new Document();
|
||||
document.add(new StringField("field", "value2", Field.Store.NO));
|
||||
iw.addDocument(document);
|
||||
|
||||
document = new Document();
|
||||
document.add(new StringField("field", "value3", Field.Store.NO));
|
||||
iw.addDocument(document);
|
||||
|
||||
document = new Document();
|
||||
document.add(new StringField("field", "value4", Field.Store.NO));
|
||||
iw.addDocument(document);
|
||||
|
||||
iw.forceMerge(1);
|
||||
iw.deleteDocuments(new Term("field", "value3"));
|
||||
iw.close();
|
||||
DirectoryReader directoryReader = DirectoryReader.open(directory);
|
||||
|
||||
IndexSearcher indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, new TermQuery(new Term("field", "value1"))));
|
||||
assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1));
|
||||
TopDocs result = indexSearcher.search(new MatchAllDocsQuery(), 1);
|
||||
assertThat(result.totalHits, equalTo(1));
|
||||
assertThat(result.scoreDocs[0].doc, equalTo(0));
|
||||
|
||||
indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, new TermQuery(new Term("field", "value2"))));
|
||||
assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1));
|
||||
result = indexSearcher.search(new MatchAllDocsQuery(), 1);
|
||||
assertThat(result.totalHits, equalTo(1));
|
||||
assertThat(result.scoreDocs[0].doc, equalTo(1));
|
||||
|
||||
// this doc has been marked as deleted:
|
||||
indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, new TermQuery(new Term("field", "value3"))));
|
||||
assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(0));
|
||||
result = indexSearcher.search(new MatchAllDocsQuery(), 1);
|
||||
assertThat(result.totalHits, equalTo(0));
|
||||
|
||||
indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, new TermQuery(new Term("field", "value4"))));
|
||||
assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1));
|
||||
result = indexSearcher.search(new MatchAllDocsQuery(), 1);
|
||||
assertThat(result.totalHits, equalTo(1));
|
||||
assertThat(result.scoreDocs[0].doc, equalTo(3));
|
||||
|
||||
directoryReader.close();
|
||||
}
|
||||
|
||||
public void testLiveDocs() throws Exception {
|
||||
int numDocs = scaledRandomIntBetween(16, 128);
|
||||
IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig());
|
||||
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
Document document = new Document();
|
||||
document.add(new StringField("field", "value" + i, Field.Store.NO));
|
||||
iw.addDocument(document);
|
||||
}
|
||||
|
||||
iw.forceMerge(1);
|
||||
iw.close();
|
||||
|
||||
DirectoryReader in = DirectoryReader.open(directory);
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
DirectoryReader directoryReader = DocumentSubsetReader.wrap(in, bitsetFilterCache, new TermQuery(new Term("field", "value" + i)));
|
||||
assertThat("should have one segment after force merge", directoryReader.leaves().size(), equalTo(1));
|
||||
|
||||
LeafReader leafReader = directoryReader.leaves().get(0).reader();
|
||||
assertThat(leafReader.numDocs(), equalTo(1));
|
||||
Bits liveDocs = leafReader.getLiveDocs();
|
||||
assertThat(liveDocs.length(), equalTo(numDocs));
|
||||
for (int docId = 0; docId < numDocs; docId++) {
|
||||
if (docId == i) {
|
||||
assertThat("docId [" + docId +"] should match", liveDocs.get(docId), is(true));
|
||||
} else {
|
||||
assertThat("docId [" + docId +"] should not match", liveDocs.get(docId), is(false));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
in.close();
|
||||
}
|
||||
|
||||
public void testWrapTwice() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriterConfig iwc = new IndexWriterConfig(null);
|
||||
IndexWriter iw = new IndexWriter(dir, iwc);
|
||||
iw.close();
|
||||
BitsetFilterCache bitsetFilterCache = mock(BitsetFilterCache.class);
|
||||
|
||||
DirectoryReader directoryReader = DocumentSubsetReader.wrap(DirectoryReader.open(dir), bitsetFilterCache, new MatchAllDocsQuery());
|
||||
try {
|
||||
DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, new MatchAllDocsQuery());
|
||||
fail("shouldn't be able to wrap DocumentSubsetDirectoryReader twice");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertThat(e.getMessage(), equalTo("Can't wrap [class org.elasticsearch.shield.authz.accesscontrol.DocumentSubsetReader$DocumentSubsetDirectoryReader] twice"));
|
||||
}
|
||||
|
||||
directoryReader.close();
|
||||
dir.close();
|
||||
}
|
||||
}
@@ -9,19 +9,26 @@ import org.apache.lucene.analysis.MockAnalyzer;
|
|||
import org.apache.lucene.document.*;
|
||||
import org.apache.lucene.index.*;
|
||||
import org.apache.lucene.index.TermsEnum.SeekStatus;
|
||||
import org.apache.lucene.search.MatchAllDocsQuery;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.BytesRef;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.TestUtil;
|
||||
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
|
||||
import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper;
|
||||
import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
||||
/** Simple tests for this filterreader */
|
||||
public class FieldSubsetReaderTests extends ESTestCase {
|
||||
|
||||
|
@@ -767,4 +774,22 @@ public class FieldSubsetReaderTests extends ESTestCase {
|
|||
TestUtil.checkReader(ir);
|
||||
IOUtils.close(ir, iw, dir);
|
||||
}
|
||||
|
||||
public void testWrapTwice() throws Exception {
|
||||
Directory dir = newDirectory();
|
||||
IndexWriterConfig iwc = new IndexWriterConfig(null);
|
||||
IndexWriter iw = new IndexWriter(dir, iwc);
|
||||
iw.close();
|
||||
|
||||
DirectoryReader directoryReader = DirectoryReader.open(dir);
|
||||
directoryReader = FieldSubsetReader.wrap(directoryReader, Collections.emptySet());
|
||||
try {
|
||||
FieldSubsetReader.wrap(directoryReader, Collections.emptySet());
|
||||
fail("shouldn't be able to wrap FieldSubsetDirectoryReader twice");
|
||||
} catch (IllegalArgumentException e) {
|
||||
assertThat(e.getMessage(), equalTo("Can't wrap [class org.elasticsearch.shield.authz.accesscontrol.FieldSubsetReader$FieldSubsetDirectoryReader] twice"));
|
||||
}
|
||||
directoryReader.close();
|
||||
dir.close();
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,159 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.shield.authz.accesscontrol;
|
||||
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.ImmutableSet;
|
||||
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.StringField;
|
||||
import org.apache.lucene.index.*;
|
||||
import org.apache.lucene.search.*;
|
||||
import org.apache.lucene.search.join.BitSetProducer;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.BitSet;
|
||||
import org.apache.lucene.util.FixedBitSet;
|
||||
import org.apache.lucene.util.SparseFixedBitSet;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
|
||||
import org.elasticsearch.index.engine.EngineConfig;
|
||||
import org.elasticsearch.index.mapper.DocumentMapper;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.query.IndexQueryParserService;
|
||||
import org.elasticsearch.index.query.ParsedQuery;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.indices.IndicesLifecycle;
|
||||
import org.elasticsearch.indices.IndicesWarmer;
|
||||
import org.elasticsearch.shield.authz.InternalAuthorizationService;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.transport.TransportRequest;
|
||||
import org.mockito.Matchers;
|
||||
import org.mockito.invocation.InvocationOnMock;
|
||||
import org.mockito.stubbing.Answer;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.mockito.Matchers.*;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class ShieldIndexSearcherWrapperIntegrationTests extends ESTestCase {
|
||||
|
||||
public void testDLS() throws Exception {
|
||||
ShardId shardId = new ShardId("_index", 0);
|
||||
EngineConfig engineConfig = new EngineConfig(shardId, null, null, Settings.EMPTY, null, null, null, null, null, null, null, null, null, null, null, QueryCachingPolicy.ALWAYS_CACHE, null, null); // can't mock...
|
||||
|
||||
MapperService mapperService = mock(MapperService.class);
|
||||
when(mapperService.docMappers(anyBoolean())).thenReturn(Collections.emptyList());
|
||||
when(mapperService.simpleMatchToIndexNames(anyString())).then(new Answer<Collection<String>>() {
|
||||
@Override
|
||||
public Collection<String> answer(InvocationOnMock invocationOnMock) throws Throwable {
|
||||
return Collections.singletonList((String) invocationOnMock.getArguments()[0]);
|
||||
}
|
||||
});
|
||||
|
||||
TransportRequest request = new TransportRequest.Empty();
|
||||
RequestContext.setCurrent(new RequestContext(request));
|
||||
IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl(true, null, ImmutableSet.of(new BytesArray("{}")));
|
||||
request.putInContext(InternalAuthorizationService.INDICES_PERMISSIONS_KEY, new IndicesAccessControl(true, ImmutableMap.of("_index", indexAccessControl)));
|
||||
IndexQueryParserService parserService = mock(IndexQueryParserService.class);
|
||||
|
||||
IndicesLifecycle indicesLifecycle = mock(IndicesLifecycle.class);
|
||||
        BitsetFilterCache bitsetFilterCache = mock(BitsetFilterCache.class);
        when(bitsetFilterCache.getBitSetProducer(Matchers.any(Query.class))).then(new Answer<BitSetProducer>() {
            @Override
            public BitSetProducer answer(InvocationOnMock invocationOnMock) throws Throwable {
                final Query query = (Query) invocationOnMock.getArguments()[0];
                return context -> {
                    IndexSearcher searcher = new IndexSearcher(context);
                    searcher.setQueryCache(null);
                    Weight weight = searcher.createNormalizedWeight(query, false);
                    DocIdSetIterator it = weight.scorer(context);
                    if (it != null) {
                        int maxDoc = context.reader().maxDoc();
                        BitSet bitSet = randomBoolean() ? new SparseFixedBitSet(maxDoc) : new FixedBitSet(maxDoc);
                        bitSet.or(it);
                        return bitSet;
                    } else {
                        return null;
                    }
                };
            }
        });
        ShieldIndexSearcherWrapper wrapper = new ShieldIndexSearcherWrapper(
                shardId, Settings.EMPTY, parserService, indicesLifecycle, mapperService, bitsetFilterCache
        );

        Directory directory = newDirectory();
        IndexWriter iw = new IndexWriter(
                directory,
                new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE)
        );

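        // Index a random number of documents spread over a few distinct values; every 11th document
        // is deleted again, and valuesHitCount tracks how many live documents remain per value so
        // the assertions further down know what to expect.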
        int numValues = scaledRandomIntBetween(2, 16);
        String[] values = new String[numValues];
        for (int i = 0; i < numValues; i++) {
            values[i] = "value" + i;
        }
        int[] valuesHitCount = new int[numValues];

        int numDocs = scaledRandomIntBetween(32, 128);
        int commitAfter = scaledRandomIntBetween(1, numDocs);
        logger.info("Going to index [{}] documents with [{}] unique values and commit after [{}] documents have been indexed", numDocs, numValues, commitAfter);

        for (int doc = 1; doc <= numDocs; doc++) {
            int valueIndex = (numValues - 1) % doc;

            Document document = new Document();
            String id = String.valueOf(doc);
            document.add(new StringField("id", id, Field.Store.NO));
            String value = values[valueIndex];
            document.add(new StringField("field", value, Field.Store.NO));
            iw.addDocument(document);
            if (doc % 11 == 0) {
                iw.deleteDocuments(new Term("id", id));
            } else {
                if (doc % commitAfter == 0) {
                    iw.commit();
                }
                valuesHitCount[valueIndex]++;
            }
        }
        iw.close();
        StringBuilder valueToHitCountOutput = new StringBuilder();
        for (int i = 0; i < numValues; i++) {
            valueToHitCountOutput.append(values[i]).append('\t').append(valuesHitCount[i]).append('\n');
        }
        logger.info("Value count matrix:\n{}", valueToHitCountOutput);

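        // For every value, the mocked query parser hands back a term query on that value as the role
        // query; a MatchAllDocsQuery executed through the wrapped searcher must then only count the
        // documents carrying that value, and the wrapped reader must report the same numDocs.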
        DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), shardId);
        for (int i = 0; i < numValues; i++) {
            ParsedQuery parsedQuery = new ParsedQuery(new TermQuery(new Term("field", values[i])));
            when(parserService.parse(any(BytesReference.class))).thenReturn(parsedQuery);
            DirectoryReader wrappedDirectoryReader = wrapper.wrap(directoryReader);
            IndexSearcher indexSearcher = wrapper.wrap(engineConfig, new IndexSearcher(wrappedDirectoryReader));

            int expectedHitCount = valuesHitCount[i];
            logger.info("Going to verify hit count with query [{}] with expected total hits [{}]", parsedQuery.query(), expectedHitCount);
            TotalHitCountCollector countCollector = new TotalHitCountCollector();
            indexSearcher.search(new MatchAllDocsQuery(), countCollector);
            assertThat(countCollector.getTotalHits(), equalTo(expectedHitCount));
            assertThat(wrappedDirectoryReader.numDocs(), equalTo(expectedHitCount));
        }

        directoryReader.close();
        directory.close();
    }

}

@@ -7,10 +7,16 @@ package org.elasticsearch.shield.authz.accesscontrol;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.*;
import org.apache.lucene.search.*;
import org.apache.lucene.search.similarities.BM25Similarity;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.SparseFixedBitSet;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.compress.CompressedXContent;
@@ -19,6 +25,9 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
import org.elasticsearch.index.cache.query.none.NoneQueryCache;
import org.elasticsearch.index.engine.EngineConfig;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.shard.IndexShard;
@@ -26,20 +35,24 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.similarity.SimilarityLookupService;
import org.elasticsearch.indices.InternalIndicesLifecycle;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.shield.authz.InternalAuthorizationService;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.transport.TransportRequest;
import org.junit.After;
import org.junit.Before;
import org.mockito.Mockito;
import org.junit.Test;

import java.io.IOException;

import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.elasticsearch.shield.authz.accesscontrol.ShieldIndexSearcherWrapper.intersectScorerAndRoleBits;
import static org.hamcrest.Matchers.*;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class ShieldIndexSearcherWrapperTests extends ESTestCase {
public class ShieldIndexSearcherWrapperUnitTests extends ESTestCase {

    private ShardId shardId;
    private TransportRequest request;

@@ -58,7 +71,7 @@ public class ShieldIndexSearcherWrapperTests extends ESTestCase {

        shardId = new ShardId(index, 0);
        InternalIndicesLifecycle indicesLifecycle = new InternalIndicesLifecycle(settings);
        shieldIndexSearcherWrapper = new ShieldIndexSearcherWrapper(shardId, settings, null, indicesLifecycle, mapperService);
        shieldIndexSearcherWrapper = new ShieldIndexSearcherWrapper(shardId, settings, null, indicesLifecycle, mapperService, null);
        IndexShard indexShard = mock(IndexShard.class);
        when(indexShard.shardId()).thenReturn(shardId);
        indicesLifecycle.afterIndexShardPostRecovery(indexShard);
@@ -196,6 +209,105 @@ public class ShieldIndexSearcherWrapperTests extends ESTestCase {
        assertResolvedFields("field1", "field1", ParentFieldMapper.joinField("parent1"), ParentFieldMapper.joinField("parent2"));
    }

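    // Wrapping a searcher over a DocumentSubsetReader must return a new searcher instance that
    // still exposes the similarity configured on the engine, so relevancy scoring is not changed
    // by the wrapping.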
    public void testDelegateSimilarity() throws Exception {
        ShardId shardId = new ShardId("_index", 0);
        EngineConfig engineConfig = new EngineConfig(shardId, null, null, Settings.EMPTY, null, null, null, null, null, null, new BM25Similarity(), null, null, null, new NoneQueryCache(shardId.index(), Settings.EMPTY), QueryCachingPolicy.ALWAYS_CACHE, null, null); // can't mock...

        BitsetFilterCache bitsetFilterCache = mock(BitsetFilterCache.class);
        DirectoryReader directoryReader = DocumentSubsetReader.wrap(esIn, bitsetFilterCache, new MatchAllDocsQuery());
        IndexSearcher indexSearcher = new IndexSearcher(directoryReader);
        IndexSearcher result = shieldIndexSearcherWrapper.wrap(engineConfig, indexSearcher);
        assertThat(result, not(sameInstance(indexSearcher)));
        assertThat(result.getSimilarity(true), sameInstance(engineConfig.getSimilarity()));
    }

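    // intersectScorerAndRoleBits intersects the main query's scorer with the role query's bit set:
    // four documents all match the main query (field2:value1), one of them is deleted again, and for
    // each role bit set only the expected, still live document may reach the collector.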
    public void testIntersectScorerAndRoleBits() throws Exception {
        final Directory directory = newDirectory();
        IndexWriter iw = new IndexWriter(
                directory,
                new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE)
        );

        Document document = new Document();
        document.add(new StringField("field1", "value1", Field.Store.NO));
        document.add(new StringField("field2", "value1", Field.Store.NO));
        iw.addDocument(document);

        document = new Document();
        document.add(new StringField("field1", "value2", Field.Store.NO));
        document.add(new StringField("field2", "value1", Field.Store.NO));
        iw.addDocument(document);

        document = new Document();
        document.add(new StringField("field1", "value3", Field.Store.NO));
        document.add(new StringField("field2", "value1", Field.Store.NO));
        iw.addDocument(document);

        document = new Document();
        document.add(new StringField("field1", "value4", Field.Store.NO));
        document.add(new StringField("field2", "value1", Field.Store.NO));
        iw.addDocument(document);

        iw.commit();
        iw.deleteDocuments(new Term("field1", "value3"));
        iw.close();
        DirectoryReader directoryReader = DirectoryReader.open(directory);
        IndexSearcher searcher = new IndexSearcher(directoryReader);
        Weight weight = searcher.createNormalizedWeight(new TermQuery(new Term("field2", "value1")), false);

        LeafReaderContext leaf = directoryReader.leaves().get(0);
        Scorer scorer = weight.scorer(leaf);

        SparseFixedBitSet sparseFixedBitSet = query(leaf, "field1", "value1");
        LeafCollector leafCollector = new LeafBucketCollector() {
            @Override
            public void collect(int doc, long bucket) throws IOException {
                assertThat(doc, equalTo(0));
            }
        };
        intersectScorerAndRoleBits(scorer, sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs());

        sparseFixedBitSet = query(leaf, "field1", "value2");
        leafCollector = new LeafBucketCollector() {
            @Override
            public void collect(int doc, long bucket) throws IOException {
                assertThat(doc, equalTo(1));
            }
        };
        intersectScorerAndRoleBits(scorer, sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs());

        sparseFixedBitSet = query(leaf, "field1", "value3");
        leafCollector = new LeafBucketCollector() {
            @Override
            public void collect(int doc, long bucket) throws IOException {
                fail("docId [" + doc + "] should have been deleted");
            }
        };
        intersectScorerAndRoleBits(scorer, sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs());

        sparseFixedBitSet = query(leaf, "field1", "value4");
        leafCollector = new LeafBucketCollector() {
            @Override
            public void collect(int doc, long bucket) throws IOException {
                assertThat(doc, equalTo(3));
            }
        };
        intersectScorerAndRoleBits(scorer, sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs());

        directoryReader.close();
        directory.close();
    }

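    // Builds a bit set of all documents containing the given term by walking the field's TermsEnum
    // until the term is found and OR-ing its postings into the set; it assumes the term exists in
    // the leaf, otherwise the loop below fails with a NullPointerException.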
    private SparseFixedBitSet query(LeafReaderContext leaf, String field, String value) throws IOException {
        SparseFixedBitSet sparseFixedBitSet = new SparseFixedBitSet(leaf.reader().maxDoc());
        TermsEnum tenum = leaf.reader().terms(field).iterator();
        while (tenum.next().utf8ToString().equals(value) == false) {}
        PostingsEnum penum = tenum.postings(null);
        sparseFixedBitSet.or(penum);
        return sparseFixedBitSet;
    }

    private void assertResolvedFields(String expression, String... expectedFields) {
        IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl(true, ImmutableSet.of(expression), null);
        request.putInContext(InternalAuthorizationService.INDICES_PERMISSIONS_KEY, new IndicesAccessControl(true, ImmutableMap.of("_index", indexAccessControl)));