Use separate BitSet cache in Doc Level Security (#43899)
Document level security previously depended on the shared "BitsetFilterCache", which (by design) never expires its entries. However, when using DLS queries - particularly templated ones - the number (and memory usage) of generated bitsets can be significant. This change introduces a new cache specifically for BitSets used in DLS queries that has memory usage constraints and access time expiry. The whole cache is automatically cleared if the role cache is cleared. Individual bitsets are cleared when the corresponding Lucene index reader is closed. The cache size defaults to 50MB, and entries expire if unused for 7 days. Backport of: #43669
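The cache is controlled by two new node-scoped settings, both visible in the DocumentSubsetBitsetCache diff below. As a minimal sketch of overriding the defaults (the values here are illustrative, not recommendations; the unit tests in this commit use the same pattern):

    // Illustrative only: override the 50mb size limit and 7d TTL defaults.
    // Keys: xpack.security.dls.bitset.cache.size / xpack.security.dls.bitset.cache.ttl
    final Settings settings = Settings.builder()
        .put(DocumentSubsetBitsetCache.CACHE_SIZE_SETTING.getKey(), "100mb")
        .put(DocumentSubsetBitsetCache.CACHE_TTL_SETTING.getKey(), "72h")
        .build();
    final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(settings);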
parent 05c0cff1b6
commit 31b19bd022
DocumentSubsetBitsetCache.java
@@ -0,0 +1,208 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.security.authz.accesscontrol;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Accountable;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.FixedBitSet;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets;

import java.io.Closeable;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutionException;

/**
 * This is a cache for {@link BitSet} instances that are used with the {@link DocumentSubsetReader}.
 * It is bounded by memory size and access time.
 *
 * @see org.elasticsearch.index.cache.bitset.BitsetFilterCache
 */
public final class DocumentSubsetBitsetCache implements IndexReader.ClosedListener, Closeable, Accountable {

    /**
     * The TTL defaults to 1 week. We depend on the {@code max_bytes} setting to keep the cache to a sensible size, by evicting LRU
     * entries; however, there is benefit in reclaiming memory by expiring bitsets that have not been used for some period of time.
     * Because {@link org.elasticsearch.xpack.core.security.authz.permission.IndicesPermission.Group#query} can be templated, it is
     * not uncommon for a query to only be used for a relatively short period of time (e.g. because a user's metadata changed, or because
     * that user is an infrequent user of Elasticsearch). This access time expiry helps free up memory in those circumstances even if the
     * cache is never filled.
     */
    static final Setting<TimeValue> CACHE_TTL_SETTING =
        Setting.timeSetting("xpack.security.dls.bitset.cache.ttl", TimeValue.timeValueHours(24 * 7), Property.NodeScope);

    static final Setting<ByteSizeValue> CACHE_SIZE_SETTING = Setting.byteSizeSetting("xpack.security.dls.bitset.cache.size",
        new ByteSizeValue(50, ByteSizeUnit.MB), Property.NodeScope);

    private static final BitSet NULL_MARKER = new FixedBitSet(0);

    private final Logger logger;
    private final Cache<BitsetCacheKey, BitSet> bitsetCache;
    private final Map<IndexReader.CacheKey, Set<BitsetCacheKey>> keysByIndex;

    public DocumentSubsetBitsetCache(Settings settings) {
        this.logger = LogManager.getLogger(getClass());
        final TimeValue ttl = CACHE_TTL_SETTING.get(settings);
        final ByteSizeValue size = CACHE_SIZE_SETTING.get(settings);
        this.bitsetCache = CacheBuilder.<BitsetCacheKey, BitSet>builder()
            .setExpireAfterAccess(ttl)
            .setMaximumWeight(size.getBytes())
            .weigher((key, bitSet) -> bitSet == NULL_MARKER ? 0 : bitSet.ramBytesUsed()).build();
        this.keysByIndex = new ConcurrentHashMap<>();
    }

    @Override
    public void onClose(IndexReader.CacheKey ownerCoreCacheKey) {
        final Set<BitsetCacheKey> keys = keysByIndex.remove(ownerCoreCacheKey);
        if (keys != null) {
            // Because this Set has been removed from the map, and the only update to the set is performed in a
            // Map#compute call, it should not be possible to get a concurrent modification here.
            keys.forEach(bitsetCache::invalidate);
        }
    }

    @Override
    public void close() {
        clear("close");
    }

    public void clear(String reason) {
        logger.debug("clearing all DLS bitsets because [{}]", reason);
        // Due to the order here, it is possible that a new entry could be added _after_ the keysByIndex map is cleared
        // but _before_ the cache is cleared. This would mean it sits orphaned in keysByIndex, but this is not an issue.
        // When the index is closed, the key will be removed from the map, and there will not be a corresponding item
        // in the cache, which will make the cache-invalidate a no-op.
        // Since the entry is not in the cache, if #getBitSet is called, it will be loaded, and the new key will be added
        // to the index without issue.
        keysByIndex.clear();
        bitsetCache.invalidateAll();
    }

    int entryCount() {
        return this.bitsetCache.count();
    }

    @Override
    public long ramBytesUsed() {
        return this.bitsetCache.weight();
    }

    /**
     * Obtain the {@link BitSet} for the given {@code query} in the given {@code context}.
     * If there is a cached entry for that query and context, it will be returned.
     * Otherwise a new BitSet will be created and stored in the cache.
     * The returned BitSet may be null (e.g. if the query has no results).
     */
    @Nullable
    public BitSet getBitSet(final Query query, final LeafReaderContext context) throws ExecutionException {
        final IndexReader.CacheHelper coreCacheHelper = context.reader().getCoreCacheHelper();
        if (coreCacheHelper == null) {
            throw new IllegalArgumentException("Reader " + context.reader() + " does not support caching");
        }
        coreCacheHelper.addClosedListener(this);
        final IndexReader.CacheKey indexKey = coreCacheHelper.getKey();
        final BitsetCacheKey cacheKey = new BitsetCacheKey(indexKey, query);

        final BitSet bitSet = bitsetCache.computeIfAbsent(cacheKey, ignore1 -> {
            // This ensures all insertions into the set are guarded by ConcurrentHashMap's atomicity guarantees.
            keysByIndex.compute(indexKey, (ignore2, set) -> {
                if (set == null) {
                    set = Sets.newConcurrentHashSet();
                }
                set.add(cacheKey);
                return set;
            });
            final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
            final IndexSearcher searcher = new IndexSearcher(topLevelContext);
            searcher.setQueryCache(null);
            final Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1f);
            Scorer s = weight.scorer(context);
            if (s == null) {
                // A cache loader is not allowed to return null, return a marker object instead.
                return NULL_MARKER;
            } else {
                return BitSet.of(s.iterator(), context.reader().maxDoc());
            }
        });
        if (bitSet == NULL_MARKER) {
            return null;
        } else {
            return bitSet;
        }
    }

    public static List<Setting<?>> getSettings() {
        return Arrays.asList(CACHE_TTL_SETTING, CACHE_SIZE_SETTING);
    }

    public Map<String, Object> usageStats() {
        final ByteSizeValue ram = new ByteSizeValue(ramBytesUsed(), ByteSizeUnit.BYTES);
        return new MapBuilder<String, Object>()
            .put("count", entryCount())
            .put("memory", ram.toString())
            .put("memory_in_bytes", ram.getBytes())
            .immutableMap();
    }

    private class BitsetCacheKey {
        final IndexReader.CacheKey index;
        final Query query;

        private BitsetCacheKey(IndexReader.CacheKey index, Query query) {
            this.index = index;
            this.query = query;
        }

        @Override
        public boolean equals(Object other) {
            if (this == other) {
                return true;
            }
            if (other == null || getClass() != other.getClass()) {
                return false;
            }
            final BitsetCacheKey that = (BitsetCacheKey) other;
            return Objects.equals(this.index, that.index) &&
                Objects.equals(this.query, that.query);
        }

        @Override
        public int hashCode() {
            return Objects.hash(index, query);
        }

        @Override
        public String toString() {
            return getClass().getSimpleName() + "(" + index + "," + query + ")";
        }
    }
}
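For orientation before the consumers below: callers hand getBitSet a role query and a leaf reader context, and must treat a null result as "no documents visible". A minimal sketch (roleQuery, leafContext and docId are assumed to come from the caller's context; DocumentSubsetReader below is the real consumer):

    // Sketch: resolve the set of documents a role may see in one segment.
    final BitSet visibleDocs = bitsetCache.getBitSet(roleQuery, leafContext);
    if (visibleDocs == null) {
        // the role query matched nothing in this segment; hide every document
    } else {
        final boolean canSee = visibleDocs.get(docId); // per-document visibility test
    }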
DocumentSubsetReader.java
@@ -21,7 +21,6 @@ import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.cache.Cache;
 import org.elasticsearch.common.cache.CacheBuilder;
 import org.elasticsearch.common.logging.LoggerMessageFormat;
-import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 
 import java.io.IOException;
 import java.io.UncheckedIOException;
@@ -34,9 +33,9 @@ import java.util.concurrent.ExecutionException;
  */
 public final class DocumentSubsetReader extends FilterLeafReader {
 
-    public static DocumentSubsetDirectoryReader wrap(DirectoryReader in, BitsetFilterCache bitsetFilterCache,
+    public static DocumentSubsetDirectoryReader wrap(DirectoryReader in, DocumentSubsetBitsetCache bitsetCache,
                                                      Query roleQuery) throws IOException {
-        return new DocumentSubsetDirectoryReader(in, bitsetFilterCache, roleQuery);
+        return new DocumentSubsetDirectoryReader(in, bitsetCache, roleQuery);
     }
 
     /**
@@ -110,21 +109,21 @@ public final class DocumentSubsetReader extends FilterLeafReader {
     public static final class DocumentSubsetDirectoryReader extends FilterDirectoryReader {
 
         private final Query roleQuery;
-        private final BitsetFilterCache bitsetFilterCache;
+        private final DocumentSubsetBitsetCache bitsetCache;
 
-        DocumentSubsetDirectoryReader(final DirectoryReader in, final BitsetFilterCache bitsetFilterCache, final Query roleQuery)
-                throws IOException {
+        DocumentSubsetDirectoryReader(final DirectoryReader in, final DocumentSubsetBitsetCache bitsetCache,
+                                      final Query roleQuery) throws IOException {
             super(in, new SubReaderWrapper() {
                 @Override
                 public LeafReader wrap(LeafReader reader) {
                     try {
-                        return new DocumentSubsetReader(reader, bitsetFilterCache, roleQuery);
+                        return new DocumentSubsetReader(reader, bitsetCache, roleQuery);
                     } catch (Exception e) {
                         throw ExceptionsHelper.convertToElastic(e);
                     }
                 }
             });
-            this.bitsetFilterCache = bitsetFilterCache;
+            this.bitsetCache = bitsetCache;
             this.roleQuery = roleQuery;
 
             verifyNoOtherDocumentSubsetDirectoryReaderIsWrapped(in);
@@ -132,7 +131,7 @@ public final class DocumentSubsetReader extends FilterLeafReader {
 
         @Override
         protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
-            return new DocumentSubsetDirectoryReader(in, bitsetFilterCache, roleQuery);
+            return new DocumentSubsetDirectoryReader(in, bitsetCache, roleQuery);
         }
 
         private static void verifyNoOtherDocumentSubsetDirectoryReaderIsWrapped(DirectoryReader reader) {
@@ -156,9 +155,9 @@ public final class DocumentSubsetReader extends FilterLeafReader {
     private final BitSet roleQueryBits;
     private final int numDocs;
 
-    private DocumentSubsetReader(final LeafReader in, BitsetFilterCache bitsetFilterCache, final Query roleQuery) throws Exception {
+    private DocumentSubsetReader(final LeafReader in, DocumentSubsetBitsetCache bitsetCache, final Query roleQuery) throws Exception {
         super(in);
-        this.roleQueryBits = bitsetFilterCache.getBitSetProducer(roleQuery).getBitSet(in.getContext());
+        this.roleQueryBits = bitsetCache.getBitSet(roleQuery, in.getContext());
         this.numDocs = getNumDocs(in, roleQuery, roleQueryBits);
     }
SecurityIndexReaderWrapper.java
@@ -14,7 +14,6 @@ import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.logging.LoggerMessageFormat;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
-import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.ShardUtils;
@@ -44,17 +43,17 @@ public class SecurityIndexReaderWrapper implements CheckedFunction<DirectoryRead
     private static final Logger logger = LogManager.getLogger(SecurityIndexReaderWrapper.class);
 
     private final Function<ShardId, QueryShardContext> queryShardContextProvider;
-    private final BitsetFilterCache bitsetFilterCache;
+    private final DocumentSubsetBitsetCache bitsetCache;
     private final XPackLicenseState licenseState;
     private final ThreadContext threadContext;
     private final ScriptService scriptService;
 
     public SecurityIndexReaderWrapper(Function<ShardId, QueryShardContext> queryShardContextProvider,
-                                      BitsetFilterCache bitsetFilterCache, ThreadContext threadContext, XPackLicenseState licenseState,
+                                      DocumentSubsetBitsetCache bitsetCache, ThreadContext threadContext, XPackLicenseState licenseState,
                                       ScriptService scriptService) {
         this.scriptService = scriptService;
         this.queryShardContextProvider = queryShardContextProvider;
-        this.bitsetFilterCache = bitsetFilterCache;
+        this.bitsetCache = bitsetCache;
         this.threadContext = threadContext;
         this.licenseState = licenseState;
     }
@@ -84,7 +83,7 @@ public class SecurityIndexReaderWrapper implements CheckedFunction<DirectoryRead
         if (documentPermissions != null && documentPermissions.hasDocumentLevelPermissions()) {
             BooleanQuery filterQuery = documentPermissions.filter(getUser(), scriptService, shardId, queryShardContextProvider);
             if (filterQuery != null) {
-                wrappedReader = DocumentSubsetReader.wrap(wrappedReader, bitsetFilterCache, new ConstantScoreQuery(filterQuery));
+                wrappedReader = DocumentSubsetReader.wrap(wrappedReader, bitsetCache, new ConstantScoreQuery(filterQuery));
             }
         }
CacheIteratorHelper.java
@@ -0,0 +1,59 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.security.support;

import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.util.concurrent.ReleasableLock;

import java.util.Iterator;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Predicate;

/**
 * A utility class to facilitate iterating over (and modifying) a {@link org.elasticsearch.common.cache.Cache}.
 * The semantics of the cache are such that when iterating (with the potential to call {@link Iterator#remove()}), we must prevent any
 * other modifications.
 * This class provides the necessary methods to support this constraint in a clear manner.
 */
public class CacheIteratorHelper<K, V> {
    private final Cache<K, V> cache;
    private final ReleasableLock updateLock;
    private final ReleasableLock iteratorLock;

    public CacheIteratorHelper(Cache<K, V> cache) {
        this.cache = cache;
        final ReadWriteLock lock = new ReentrantReadWriteLock();
        // the lock is used in an odd manner; when iterating over the cache we cannot have modifiers other than deletes using the
        // iterator, but when not iterating we can modify the cache without external locking. When making normal modifications to the
        // cache, the read lock is obtained so that we can allow concurrent modifications; however, when we need to iterate over the
        // keys or values of the cache, the write lock must be obtained to prevent any modifications.
        updateLock = new ReleasableLock(lock.readLock());
        iteratorLock = new ReleasableLock(lock.writeLock());
    }

    public ReleasableLock acquireUpdateLock() {
        return updateLock.acquire();
    }

    private ReleasableLock acquireForIterator() {
        return iteratorLock.acquire();
    }

    public void removeKeysIf(Predicate<K> removeIf) {
        // the cache cannot be modified while doing this operation per the terms of the cache iterator
        try (ReleasableLock ignored = this.acquireForIterator()) {
            Iterator<K> iterator = cache.keys().iterator();
            while (iterator.hasNext()) {
                K key = iterator.next();
                if (removeIf.test(key)) {
                    iterator.remove();
                }
            }
        }
    }
}
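CompositeRolesStore (further down) swaps its hand-rolled read/write locking for this helper. A minimal usage sketch, assuming a Cache<String, Object> named cache (the types, keys, and value are hypothetical):

    final CacheIteratorHelper<String, Object> cacheHelper = new CacheIteratorHelper<>(cache);
    // Normal writes run concurrently under the shared update (read) lock...
    try (ReleasableLock ignored = cacheHelper.acquireUpdateLock()) {
        cache.put("some-key", value);
    }
    // ...while bulk removal takes the exclusive (write) lock internally.
    cacheHelper.removeKeysIf(key -> key.startsWith("stale-"));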
DocumentSubsetBitsetCacheTests.java
@@ -0,0 +1,248 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.security.authz.accesscontrol;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.BitSet;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.CheckedBiConsumer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.query.TermQueryBuilder;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import org.hamcrest.Matchers;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.sameInstance;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class DocumentSubsetBitsetCacheTests extends ESTestCase {

    public void testSameBitSetIsReturnedForIdenticalQuery() throws Exception {
        final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(Settings.EMPTY);
        runTestOnIndex((shardContext, leafContext) -> {
            final Query query1 = QueryBuilders.termQuery("field-1", "value-1").toQuery(shardContext);
            final BitSet bitSet1 = cache.getBitSet(query1, leafContext);
            assertThat(bitSet1, notNullValue());

            final Query query2 = QueryBuilders.termQuery("field-1", "value-1").toQuery(shardContext);
            final BitSet bitSet2 = cache.getBitSet(query2, leafContext);
            assertThat(bitSet2, notNullValue());

            assertThat(bitSet2, Matchers.sameInstance(bitSet1));
        });
    }

    public void testNullBitSetIsReturnedForNonMatchingQuery() throws Exception {
        final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(Settings.EMPTY);
        runTestOnIndex((shardContext, leafContext) -> {
            final Query query = QueryBuilders.termQuery("does-not-exist", "any-value").toQuery(shardContext);
            final BitSet bitSet = cache.getBitSet(query, leafContext);
            assertThat(bitSet, nullValue());
        });
    }

    public void testNullEntriesAreNotCountedInMemoryUsage() throws Exception {
        final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(Settings.EMPTY);
        assertThat(cache.ramBytesUsed(), equalTo(0L));

        runTestOnIndex((shardContext, leafContext) -> {
            for (int i = 1; i <= randomIntBetween(3, 6); i++) {
                final Query query = QueryBuilders.termQuery("dne-" + i, "dne- " + i).toQuery(shardContext);
                final BitSet bitSet = cache.getBitSet(query, leafContext);
                assertThat(bitSet, nullValue());
                assertThat(cache.ramBytesUsed(), equalTo(0L));
                assertThat(cache.entryCount(), equalTo(i));
            }
        });
    }

    public void testCacheRespectsMemoryLimit() throws Exception {
        // This value is based on the internal implementation details of lucene's FixedBitSet
        // If the implementation changes, this can be safely updated to match the new ram usage for a single bitset
        final long expectedBytesPerBitSet = 56;

        // Enough to hold exactly 2 bit-sets in the cache
        final long maxCacheBytes = expectedBytesPerBitSet * 2;
        final Settings settings = Settings.builder()
            .put(DocumentSubsetBitsetCache.CACHE_SIZE_SETTING.getKey(), maxCacheBytes + "b")
            .build();
        final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(settings);
        assertThat(cache.entryCount(), equalTo(0));
        assertThat(cache.ramBytesUsed(), equalTo(0L));

        runTestOnIndex((shardContext, leafContext) -> {
            Query previousQuery = null;
            BitSet previousBitSet = null;
            for (int i = 1; i <= 5; i++) {
                final TermQueryBuilder queryBuilder = QueryBuilders.termQuery("field-" + i, "value-" + i);
                final Query query = queryBuilder.toQuery(shardContext);
                final BitSet bitSet = cache.getBitSet(query, leafContext);
                assertThat(bitSet, notNullValue());
                assertThat(bitSet.ramBytesUsed(), equalTo(expectedBytesPerBitSet));

                // The first time through we have 1 entry, after that we have 2
                final int expectedCount = i == 1 ? 1 : 2;
                assertThat(cache.entryCount(), equalTo(expectedCount));
                assertThat(cache.ramBytesUsed(), equalTo(expectedCount * expectedBytesPerBitSet));

                // Older queries should get evicted, but the query from the last iteration should still be cached
                if (previousQuery != null) {
                    assertThat(cache.getBitSet(previousQuery, leafContext), sameInstance(previousBitSet));
                    assertThat(cache.entryCount(), equalTo(expectedCount));
                    assertThat(cache.ramBytesUsed(), equalTo(expectedCount * expectedBytesPerBitSet));
                }
                previousQuery = query;
                previousBitSet = bitSet;

                assertThat(cache.getBitSet(queryBuilder.toQuery(shardContext), leafContext), sameInstance(bitSet));
                assertThat(cache.entryCount(), equalTo(expectedCount));
                assertThat(cache.ramBytesUsed(), equalTo(expectedCount * expectedBytesPerBitSet));
            }

            assertThat(cache.entryCount(), equalTo(2));
            assertThat(cache.ramBytesUsed(), equalTo(2 * expectedBytesPerBitSet));

            cache.clear("testing");

            assertThat(cache.entryCount(), equalTo(0));
            assertThat(cache.ramBytesUsed(), equalTo(0L));
        });
    }

    public void testCacheRespectsAccessTimeExpiry() throws Exception {
        final Settings settings = Settings.builder()
            .put(DocumentSubsetBitsetCache.CACHE_TTL_SETTING.getKey(), "10ms")
            .build();
        final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(settings);
        assertThat(cache.entryCount(), equalTo(0));
        assertThat(cache.ramBytesUsed(), equalTo(0L));

        runTestOnIndex((shardContext, leafContext) -> {
            final Query query1 = QueryBuilders.termQuery("field-1", "value-1").toQuery(shardContext);
            final BitSet bitSet1 = cache.getBitSet(query1, leafContext);
            assertThat(bitSet1, notNullValue());

            final Query query2 = QueryBuilders.termQuery("field-2", "value-2").toQuery(shardContext);
            assertBusy(() -> {
                // Force the cache to perform eviction
                final BitSet bitSet2 = cache.getBitSet(query2, leafContext);
                assertThat(bitSet2, notNullValue());

                // Loop until the cache has fewer than 2 items, which means that something was evicted
                assertThat(cache.entryCount(), Matchers.lessThan(2));

            }, 100, TimeUnit.MILLISECONDS);

            // Check that the original bitset is no longer in the cache (a new instance is returned)
            assertThat(cache.getBitSet(query1, leafContext), not(sameInstance(bitSet1)));
        });
    }

    public void testCacheIsPerIndex() throws Exception {
        final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(Settings.EMPTY);
        assertThat(cache.entryCount(), equalTo(0));
        assertThat(cache.ramBytesUsed(), equalTo(0L));

        final int iterations = randomIntBetween(3, 10);
        AtomicInteger counter = new AtomicInteger(0);

        final CheckedBiConsumer<QueryShardContext, LeafReaderContext, Exception> consumer =
            new CheckedBiConsumer<QueryShardContext, LeafReaderContext, Exception>() {
                @Override
                public void accept(QueryShardContext shardContext, LeafReaderContext leafContext) throws Exception {
                    final int count = counter.incrementAndGet();
                    final Query query = QueryBuilders.termQuery("field-1", "value-1").toQuery(shardContext);
                    final BitSet bitSet = cache.getBitSet(query, leafContext);

                    assertThat(bitSet, notNullValue());
                    assertThat(cache.entryCount(), equalTo(count));

                    if (count < iterations) {
                        // Need to do this nested, or else the cache will be cleared when the index reader is closed
                        DocumentSubsetBitsetCacheTests.this.runTestOnIndex(this);
                    }
                }
            };
        runTestOnIndex(consumer);
    }

    public void testCacheClearEntriesWhenIndexIsClosed() throws Exception {
        final DocumentSubsetBitsetCache cache = new DocumentSubsetBitsetCache(Settings.EMPTY);
        assertThat(cache.entryCount(), equalTo(0));
        assertThat(cache.ramBytesUsed(), equalTo(0L));

        for (int i = 1; i <= randomIntBetween(2, 5); i++) {
            runTestOnIndex((shardContext, leafContext) -> {
                for (int j = 1; j <= randomIntBetween(2, 10); j++) {
                    final Query query = QueryBuilders.termQuery("field-" + j, "value-1").toQuery(shardContext);
                    final BitSet bitSet = cache.getBitSet(query, leafContext);
                    assertThat(bitSet, notNullValue());
                }
                assertThat(cache.entryCount(), not(equalTo(0)));
                assertThat(cache.ramBytesUsed(), not(equalTo(0L)));
            });
            assertThat(cache.entryCount(), equalTo(0));
            assertThat(cache.ramBytesUsed(), equalTo(0L));
        }
    }

    private void runTestOnIndex(CheckedBiConsumer<QueryShardContext, LeafReaderContext, Exception> body) throws Exception {
        final ShardId shardId = new ShardId("idx_" + randomAlphaOfLengthBetween(2, 8), randomAlphaOfLength(12), 0);
        final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), Settings.EMPTY);
        final MapperService mapperService = mock(MapperService.class);
        final long nowInMillis = randomNonNegativeLong();

        final Client client = mock(Client.class);
        when(client.settings()).thenReturn(Settings.EMPTY);

        final IndexWriterConfig writerConfig = new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE);
        try (Directory directory = newDirectory();
             IndexWriter iw = new IndexWriter(directory, writerConfig)) {
            for (int i = 1; i <= 100; i++) {
                Document document = new Document();
                for (int j = 1; j <= 10; j++) {
                    document.add(new StringField("field-" + j, "value-" + i, Field.Store.NO));
                }
                iw.addDocument(document);
            }
            iw.commit();

            try (DirectoryReader directoryReader = DirectoryReader.open(directory)) {
                final LeafReaderContext leaf = directoryReader.leaves().get(0);

                final QueryShardContext context = new QueryShardContext(shardId.id(), indexSettings, null, null, null, mapperService,
                    null, null, xContentRegistry(), writableRegistry(), client, leaf.reader(), () -> nowInMillis, null);

                body.accept(context, leaf);
            }
        }
    }

}
DocumentSubsetReaderTests.java
@@ -21,18 +21,13 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Bits;
-import org.elasticsearch.core.internal.io.IOUtils;
 import org.apache.lucene.util.TestUtil;
 import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.IndexSettingsModule;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader;
 import org.junit.After;
 import org.junit.Before;
 
@@ -45,7 +40,7 @@ public class DocumentSubsetReaderTests extends ESTestCase {
 
     private Directory directory;
     private DirectoryReader directoryReader;
-    private BitsetFilterCache bitsetFilterCache;
+    private DocumentSubsetBitsetCache bitsetCache;
 
     @Before
     public void setUpDirectory() {
@@ -55,18 +50,7 @@ public class DocumentSubsetReaderTests extends ESTestCase {
         assertTrue(DocumentSubsetReader.NUM_DOCS_CACHE.toString(),
                 DocumentSubsetReader.NUM_DOCS_CACHE.isEmpty());
         directory = newDirectory();
-        IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY);
-        bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() {
-            @Override
-            public void onCache(ShardId shardId, Accountable accountable) {
-
-            }
-
-            @Override
-            public void onRemoval(ShardId shardId, Accountable accountable) {
-
-            }
-        });
+        bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY);
     }
 
     @After
@@ -77,7 +61,7 @@ public class DocumentSubsetReaderTests extends ESTestCase {
         assertTrue(DocumentSubsetReader.NUM_DOCS_CACHE.toString(),
                 DocumentSubsetReader.NUM_DOCS_CACHE.isEmpty());
         directory.close();
-        bitsetFilterCache.close();
+        bitsetCache.close();
     }
 
     public void testSearch() throws Exception {
@@ -104,14 +88,14 @@ public class DocumentSubsetReaderTests extends ESTestCase {
         iw.close();
         openDirectoryReader();
 
-        IndexSearcher indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache,
+        IndexSearcher indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetCache,
                 new TermQuery(new Term("field", "value1"))));
         assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1));
         TopDocs result = indexSearcher.search(new MatchAllDocsQuery(), 1);
         assertThat(result.totalHits.value, equalTo(1L));
         assertThat(result.scoreDocs[0].doc, equalTo(0));
 
-        indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache,
+        indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetCache,
                 new TermQuery(new Term("field", "value2"))));
         assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1));
         result = indexSearcher.search(new MatchAllDocsQuery(), 1);
@@ -119,13 +103,13 @@ public class DocumentSubsetReaderTests extends ESTestCase {
         assertThat(result.scoreDocs[0].doc, equalTo(1));
 
         // this doc has been marked as deleted:
-        indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache,
+        indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetCache,
                 new TermQuery(new Term("field", "value3"))));
         assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(0));
         result = indexSearcher.search(new MatchAllDocsQuery(), 1);
         assertThat(result.totalHits.value, equalTo(0L));
 
-        indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache,
+        indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetCache,
                 new TermQuery(new Term("field", "value4"))));
         assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1));
         result = indexSearcher.search(new MatchAllDocsQuery(), 1);
@@ -154,7 +138,7 @@ public class DocumentSubsetReaderTests extends ESTestCase {
 
         for (int i = 0; i < numDocs; i++) {
             Query roleQuery = new TermQuery(new Term("field", "value" + i));
-            DirectoryReader wrappedReader = DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, roleQuery);
+            DirectoryReader wrappedReader = DocumentSubsetReader.wrap(directoryReader, bitsetCache, roleQuery);
 
             LeafReader leafReader = wrappedReader.leaves().get(0).reader();
             assertThat(leafReader.hasDeletions(), is(true));
@@ -176,26 +160,16 @@ public class DocumentSubsetReaderTests extends ESTestCase {
         IndexWriterConfig iwc = new IndexWriterConfig(null);
         IndexWriter iw = new IndexWriter(dir, iwc);
         iw.close();
-        IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY);
-        BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() {
-            @Override
-            public void onCache(ShardId shardId, Accountable accountable) {
-            }
-
-            @Override
-            public void onRemoval(ShardId shardId, Accountable accountable) {
-            }
-        });
-        DirectoryReader directoryReader = DocumentSubsetReader.wrap(DirectoryReader.open(dir), bitsetFilterCache, new MatchAllDocsQuery());
+        DirectoryReader directoryReader = DocumentSubsetReader.wrap(DirectoryReader.open(dir), bitsetCache, new MatchAllDocsQuery());
         try {
-            DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache, new MatchAllDocsQuery());
+            DocumentSubsetReader.wrap(directoryReader, bitsetCache, new MatchAllDocsQuery());
             fail("shouldn't be able to wrap DocumentSubsetDirectoryReader twice");
         } catch (IllegalArgumentException e) {
             assertThat(e.getMessage(), equalTo("Can't wrap [class org.elasticsearch.xpack.core.security.authz.accesscontrol" +
                     ".DocumentSubsetReader$DocumentSubsetDirectoryReader] twice"));
         }
 
-        bitsetFilterCache.close();
+        bitsetCache.close();
         directoryReader.close();
         dir.close();
     }
@@ -219,7 +193,7 @@ public class DocumentSubsetReaderTests extends ESTestCase {
 
         // open reader
         DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw), new ShardId("_index", "_na_", 0));
-        ir = DocumentSubsetReader.wrap(ir, bitsetFilterCache, new MatchAllDocsQuery());
+        ir = DocumentSubsetReader.wrap(ir, bitsetCache, new MatchAllDocsQuery());
         assertEquals(2, ir.numDocs());
         assertEquals(1, ir.leaves().size());
SecurityIndexReaderWrapperIntegrationTests.java
@@ -21,7 +21,6 @@ import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TotalHitCountCollector;
 import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Accountable;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -30,7 +29,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.query.QueryShardContext;
@@ -87,21 +85,11 @@ public class SecurityIndexReaderWrapperIntegrationTests extends AbstractBuilderT
         QueryShardContext realQueryShardContext = new QueryShardContext(shardId.id(), indexSettings, null, null, null, mapperService,
             null, null, xContentRegistry(), writableRegistry(), client, null, () -> nowInMillis, null);
         QueryShardContext queryShardContext = spy(realQueryShardContext);
-        IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY);
-        BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() {
-            @Override
-            public void onCache(ShardId shardId, Accountable accountable) {
-            }
-
-            @Override
-            public void onRemoval(ShardId shardId, Accountable accountable) {
-
-            }
-        });
+        DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY);
         XPackLicenseState licenseState = mock(XPackLicenseState.class);
         when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true);
         SecurityIndexReaderWrapper wrapper = new SecurityIndexReaderWrapper(s -> queryShardContext,
-                bitsetFilterCache, threadContext, licenseState, scriptService) {
+                bitsetCache, threadContext, licenseState, scriptService) {
 
             @Override
             protected IndicesAccessControl getIndicesAccessControl() {
@@ -169,7 +157,7 @@ public class SecurityIndexReaderWrapperIntegrationTests extends AbstractBuilderT
             assertThat(wrappedDirectoryReader.numDocs(), equalTo(expectedHitCount));
         }
 
-        bitsetFilterCache.close();
+        bitsetCache.close();
         directoryReader.close();
         directory.close();
     }
@@ -211,21 +199,12 @@ public class SecurityIndexReaderWrapperIntegrationTests extends AbstractBuilderT
         QueryShardContext realQueryShardContext = new QueryShardContext(shardId.id(), indexSettings, null, null, null, mapperService,
             null, null, xContentRegistry(), writableRegistry(), client, null, () -> nowInMillis, null);
         QueryShardContext queryShardContext = spy(realQueryShardContext);
-        IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY);
-        BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() {
-            @Override
-            public void onCache(ShardId shardId, Accountable accountable) {
-            }
-
-            @Override
-            public void onRemoval(ShardId shardId, Accountable accountable) {
-            }
-        });
+        DocumentSubsetBitsetCache bitsetCache = new DocumentSubsetBitsetCache(Settings.EMPTY);
 
         XPackLicenseState licenseState = mock(XPackLicenseState.class);
         when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true);
         SecurityIndexReaderWrapper wrapper = new SecurityIndexReaderWrapper(s -> queryShardContext,
-                bitsetFilterCache, threadContext, licenseState, scriptService) {
+                bitsetCache, threadContext, licenseState, scriptService) {
 
             @Override
             protected IndicesAccessControl getIndicesAccessControl() {
@@ -281,7 +260,7 @@ public class SecurityIndexReaderWrapperIntegrationTests extends AbstractBuilderT
             }
         }
 
-        bitsetFilterCache.close();
+        bitsetCache.close();
         directoryReader.close();
         directory.close();
     }
Security.java
@@ -118,6 +118,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmSettings;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
 import org.elasticsearch.xpack.core.security.authz.AuthorizationEngine;
 import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField;
+import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetBitsetCache;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.IndicesAccessControl;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexReaderWrapper;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
@@ -284,6 +285,7 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
     private final SetOnce<SecurityActionFilter> securityActionFilter = new SetOnce<>();
     private final SetOnce<SecurityIndexManager> securityIndex = new SetOnce<>();
     private final SetOnce<NioGroupFactory> groupFactory = new SetOnce<>();
+    private final SetOnce<DocumentSubsetBitsetCache> dlsBitsetCache = new SetOnce<>();
     private final List<BootstrapCheck> bootstrapChecks;
     private final List<SecurityExtension> securityExtensions = new ArrayList<>();
 
@@ -398,6 +400,10 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
         securityContext.set(new SecurityContext(settings, threadPool.getThreadContext()));
         components.add(securityContext.get());
 
+        if (XPackSettings.DLS_FLS_ENABLED.get(settings)) {
+            dlsBitsetCache.set(new DocumentSubsetBitsetCache(settings));
+        }
+
         // audit trail service construction
         final List<AuditTrail> auditTrails = XPackSettings.AUDIT_ENABLED.get(settings)
             ? Collections.singletonList(new LoggingAuditTrail(settings, clusterService, threadPool))
@@ -455,7 +461,7 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
         components.add(apiKeyService);
         final CompositeRolesStore allRolesStore = new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore, reservedRolesStore,
             privilegeStore, rolesProviders, threadPool.getThreadContext(), getLicenseState(), fieldPermissionsCache, apiKeyService,
-            new DeprecationRoleDescriptorConsumer(clusterService, threadPool));
+            dlsBitsetCache.get(), new DeprecationRoleDescriptorConsumer(clusterService, threadPool));
         securityIndex.get().addIndexStateListener(allRolesStore::onSecurityIndexStateChange);
 
         // to keep things simple, just invalidate all cached entries on license change. this happens so rarely that the impact should be
@@ -638,6 +644,7 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
         AuthorizationService.addSettings(settingsList);
         Automatons.addSettings(settingsList);
         settingsList.addAll(CompositeRolesStore.getSettings());
+        settingsList.addAll(DocumentSubsetBitsetCache.getSettings());
         settingsList.add(FieldPermissionsCache.CACHE_SIZE_SETTING);
         settingsList.add(TokenService.TOKEN_EXPIRATION);
         settingsList.add(TokenService.DELETE_INTERVAL);
@@ -692,6 +699,7 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
         if (enabled) {
             assert getLicenseState() != null;
             if (XPackSettings.DLS_FLS_ENABLED.get(settings)) {
+                assert dlsBitsetCache.get() != null;
                 module.setReaderWrapper(indexService ->
                     new SecurityIndexReaderWrapper(
                         shardId -> indexService.newQueryShardContext(shardId.id(),
@@ -702,7 +710,7 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
                             throw new IllegalArgumentException("permission filters are not allowed to use the current timestamp");
 
                         }, null),
-                        indexService.cache() != null ? indexService.cache().bitsetFilterCache() : null,
+                        dlsBitsetCache.get(),
                         indexService.getThreadPool().getThreadContext(), getLicenseState(),
                         indexService.getScriptService()));
                 /*
CompositeRolesStore.java
@@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.common.IteratingActionListener;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
 import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges;
+import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetBitsetCache;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsCache;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition.FieldGrantExcludeGroup;
@@ -39,6 +40,7 @@ import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege;
 import org.elasticsearch.xpack.core.security.authz.privilege.Privilege;
 import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore;
 import org.elasticsearch.xpack.core.security.authz.store.RoleRetrievalResult;
+import org.elasticsearch.xpack.core.security.support.CacheIteratorHelper;
 import org.elasticsearch.xpack.core.security.user.AnonymousUser;
 import org.elasticsearch.xpack.core.security.user.SystemUser;
 import org.elasticsearch.xpack.core.security.user.User;
@@ -53,14 +55,11 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.BiConsumer;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -84,18 +83,6 @@ public class CompositeRolesStore {
         Setting.intSetting("xpack.security.authz.store.roles.negative_lookup_cache.max_size", 10000, Property.NodeScope);
     private static final Logger logger = LogManager.getLogger(CompositeRolesStore.class);
 
-    // the lock is used in an odd manner; when iterating over the cache we cannot have modifiers other than deletes using
-    // the iterator but when not iterating we can modify the cache without external locking. When making normal modifications to the cache
-    // the read lock is obtained so that we can allow concurrent modifications; however when we need to iterate over the keys or values of
-    // the cache the write lock must obtained to prevent any modifications
-    private final ReleasableLock readLock;
-    private final ReleasableLock writeLock;
-
-    {
-        final ReadWriteLock iterationLock = new ReentrantReadWriteLock();
-        readLock = new ReleasableLock(iterationLock.readLock());
-        writeLock = new ReleasableLock(iterationLock.writeLock());
-    }
-
     private final FileRolesStore fileRolesStore;
     private final NativeRolesStore nativeRolesStore;
@@ -104,7 +91,9 @@ public class CompositeRolesStore {
     private final Consumer<Collection<RoleDescriptor>> effectiveRoleDescriptorsConsumer;
     private final FieldPermissionsCache fieldPermissionsCache;
     private final Cache<RoleKey, Role> roleCache;
+    private final CacheIteratorHelper<RoleKey, Role> roleCacheHelper;
     private final Cache<String, Boolean> negativeLookupCache;
+    private final DocumentSubsetBitsetCache dlsBitsetCache;
     private final ThreadContext threadContext;
     private final AtomicLong numInvalidation = new AtomicLong();
     private final AnonymousUser anonymousUser;
@@ -117,8 +106,10 @@ public class CompositeRolesStore {
                                ReservedRolesStore reservedRolesStore, NativePrivilegeStore privilegeStore,
                                List<BiConsumer<Set<String>, ActionListener<RoleRetrievalResult>>> rolesProviders,
                                ThreadContext threadContext, XPackLicenseState licenseState, FieldPermissionsCache fieldPermissionsCache,
-                               ApiKeyService apiKeyService, Consumer<Collection<RoleDescriptor>> effectiveRoleDescriptorsConsumer) {
+                               ApiKeyService apiKeyService, @Nullable DocumentSubsetBitsetCache dlsBitsetCache,
+                               Consumer<Collection<RoleDescriptor>> effectiveRoleDescriptorsConsumer) {
         this.fileRolesStore = fileRolesStore;
+        this.dlsBitsetCache = dlsBitsetCache;
         fileRolesStore.addListener(this::invalidate);
         this.nativeRolesStore = nativeRolesStore;
         this.privilegeStore = privilegeStore;
@@ -132,6 +123,7 @@ public class CompositeRolesStore {
             builder.setMaximumWeight(cacheSize);
         }
         this.roleCache = builder.build();
+        this.roleCacheHelper = new CacheIteratorHelper<>(roleCache);
         this.threadContext = threadContext;
         CacheBuilder<String, Boolean> nlcBuilder = CacheBuilder.builder();
         final int nlcCacheSize = NEGATIVE_LOOKUP_CACHE_SIZE_SETTING.get(settings);
@@ -261,7 +253,7 @@
         logger.trace("Building role from descriptors [{}] for names [{}] from source [{}]", roleDescriptors, roleKey.names, roleKey.source);
         buildRoleFromDescriptors(roleDescriptors, fieldPermissionsCache, privilegeStore, ActionListener.wrap(role -> {
             if (role != null && tryCache) {
-                try (ReleasableLock ignored = readLock.acquire()) {
+                try (ReleasableLock ignored = roleCacheHelper.acquireUpdateLock()) {
                     /* this is kinda spooky. We use a read/write lock to ensure we don't modify the cache if we hold
                      * the write lock (fetching stats for instance - which is kinda overkill?) but since we fetching
                      * stuff in an async fashion we need to make sure that if the cache got invalidated since we
@@ -420,47 +412,31 @@
     public void invalidateAll() {
         numInvalidation.incrementAndGet();
         negativeLookupCache.invalidateAll();
-        try (ReleasableLock ignored = readLock.acquire()) {
+        try (ReleasableLock ignored = roleCacheHelper.acquireUpdateLock()) {
             roleCache.invalidateAll();
         }
+        if (dlsBitsetCache != null) {
+            dlsBitsetCache.clear("role store invalidation");
+        }
     }
 
     public void invalidate(String role) {
         numInvalidation.incrementAndGet();
 
-        // the cache cannot be modified while doing this operation per the terms of the cache iterator
-        try (ReleasableLock ignored = writeLock.acquire()) {
-            Iterator<RoleKey> keyIter = roleCache.keys().iterator();
-            while (keyIter.hasNext()) {
-                RoleKey key = keyIter.next();
-                if (key.names.contains(role)) {
-                    keyIter.remove();
-                }
-            }
-        }
+        roleCacheHelper.removeKeysIf(key -> key.names.contains(role));
        negativeLookupCache.invalidate(role);
     }
 
     public void invalidate(Set<String> roles) {
         numInvalidation.incrementAndGet();
 
-        // the cache cannot be modified while doing this operation per the terms of the cache iterator
-        try (ReleasableLock ignored = writeLock.acquire()) {
-            Iterator<RoleKey> keyIter = roleCache.keys().iterator();
-            while (keyIter.hasNext()) {
-                RoleKey key = keyIter.next();
-                if (Sets.haveEmptyIntersection(key.names, roles) == false) {
-                    keyIter.remove();
-                }
-            }
-        }
-
+        roleCacheHelper.removeKeysIf(key -> Sets.haveEmptyIntersection(key.names, roles) == false);
         roles.forEach(negativeLookupCache::invalidate);
     }
 
     public void usageStats(ActionListener<Map<String, Object>> listener) {
         final Map<String, Object> usage = new HashMap<>(2);
         usage.put("file", fileRolesStore.usageStats());
+        usage.put("dls", Collections.singletonMap("bit_set_cache", dlsBitsetCache.usageStats()));
         nativeRolesStore.usageStats(ActionListener.wrap(map -> {
             usage.put("native", map);
             listener.onResponse(usage);
@ -149,7 +149,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
|
|||
final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
|
||||
CompositeRolesStore compositeRolesStore = new CompositeRolesStore(Settings.EMPTY, fileRolesStore, nativeRolesStore,
|
||||
reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(),
|
||||
new ThreadContext(Settings.EMPTY), licenseState, cache, mock(ApiKeyService.class),
|
||||
new ThreadContext(Settings.EMPTY), licenseState, cache, mock(ApiKeyService.class), null,
|
||||
rds -> effectiveRoleDescriptors.set(rds));
|
||||
|
||||
PlainActionFuture<Role> roleFuture = new PlainActionFuture<>();
|
||||
|
@ -224,7 +224,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
|
|||
final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
|
||||
CompositeRolesStore compositeRolesStore = new CompositeRolesStore(Settings.EMPTY, fileRolesStore, nativeRolesStore,
|
||||
reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(),
|
||||
new ThreadContext(Settings.EMPTY), licenseState, cache, mock(ApiKeyService.class),
|
||||
new ThreadContext(Settings.EMPTY), licenseState, cache, mock(ApiKeyService.class), null,
|
||||
rds -> effectiveRoleDescriptors.set(rds));
|
||||
|
||||
PlainActionFuture<Role> roleFuture = new PlainActionFuture<>();
|
||||
|
@ -276,7 +276,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
|
|||
final CompositeRolesStore compositeRolesStore =
|
||||
new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore,
|
||||
nativePrivilegeStore, Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS),
|
||||
new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class),
|
||||
new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), null,
|
||||
rds -> effectiveRoleDescriptors.set(rds));
|
||||
verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor
|
||||
|
||||
|
@@ -337,7 +337,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         final AtomicReference<Collection<RoleDescriptor>> effectiveRoleDescriptors = new AtomicReference<Collection<RoleDescriptor>>();
         final CompositeRolesStore compositeRolesStore = new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore,
             reservedRolesStore, mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(settings),
-            new XPackLicenseState(settings), cache, mock(ApiKeyService.class), rds -> effectiveRoleDescriptors.set(rds));
+            new XPackLicenseState(settings), cache, mock(ApiKeyService.class), null, rds -> effectiveRoleDescriptors.set(rds));
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor

         final String roleName = randomAlphaOfLengthBetween(1, 10);
@@ -374,7 +374,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         final CompositeRolesStore compositeRolesStore =
             new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore,
                 mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class),
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), null,
                 rds -> effectiveRoleDescriptors.set(rds));
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor

@@ -460,7 +460,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
             new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore,
                 mock(NativePrivilegeStore.class), Arrays.asList(inMemoryProvider1, inMemoryProvider2),
                 new ThreadContext(SECURITY_ENABLED_SETTINGS), new XPackLicenseState(SECURITY_ENABLED_SETTINGS),
-                cache, mock(ApiKeyService.class), rds -> effectiveRoleDescriptors.set(rds));
+                cache, mock(ApiKeyService.class), null, rds -> effectiveRoleDescriptors.set(rds));

         final Set<String> roleNames = Sets.newHashSet("roleA", "roleB", "unknown");
         PlainActionFuture<Role> future = new PlainActionFuture<>();
@@ -674,7 +674,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
             new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore,
                 mock(NativePrivilegeStore.class), Arrays.asList(inMemoryProvider1, failingProvider),
                 new ThreadContext(SECURITY_ENABLED_SETTINGS), new XPackLicenseState(SECURITY_ENABLED_SETTINGS),
-                cache, mock(ApiKeyService.class), rds -> effectiveRoleDescriptors.set(rds));
+                cache, mock(ApiKeyService.class), null, rds -> effectiveRoleDescriptors.set(rds));

         final Set<String> roleNames = Sets.newHashSet("roleA", "roleB", "unknown");
         PlainActionFuture<Role> future = new PlainActionFuture<>();
@@ -720,7 +720,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         CompositeRolesStore compositeRolesStore = new CompositeRolesStore(
             Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class),
             Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState, cache,
-            mock(ApiKeyService.class), rds -> effectiveRoleDescriptors.set(rds));
+            mock(ApiKeyService.class), null, rds -> effectiveRoleDescriptors.set(rds));

         Set<String> roleNames = Sets.newHashSet("roleA");
         PlainActionFuture<Role> future = new PlainActionFuture<>();
@@ -735,7 +735,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         compositeRolesStore = new CompositeRolesStore(
             Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class),
             Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState, cache,
-            mock(ApiKeyService.class), rds -> effectiveRoleDescriptors.set(rds));
+            mock(ApiKeyService.class), null, rds -> effectiveRoleDescriptors.set(rds));
         // these licenses allow custom role providers
         xPackLicenseState.update(randomFrom(OperationMode.PLATINUM, OperationMode.TRIAL), true, null);
         roleNames = Sets.newHashSet("roleA");
@@ -752,7 +752,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         compositeRolesStore = new CompositeRolesStore(
             Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore, mock(NativePrivilegeStore.class),
             Arrays.asList(inMemoryProvider), new ThreadContext(Settings.EMPTY), xPackLicenseState, cache,
-            mock(ApiKeyService.class), rds -> effectiveRoleDescriptors.set(rds));
+            mock(ApiKeyService.class), null, rds -> effectiveRoleDescriptors.set(rds));
         xPackLicenseState.update(randomFrom(OperationMode.PLATINUM, OperationMode.TRIAL), false, null);
         roleNames = Sets.newHashSet("roleA");
         future = new PlainActionFuture<>();
@@ -783,7 +783,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         CompositeRolesStore compositeRolesStore = new CompositeRolesStore(
             Settings.EMPTY, fileRolesStore, nativeRolesStore, reservedRolesStore,
             mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(Settings.EMPTY),
-            new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), rds -> {}) {
+            new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), null, rds -> {}) {
             @Override
             public void invalidateAll() {
                 numInvalidation.incrementAndGet();
@@ -835,7 +835,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         CompositeRolesStore compositeRolesStore = new CompositeRolesStore(SECURITY_ENABLED_SETTINGS,
             fileRolesStore, nativeRolesStore, reservedRolesStore,
             mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS),
-            new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), rds -> {}) {
+            new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), null, rds -> {}) {
             @Override
             public void invalidateAll() {
                 numInvalidation.incrementAndGet();
@@ -865,7 +865,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         final CompositeRolesStore compositeRolesStore =
             new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore,
                 mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), rds -> {});
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), null, rds -> {});
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor

         PlainActionFuture<Role> rolesFuture = new PlainActionFuture<>();
@@ -904,7 +904,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         final CompositeRolesStore compositeRolesStore =
             new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore, reservedRolesStore,
                 mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(settings),
-                new XPackLicenseState(settings), cache, mock(ApiKeyService.class), rds -> {});
+                new XPackLicenseState(settings), cache, mock(ApiKeyService.class), null, rds -> {});
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor

         PlainActionFuture<Role> rolesFuture = new PlainActionFuture<>();
@@ -932,7 +932,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         final CompositeRolesStore compositeRolesStore =
             new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore,
                 mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class),
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), null,
                 rds -> effectiveRoleDescriptors.set(rds));
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor

@@ -962,7 +962,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         final CompositeRolesStore compositeRolesStore =
             new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore,
                 mock(NativePrivilegeStore.class), Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class),
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, mock(ApiKeyService.class), null,
                 rds -> effectiveRoleDescriptors.set(rds));
         verify(fileRolesStore).addListener(any(Consumer.class)); // adds a listener in ctor
         IllegalArgumentException iae = expectThrows(IllegalArgumentException.class,
@@ -997,7 +997,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         final CompositeRolesStore compositeRolesStore =
             new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore,
                 nativePrivStore, Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, apiKeyService,
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, apiKeyService, null,
                 rds -> effectiveRoleDescriptors.set(rds));
         AuditUtil.getOrGenerateRequestId(threadContext);
         final Authentication authentication = new Authentication(new User("test api key user", "superuser"),
@@ -1042,7 +1042,7 @@ public class CompositeRolesStoreTests extends ESTestCase {
         final CompositeRolesStore compositeRolesStore =
             new CompositeRolesStore(SECURITY_ENABLED_SETTINGS, fileRolesStore, nativeRolesStore, reservedRolesStore,
                 nativePrivStore, Collections.emptyList(), new ThreadContext(SECURITY_ENABLED_SETTINGS),
-                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, apiKeyService,
+                new XPackLicenseState(SECURITY_ENABLED_SETTINGS), cache, apiKeyService, null,
                 rds -> effectiveRoleDescriptors.set(rds));
         AuditUtil.getOrGenerateRequestId(threadContext);
         final Authentication authentication = new Authentication(new User("test api key user", "api_key"),