Merge branch 'master' into packager

Original commit: elastic/x-pack-elasticsearch@56b860fbf1
This commit is contained in:
Joe Fleming 2016-03-18 10:39:17 -07:00
commit e910a89506
119 changed files with 1383 additions and 1303 deletions

View File

@ -281,7 +281,8 @@ public class SearchInputIT extends ESIntegTestCase {
parser.nextToken();
IndicesQueriesRegistry indicesQueryRegistry = internalCluster().getInstance(IndicesQueriesRegistry.class);
SearchInputFactory factory = new SearchInputFactory(Settings.EMPTY, WatcherClientProxy.of(client()), indicesQueryRegistry, null);
SearchInputFactory factory = new SearchInputFactory(Settings.EMPTY, WatcherClientProxy.of(client()), indicesQueryRegistry,
null, null);
SearchInput searchInput = factory.parseInput("_id", parser);
assertEquals(SearchInput.TYPE, searchInput.type());

View File

@ -319,8 +319,7 @@ public class SearchTransformIT extends ESIntegTestCase {
IndicesQueriesRegistry indicesQueryRegistry = internalCluster().getInstance(IndicesQueriesRegistry.class);
SearchTransformFactory transformFactory = new SearchTransformFactory(Settings.EMPTY, WatcherClientProxy.of(client()),
indicesQueryRegistry,
null);
indicesQueryRegistry, null, null);
ExecutableSearchTransform executable = transformFactory.parseExecutable("_id", parser);
assertThat(executable, notNullValue());

View File

@ -53,7 +53,8 @@ public class ShieldCachePermissionIT extends ShieldIntegTestCase {
return super.configRoles()
+ "\nread_one_idx:\n"
+ " indices:\n"
+ " 'data': READ\n";
+ " 'data':\n"
+ " - read\n";
}
@Override

View File

@ -7,8 +7,8 @@ dependencies {
integTest {
cluster {
plugin 'x-pack', project(':x-plugins:elasticsearch:x-pack')
systemProperty 'es.shield.audit.enabled', 'true'
systemProperty 'es.shield.audit.outputs', 'index'
setting 'shield.audit.enabled', 'true'
setting 'shield.audit.outputs', 'index'
setupCommand 'setupDummyUser',
'bin/xpack/esusers', 'useradd', 'test_user', '-p', 'changeme', '-r', 'admin'
waitCondition = { node, ant ->

View File

@ -34,8 +34,8 @@ integTest {
cluster {
plugin 'x-pack', project(':x-plugins:elasticsearch:x-pack')
systemProperty 'es.xpack.watcher.enabled', 'false'
systemProperty 'es.xpack.monitoring.enabled', 'false'
setting 'xpack.watcher.enabled', 'false'
setting 'xpack.monitoring.enabled', 'false'
setupCommand 'setupDummyUser',
'bin/xpack/esusers', 'useradd', 'test_user', '-p', 'changeme', '-r', 'admin'
waitCondition = { node, ant ->

View File

@ -17,10 +17,10 @@ integTest {
cluster {
plugin 'x-pack', project(':x-plugins:elasticsearch:x-pack')
// TODO: these should be settings?
systemProperty 'es.shield.authc.realms.custom.order', '0'
systemProperty 'es.shield.authc.realms.custom.type', 'custom'
systemProperty 'es.shield.authc.realms.esusers.order', '1'
systemProperty 'es.shield.authc.realms.esusers.type', 'esusers'
setting 'shield.authc.realms.custom.order', '0'
setting 'shield.authc.realms.custom.type', 'custom'
setting 'shield.authc.realms.esusers.order', '1'
setting 'shield.authc.realms.esusers.type', 'esusers'
setupCommand 'setupDummyUser',
'bin/xpack/esusers', 'useradd', 'test_user', '-p', 'changeme', '-r', 'admin'

View File

@ -6,7 +6,7 @@ dependencies {
integTest {
cluster {
systemProperty 'es.script.inline', 'true'
setting 'script.inline', 'true'
plugin 'x-pack', project(':x-plugins:elasticsearch:x-pack')
extraConfigFile 'xpack/roles.yml', 'roles.yml'
[

View File

@ -1,51 +1,77 @@
admin:
cluster: all
cluster:
- all
indices:
'*':
privileges: all
run_as: '*'
- names: '*'
privileges: [ all ]
run_as:
- '*'
# Search and write on both source and destination indices. It should work if you could just search on the source and
# write to the destination but that isn't how shield works.
minimal:
indices:
source:
privileges: search, write, create_index, indices:admin/refresh
dest:
privileges: search, write, create_index, indices:admin/refresh
- names: source
privileges:
- read
- write
- create_index
- indices:admin/refresh
- names: dest
privileges:
- read
- write
- create_index
- indices:admin/refresh
# Read only operations on indices
readonly:
indices:
'*':
privileges: search
- names: '*'
privileges: [ read ]
# Write operations on destination index, none on source index
dest_only:
indices:
dest:
privileges: write
- names: dest
privileges: [ write ]
# Search and write on both source and destination indices with document level security filtering out some docs.
can_not_see_hidden_docs:
indices:
source:
privileges: search, write, create_index, indices:admin/refresh
- names: source
privileges:
- read
- write
- create_index
- indices:admin/refresh
query:
bool:
must_not:
match:
hidden: true
dest:
privileges: search, write, create_index, indices:admin/refresh
- names: dest
privileges:
- read
- write
- create_index
- indices:admin/refresh
# Search and write on both source and destination indices with field level security.
can_not_see_hidden_fields:
indices:
source:
privileges: search, write, create_index, indices:admin/refresh
- names: source
privileges:
- read
- write
- create_index
- indices:admin/refresh
fields:
- foo
- bar
dest:
privileges: search, write, create_index, indices:admin/refresh
- names: dest
privileges:
- read
- write
- create_index
- indices:admin/refresh

View File

@ -4,12 +4,12 @@ admin:
'*': all
watcher_manager:
cluster: manage_watcher, cluster:monitor/nodes/info, cluster:monitor/health
cluster: manage
indices:
'.watcher-history-*': all
watcher_monitor:
cluster: monitor_watcher
cluster: monitor
indices:
'.watcher-history-*': read

View File

@ -1,17 +1,30 @@
admin:
cluster: all
cluster:
- all
indices:
'*': all
- names: '*'
privileges:
- all
graph_explorer:
cluster: cluster:monitor/health
cluster:
- cluster:monitor/health
indices:
'*':
privileges: graph, indices:data/write/index, indices:admin/refresh, indices:admin/create
- names: '*'
privileges:
- read
- write
- indices:admin/refresh
- indices:admin/create
no_graph_explorer:
cluster: cluster:monitor/health
cluster:
- cluster:monitor/health
indices:
'*':
privileges: indices:data/read/search, indices:data/write/index, indices:admin/refresh, indices:admin/create
- names: '*'
privileges:
- indices:data/read/search
- indices:data/write/index
- indices:admin/refresh
- indices:admin/create

View File

@ -24,7 +24,7 @@ public class GraphWithShieldInsufficientRoleIT extends GraphWithShieldIT {
super.test();
fail();
} catch(AssertionError ae) {
assertThat(ae.getMessage(), containsString("action [indices:data/read/graph/explore"));
assertThat(ae.getMessage(), containsString("action [indices:data/read/xpack/graph/explore"));
assertThat(ae.getMessage(), containsString("returned [403 Forbidden]"));
assertThat(ae.getMessage(), containsString("is unauthorized for user [no_graph_explorer]"));
}

View File

@ -8,7 +8,7 @@ dependencies {
}
// needed to be consistent with ssl host checking
String san = getSubjectAlternativeNameString()
Object san = new SanEvaluator()
// location of generated keystores and certificates
File keystoreDir = new File(project.buildDir, 'keystore')
@ -145,18 +145,18 @@ project.rootProject.subprojects.findAll { it.path.startsWith(':plugins:') }.each
integTest {
cluster {
systemProperty 'es.xpack.monitoring.agent.interval', '3s'
systemProperty 'es.xpack.monitoring.agent.exporters._http.type', 'http'
systemProperty 'es.xpack.monitoring.agent.exporters._http.enabled', 'false'
systemProperty 'es.xpack.monitoring.agent.exporters._http.ssl.truststore.path', clientKeyStore.name
systemProperty 'es.xpack.monitoring.agent.exporters._http.ssl.truststore.password', 'keypass'
systemProperty 'es.xpack.monitoring.agent.exporters._http.auth.username', 'monitoring_agent'
systemProperty 'es.xpack.monitoring.agent.exporters._http.auth.password', 'changeme'
setting 'xpack.monitoring.agent.interval', '3s'
setting 'xpack.monitoring.agent.exporters._http.type', 'http'
setting 'xpack.monitoring.agent.exporters._http.enabled', 'false'
setting 'xpack.monitoring.agent.exporters._http.ssl.truststore.path', clientKeyStore.name
setting 'xpack.monitoring.agent.exporters._http.ssl.truststore.password', 'keypass'
setting 'xpack.monitoring.agent.exporters._http.auth.username', 'monitoring_agent'
setting 'xpack.monitoring.agent.exporters._http.auth.password', 'changeme'
systemProperty 'es.shield.transport.ssl', 'true'
systemProperty 'es.shield.http.ssl', 'true'
systemProperty 'es.shield.ssl.keystore.path', nodeKeystore.name
systemProperty 'es.shield.ssl.keystore.password', 'keypass'
setting 'shield.transport.ssl', 'true'
setting 'shield.http.ssl', 'true'
setting 'shield.ssl.keystore.path', nodeKeystore.name
setting 'shield.ssl.keystore.password', 'keypass'
plugin 'x-pack', project(':x-plugins:elasticsearch:x-pack')
@ -200,144 +200,159 @@ processTestResources {
}
}
/** A lazy evaluator to find the san to use for certificate generation. */
class SanEvaluator {
// Code stolen from NetworkUtils/InetAddresses/NetworkAddress to support SAN
/** Return all interfaces (and subinterfaces) on the system */
static List<NetworkInterface> getInterfaces() throws SocketException {
List<NetworkInterface> all = new ArrayList<>();
addAllInterfaces(all, Collections.list(NetworkInterface.getNetworkInterfaces()));
Collections.sort(all, new Comparator<NetworkInterface>() {
@Override
public int compare(NetworkInterface left, NetworkInterface right) {
return Integer.compare(left.getIndex(), right.getIndex());
private static String san = null
String toString() {
synchronized (SanEvaluator.class) {
if (san == null) {
san = getSubjectAlternativeNameString()
}
}
});
return all;
}
return san
}
/** Helper for getInterfaces, recursively adds subinterfaces to {@code target} */
private static void addAllInterfaces(List<NetworkInterface> target, List<NetworkInterface> level) {
if (!level.isEmpty()) {
target.addAll(level);
for (NetworkInterface intf : level) {
addAllInterfaces(target, Collections.list(intf.getSubInterfaces()));
// Code stolen from NetworkUtils/InetAddresses/NetworkAddress to support SAN
/** Return all interfaces (and subinterfaces) on the system */
private static List<NetworkInterface> getInterfaces() throws SocketException {
List<NetworkInterface> all = new ArrayList<>();
addAllInterfaces(all, Collections.list(NetworkInterface.getNetworkInterfaces()));
Collections.sort(all, new Comparator<NetworkInterface>() {
@Override
public int compare(NetworkInterface left, NetworkInterface right) {
return Integer.compare(left.getIndex(), right.getIndex());
}
});
return all;
}
/** Helper for getInterfaces, recursively adds subinterfaces to {@code target} */
private static void addAllInterfaces(List<NetworkInterface> target, List<NetworkInterface> level) {
if (!level.isEmpty()) {
target.addAll(level);
for (NetworkInterface intf : level) {
addAllInterfaces(target, Collections.list(intf.getSubInterfaces()));
}
}
}
}
private static String getSubjectAlternativeNameString() {
List<InetAddress> list = new ArrayList<>();
for (NetworkInterface intf : getInterfaces()) {
if (intf.isUp()) {
// NOTE: some operating systems (e.g. BSD stack) assign a link local address to the loopback interface
// while technically not a loopback address, some of these treat them as one (e.g. OS X "localhost") so we must too,
// otherwise things just won't work out of box. So we include all addresses from loopback interfaces.
for (InetAddress address : Collections.list(intf.getInetAddresses())) {
if (intf.isLoopback() || address.isLoopbackAddress()) {
list.add(address);
private static String getSubjectAlternativeNameString() {
List<InetAddress> list = new ArrayList<>();
for (NetworkInterface intf : getInterfaces()) {
if (intf.isUp()) {
// NOTE: some operating systems (e.g. BSD stack) assign a link local address to the loopback interface
// while technically not a loopback address, some of these treat them as one (e.g. OS X "localhost") so we must too,
// otherwise things just won't work out of box. So we include all addresses from loopback interfaces.
for (InetAddress address : Collections.list(intf.getInetAddresses())) {
if (intf.isLoopback() || address.isLoopbackAddress()) {
list.add(address);
}
}
}
}
}
if (list.isEmpty()) {
throw new IllegalArgumentException("no up-and-running loopback addresses found, got " + getInterfaces());
}
StringBuilder builder = new StringBuilder("san=");
for (int i = 0; i < list.size(); i++) {
InetAddress address = list.get(i);
String hostAddress;
if (address instanceof Inet6Address) {
hostAddress = compressedIPV6Address((Inet6Address)address);
} else {
hostAddress = address.getHostAddress();
}
builder.append("ip:").append(hostAddress);
String hostname = address.getHostName();
if (hostname.equals(address.getHostAddress()) == false) {
builder.append(",dns:").append(hostname);
if (list.isEmpty()) {
throw new IllegalArgumentException("no up-and-running loopback addresses found, got " + getInterfaces());
}
if (i != (list.size() - 1)) {
builder.append(",");
}
}
return builder.toString();
}
private static String compressedIPV6Address(Inet6Address inet6Address) {
byte[] bytes = inet6Address.getAddress();
int[] hextets = new int[8];
for (int i = 0; i < hextets.length; i++) {
hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255;
}
compressLongestRunOfZeroes(hextets);
return hextetsToIPv6String(hextets);
}
/**
* Identify and mark the longest run of zeroes in an IPv6 address.
*
* <p>Only runs of two or more hextets are considered. In case of a tie, the
* leftmost run wins. If a qualifying run is found, its hextets are replaced
* by the sentinel value -1.
*
* @param hextets {@code int[]} mutable array of eight 16-bit hextets
*/
private static void compressLongestRunOfZeroes(int[] hextets) {
int bestRunStart = -1;
int bestRunLength = -1;
int runStart = -1;
for (int i = 0; i < hextets.length + 1; i++) {
if (i < hextets.length && hextets[i] == 0) {
if (runStart < 0) {
runStart = i;
StringBuilder builder = new StringBuilder("san=");
for (int i = 0; i < list.size(); i++) {
InetAddress address = list.get(i);
String hostAddress;
if (address instanceof Inet6Address) {
hostAddress = compressedIPV6Address((Inet6Address)address);
} else {
hostAddress = address.getHostAddress();
}
} else if (runStart >= 0) {
int runLength = i - runStart;
if (runLength > bestRunLength) {
bestRunStart = runStart;
bestRunLength = runLength;
builder.append("ip:").append(hostAddress);
String hostname = address.getHostName();
if (hostname.equals(address.getHostAddress()) == false) {
builder.append(",dns:").append(hostname);
}
runStart = -1;
}
}
if (bestRunLength >= 2) {
Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1);
}
}
/**
* Convert a list of hextets into a human-readable IPv6 address.
*
* <p>In order for "::" compression to work, the input should contain negative
* sentinel values in place of the elided zeroes.
*
* @param hextets {@code int[]} array of eight 16-bit hextets, or -1s
*/
private static String hextetsToIPv6String(int[] hextets) {
/*
* While scanning the array, handle these state transitions:
* start->num => "num" start->gap => "::"
* num->num => ":num" num->gap => "::"
* gap->num => "num" gap->gap => ""
if (i != (list.size() - 1)) {
builder.append(",");
}
}
return builder.toString();
}
private static String compressedIPV6Address(Inet6Address inet6Address) {
byte[] bytes = inet6Address.getAddress();
int[] hextets = new int[8];
for (int i = 0; i < hextets.length; i++) {
hextets[i] = (bytes[2 * i] & 255) << 8 | bytes[2 * i + 1] & 255;
}
compressLongestRunOfZeroes(hextets);
return hextetsToIPv6String(hextets);
}
/**
* Identify and mark the longest run of zeroes in an IPv6 address.
*
* <p>Only runs of two or more hextets are considered. In case of a tie, the
* leftmost run wins. If a qualifying run is found, its hextets are replaced
* by the sentinel value -1.
*
* @param hextets {@code int[]} mutable array of eight 16-bit hextets
*/
StringBuilder buf = new StringBuilder(39);
boolean lastWasNumber = false;
for (int i = 0; i < hextets.length; i++) {
boolean thisIsNumber = hextets[i] >= 0;
if (thisIsNumber) {
if (lastWasNumber) {
buf.append(':');
}
buf.append(Integer.toHexString(hextets[i]));
} else {
if (i == 0 || lastWasNumber) {
buf.append("::");
private static void compressLongestRunOfZeroes(int[] hextets) {
int bestRunStart = -1;
int bestRunLength = -1;
int runStart = -1;
for (int i = 0; i < hextets.length + 1; i++) {
if (i < hextets.length && hextets[i] == 0) {
if (runStart < 0) {
runStart = i;
}
} else if (runStart >= 0) {
int runLength = i - runStart;
if (runLength > bestRunLength) {
bestRunStart = runStart;
bestRunLength = runLength;
}
runStart = -1;
}
}
lastWasNumber = thisIsNumber;
if (bestRunLength >= 2) {
Arrays.fill(hextets, bestRunStart, bestRunStart + bestRunLength, -1);
}
}
/**
* Convert a list of hextets into a human-readable IPv6 address.
*
* <p>In order for "::" compression to work, the input should contain negative
* sentinel values in place of the elided zeroes.
*
* @param hextets {@code int[]} array of eight 16-bit hextets, or -1s
*/
private static String hextetsToIPv6String(int[] hextets) {
/*
* While scanning the array, handle these state transitions:
* start->num => "num" start->gap => "::"
* num->num => ":num" num->gap => "::"
* gap->num => "num" gap->gap => ""
*/
StringBuilder buf = new StringBuilder(39);
boolean lastWasNumber = false;
for (int i = 0; i < hextets.length; i++) {
boolean thisIsNumber = hextets[i] >= 0;
if (thisIsNumber) {
if (lastWasNumber) {
buf.append(':');
}
buf.append(Integer.toHexString(hextets[i]));
} else {
if (i == 0 || lastWasNumber) {
buf.append("::");
}
}
lastWasNumber = thisIsNumber;
}
return buf.toString();
}
return buf.toString();
}

View File

@ -0,0 +1,19 @@
# Integration tests for smoke testing plugins
#
"Secret settings are correctly filtered":
- do:
cluster.state: {}
- set: {master_node: master}
- do:
nodes.info:
metric: [ settings ]
- is_true: nodes
- is_true: nodes.$master.settings.xpack.monitoring.agent.exporters._http.type
- is_false: nodes.$master.settings.xpack.monitoring.agent.exporters._http.auth.username
- is_false: nodes.$master.settings.xpack.monitoring.agent.exporters._http.auth.password
- is_false: nodes.$master.settings.xpack.monitoring.agent.exporters._http.ssl.truststore.path
- is_false: nodes.$master.settings.xpack.monitoring.agent.exporters._http.ssl.truststore.password

View File

@ -8,8 +8,8 @@ dependencies {
integTest {
cluster {
plugin 'x-pack', project(':x-plugins:elasticsearch:x-pack')
systemProperty 'es.script.inline', 'true'
systemProperty 'es.xpack.shield.enabled', 'false'
systemProperty 'es.xpack.monitoring.enabled', 'false'
setting 'script.inline', 'true'
setting 'xpack.shield.enabled', 'false'
setting 'xpack.monitoring.enabled', 'false'
}
}

View File

@ -8,8 +8,8 @@ dependencies {
integTest {
cluster {
plugin 'x-pack', project(':x-plugins:elasticsearch:x-pack')
systemProperty 'es.xpack.shield.enabled', 'false'
systemProperty 'es.xpack.monitoring.enabled', 'false'
systemProperty 'es.http.port', '9400'
setting 'xpack.shield.enabled', 'false'
setting 'xpack.monitoring.enabled', 'false'
setting 'http.port', '9400'
}
}

View File

@ -1,18 +1,29 @@
admin:
cluster: all
cluster:
- all
indices:
'*': all
- names: '*'
privileges:
- all
watcher_manager:
cluster: manage_watcher, cluster:monitor/nodes/info, cluster:monitor/health
cluster:
- manage
indices:
'.watcher-history-*': all
run_as: powerless_user, watcher_manager
- names: '.watcher-history-*'
privileges:
- all
run_as:
- powerless_user
- watcher_manager
watcher_monitor:
cluster: monitor_watcher
cluster:
- monitor
indices:
'.watcher-history-*': read
- names: '.watcher-history-*'
privileges:
- read
crappy_role:
cluster:

View File

@ -10,7 +10,6 @@ import java.util.Collection;
import java.util.Collections;
import org.elasticsearch.action.ActionModule;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.network.NetworkModule;
@ -23,8 +22,6 @@ import org.elasticsearch.graph.license.GraphLicensee;
import org.elasticsearch.graph.license.GraphModule;
import org.elasticsearch.graph.rest.action.RestGraphAction;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.shield.Shield;
import org.elasticsearch.xpack.XPackPlugin;
public class Graph extends Plugin {
@ -37,11 +34,6 @@ public class Graph extends Plugin {
public Graph(Settings settings) {
this.transportClientMode = XPackPlugin.transportClientMode(settings);
enabled = enabled(settings);
// adding the graph privileges to shield
if (Shield.enabled(settings)) {
Shield.registerIndexPrivilege( "graph", GraphExploreAction.NAME, SearchTransportService.QUERY_ACTION_NAME,
SearchAction.NAME, SearchTransportService.QUERY_FETCH_ACTION_NAME);
}
}
@Override

View File

@ -12,7 +12,7 @@ public class GraphExploreAction extends Action<GraphExploreRequest, GraphExplore
GraphExploreRequestBuilder> {
public static final GraphExploreAction INSTANCE = new GraphExploreAction();
public static final String NAME = "indices:data/read/graph/explore";
public static final String NAME = "indices:data/read/xpack/graph/explore";
private GraphExploreAction() {
super(NAME);

View File

@ -11,7 +11,7 @@ import org.elasticsearch.client.ElasticsearchClient;
public class DeleteLicenseAction extends Action<DeleteLicenseRequest, DeleteLicenseResponse, DeleteLicenseRequestBuilder> {
public static final DeleteLicenseAction INSTANCE = new DeleteLicenseAction();
public static final String NAME = "cluster:admin/plugin/license/delete";
public static final String NAME = "cluster:admin/xpack/license/delete";
private DeleteLicenseAction() {
super(NAME);

View File

@ -11,7 +11,7 @@ import org.elasticsearch.client.ElasticsearchClient;
public class GetLicenseAction extends Action<GetLicenseRequest, GetLicenseResponse, GetLicenseRequestBuilder> {
public static final GetLicenseAction INSTANCE = new GetLicenseAction();
public static final String NAME = "cluster:admin/plugin/license/get";
public static final String NAME = "cluster:monitor/xpack/license/get";
private GetLicenseAction() {
super(NAME);

View File

@ -11,7 +11,7 @@ import org.elasticsearch.client.ElasticsearchClient;
public class PutLicenseAction extends Action<PutLicenseRequest, PutLicenseResponse, PutLicenseRequestBuilder> {
public static final PutLicenseAction INSTANCE = new PutLicenseAction();
public static final String NAME = "cluster:admin/plugin/license/put";
public static final String NAME = "cluster:admin/xpack/license/put";
private PutLicenseAction() {
super(NAME);

View File

@ -134,7 +134,8 @@ public class MarvelSettings extends AbstractComponent {
module.registerSetting(ENABLED);
module.registerSetting(INDEX_TEMPLATE_VERSION);
module.registerSettingsFilter("xpack.monitoring.agent.exporters.*.auth.password");
module.registerSettingsFilter("xpack.monitoring.agent.exporters.*.auth.*");
module.registerSettingsFilter("xpack.monitoring.agent.exporters.*.ssl.*");
}

View File

@ -67,7 +67,9 @@ public class CleanerService extends AbstractLifecycleComponent<CleanerService> {
@Override
protected void doClose() {
logger.debug("closing cleaning service");
runnable.cancel();
if (runnable != null) {
runnable.cancel();
}
logger.debug("cleaning service closed");
}

View File

@ -132,14 +132,14 @@ public class ClusterStatsResolverTests extends MonitoringIndexNameResolverTestCa
*/
private ShardStats[] randomShardStats() {
Index index = new Index("test", UUID.randomUUID().toString());
Path shardPath = createTempDir().resolve("indices").resolve("test").resolve("0");
Path shardPath = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("0");
ShardRouting shardRouting = ShardRouting.newUnassigned(index, 0, null, false,
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo"));
CommonStats shardCommonStats = new CommonStats();
shardCommonStats.fieldData = new FieldDataStats();
shardCommonStats.queryCache = new QueryCacheStats();
return new ShardStats[]{
new ShardStats(shardRouting, new ShardPath(false, shardPath, shardPath, "", new ShardId(index, 0)), shardCommonStats, null)
new ShardStats(shardRouting, new ShardPath(false, shardPath, shardPath, new ShardId(index, 0)), shardCommonStats, null)
};
}
}

View File

@ -77,7 +77,7 @@ public class IndexStatsResolverTests extends MonitoringIndexNameResolverTestCase
private IndexStats randomIndexStats() {
Index index = new Index("test-" + randomIntBetween(0, 5), UUID.randomUUID().toString());
ShardId shardId = new ShardId(index, 0);
Path path = createTempDir().resolve("indices").resolve(index.getName()).resolve("0");
Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("0");
ShardRouting shardRouting = ShardRouting.newUnassigned(index, 0, null, true,
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
ShardRoutingTestUtils.initialize(shardRouting, "node-0");
@ -92,7 +92,7 @@ public class IndexStatsResolverTests extends MonitoringIndexNameResolverTestCase
stats.segments = new SegmentsStats();
stats.merge = new MergeStats();
stats.refresh = new RefreshStats();
ShardStats shardStats = new ShardStats(shardRouting, new ShardPath(false, path, path, null, shardId), stats, null);
ShardStats shardStats = new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null);
return new IndexStats(index.getName(), new ShardStats[]{shardStats});
}
}

View File

@ -85,7 +85,7 @@ public class IndicesStatsResolverTests extends MonitoringIndexNameResolverTestCa
List<ShardStats> shardStats = new ArrayList<>();
for (int i=0; i < randomIntBetween(2, 5); i++) {
ShardId shardId = new ShardId(index, i);
Path path = createTempDir().resolve("indices").resolve(index.getName()).resolve(String.valueOf(i));
Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve(String.valueOf(i));
ShardRouting shardRouting = ShardRouting.newUnassigned(index, i, null, true,
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
ShardRoutingTestUtils.initialize(shardRouting, "node-0");
@ -100,7 +100,7 @@ public class IndicesStatsResolverTests extends MonitoringIndexNameResolverTestCa
stats.segments = new SegmentsStats();
stats.merge = new MergeStats();
stats.refresh = new RefreshStats();
shardStats.add(new ShardStats(shardRouting, new ShardPath(false, path, path, null, shardId), stats, null));
shardStats.add(new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null));
}
return IndicesStatsResponseTestUtils.newIndicesStatsResponse(shardStats.toArray(new ShardStats[shardStats.size()]),
shardStats.size(), shardStats.size(), 0, emptyList());

View File

@ -103,7 +103,7 @@ public class NodeStatsResolverTests extends MonitoringIndexNameResolverTestCase<
private NodeStats randomNodeStats() {
Index index = new Index("test-" + randomIntBetween(0, 5), UUID.randomUUID().toString());
ShardId shardId = new ShardId(index, 0);
Path path = createTempDir().resolve("indices").resolve(index.getName()).resolve("0");
Path path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("0");
ShardRouting shardRouting = ShardRouting.newUnassigned(index, 0, null, true,
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
ShardRoutingTestUtils.initialize(shardRouting, "node-0");
@ -116,7 +116,7 @@ public class NodeStatsResolverTests extends MonitoringIndexNameResolverTestCase<
stats.indexing = new IndexingStats();
stats.search = new SearchStats();
stats.segments = new SegmentsStats();
ShardStats shardStats = new ShardStats(shardRouting, new ShardPath(false, path, path, null, shardId), stats, null);
ShardStats shardStats = new ShardStats(shardRouting, new ShardPath(false, path, path, shardId), stats, null);
FsInfo.Path[] pathInfo = new FsInfo.Path[]{
new FsInfo.Path("/test", "/dev/sda", 10, -8, 0),
};

View File

@ -0,0 +1,98 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.marvel.shield;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.marvel.MarvelSettings;
import org.elasticsearch.marvel.test.MarvelIntegTestCase;
import org.elasticsearch.shield.authc.support.SecuredString;
import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
import org.elasticsearch.test.rest.client.http.HttpResponse;
import org.hamcrest.Matchers;
import org.junit.After;
import java.io.IOException;
import java.util.Map;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue;
import static org.elasticsearch.shield.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER;
import static org.elasticsearch.shield.authc.support.UsernamePasswordToken.basicAuthHeaderValue;
import static org.hamcrest.CoreMatchers.nullValue;
public class MarvelSettingsFilterTests extends MarvelIntegTestCase {
private CloseableHttpClient httpClient = HttpClients.createDefault();
@After
public void cleanup() throws IOException {
httpClient.close();
}
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put(NetworkModule.HTTP_ENABLED.getKey(), true)
.put(MarvelSettings.INTERVAL.getKey(), "-1")
.put("xpack.monitoring.agent.exporters._http.type", "http")
.put("xpack.monitoring.agent.exporters._http.enabled", false)
.put("xpack.monitoring.agent.exporters._http.auth.username", "_user")
.put("xpack.monitoring.agent.exporters._http.auth.password", "_passwd")
.put("xpack.monitoring.agent.exporters._http.ssl.truststore.path", "/path/to/truststore")
.put("xpack.monitoring.agent.exporters._http.ssl.truststore.password", "_passwd")
.put("xpack.monitoring.agent.exporters._http.ssl.hostname_verification", true)
.build();
}
public void testGetSettingsFiltered() throws Exception {
String body = executeRequest("GET", "/_nodes/settings", null, null).getBody();
Map<String, Object> response = JsonXContent.jsonXContent.createParser(body).map();
Map<String, Object> nodes = (Map<String, Object>) response.get("nodes");
for (Object node : nodes.values()) {
Map<String, Object> settings = (Map<String, Object>) ((Map<String, Object>) node).get("settings");
assertThat(extractValue("xpack.monitoring.agent.exporters._http.type", settings), Matchers.<Object>equalTo("http"));
assertThat(extractValue("xpack.monitoring.agent.exporters._http.enabled", settings), Matchers.<Object>equalTo("false"));
assertNullSetting(settings, "xpack.monitoring.agent.exporters._http.auth.username");
assertNullSetting(settings, "xpack.monitoring.agent.exporters._http.auth.password");
assertNullSetting(settings, "xpack.monitoring.agent.exporters._http.ssl.truststore.path");
assertNullSetting(settings, "xpack.monitoring.agent.exporters._http.ssl.truststore.password");
assertNullSetting(settings, "xpack.monitoring.agent.exporters._http.ssl.hostname_verification");
}
}
private void assertNullSetting(Map<String, Object> settings, String setting) {
assertThat(extractValue(setting, settings), nullValue());
}
protected HttpResponse executeRequest(String method, String path, String body, Map<String, String> params) throws IOException {
HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class,
internalCluster().getMasterName());
HttpRequestBuilder requestBuilder = new HttpRequestBuilder(httpClient)
.httpTransport(httpServerTransport)
.method(method)
.path(path);
if (params != null) {
for (Map.Entry<String, String> entry : params.entrySet()) {
requestBuilder.addParam(entry.getKey(), entry.getValue());
}
}
if (body != null) {
requestBuilder.body(body);
}
if (shieldEnabled) {
requestBuilder.addHeader(BASIC_AUTH_HEADER,
basicAuthHeaderValue(ShieldSettings.TEST_USERNAME, new SecuredString(ShieldSettings.TEST_PASSWORD.toCharArray())));
}
return requestBuilder.execute();
}
}

View File

@ -16,7 +16,6 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.marvel.MarvelSettings;
import org.elasticsearch.marvel.MonitoredSystem;
@ -25,7 +24,6 @@ import org.elasticsearch.marvel.agent.exporter.MarvelTemplateUtils;
import org.elasticsearch.marvel.agent.exporter.MonitoringDoc;
import org.elasticsearch.marvel.agent.resolver.MonitoringIndexNameResolver;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.shield.Shield;
import org.elasticsearch.shield.authc.esusers.ESUsersRealm;
import org.elasticsearch.shield.authc.support.Hasher;
import org.elasticsearch.shield.authc.support.SecuredString;
@ -436,20 +434,21 @@ public abstract class MarvelIntegTestCase extends ESIntegTestCase {
public static final String ROLES =
"test:\n" + // a user for the test infra.
" cluster: cluster:monitor/nodes/info, cluster:monitor/nodes/stats, cluster:monitor/state, " +
"cluster:monitor/health, cluster:monitor/stats, cluster:monitor/task, cluster:admin/settings/update, " +
"cluster:admin/repository/delete, cluster:monitor/nodes/liveness, indices:admin/template/get, " +
"indices:admin/template/put, indices:admin/template/delete\n" +
" cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/state', 'cluster:monitor/health', 'cluster:monitor/stats'," +
" 'cluster:admin/settings/update', 'cluster:admin/repository/delete', 'cluster:monitor/nodes/liveness'," +
" 'indices:admin/template/get', 'indices:admin/template/put', 'indices:admin/template/delete'," +
" 'cluster:monitor/task']\n" +
" indices:\n" +
" '*': all\n" +
" - names: '*'\n" +
" privileges: [ all ]\n" +
"\n" +
"admin:\n" +
" cluster: cluster:monitor/nodes/info, cluster:monitor/nodes/liveness\n" +
" cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n" +
"transport_client:\n" +
" cluster: cluster:monitor/nodes/info, cluster:monitor/nodes/liveness\n" +
" cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n" +
"\n" +
"monitor:\n" +
" cluster: cluster:monitor/nodes/info, cluster:monitor/nodes/liveness\n"
" cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n"
;
@ -462,8 +461,6 @@ public abstract class MarvelIntegTestCase extends ESIntegTestCase {
Path folder = createTempDir().resolve("marvel_shield");
Files.createDirectories(folder);
builder.remove("index.queries.cache.type");
builder.put("shield.enabled", true)
.put("shield.authc.realms.esusers.type", ESUsersRealm.TYPE)
.put("shield.authc.realms.esusers.order", 0)
@ -472,10 +469,7 @@ public abstract class MarvelIntegTestCase extends ESIntegTestCase {
.put("shield.authz.store.files.roles", writeFile(folder, "roles.yml", ROLES))
.put("shield.system_key.file", writeFile(folder, "system_key.yml", systemKey))
.put("shield.authc.sign_user_header", false)
.put("shield.audit.enabled", auditLogsEnabled)
// Test framework sometimes randomly selects the 'index' or 'none' cache and that makes the
// validation in ShieldPlugin fail. Shield can only run with this query cache impl
.put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), Shield.OPT_OUT_QUERY_CACHE);
.put("shield.audit.enabled", auditLogsEnabled);
} catch (IOException ex) {
throw new RuntimeException("failed to build settings for shield", ex);
}

View File

@ -1,73 +1,71 @@
admin:
cluster: all
cluster:
- all
indices:
'*':
privileges: all
- names: '*'
privileges:
- all
# monitoring cluster privileges
# All operations on all indices
power_user:
cluster: monitor
cluster:
- monitor
indices:
'*':
privileges: all
- names: '*'
privileges:
- all
# Read-only operations on indices
user:
indices:
'*':
privileges: read
- names: '*'
privileges:
- read
# Defines the required permissions for transport clients
transport_client:
cluster:
- cluster:monitor/nodes/liveness
#uncomment the following for sniffing
#- cluster:monitor/state
# The required permissions for kibana 4 users.
kibana4:
cluster:
- cluster:monitor/nodes/info
- cluster:monitor/health
indices:
'*':
privileges: indices:admin/mappings/fields/get, indices:admin/validate/query, indices:data/read/search, indices:data/read/msearch, indices:data/read/field_stats, indices:admin/get
'.kibana':
privileges: indices:admin/exists, indices:admin/mapping/put, indices:admin/mappings/fields/get, indices:admin/refresh, indices:admin/validate/query, indices:data/read/get, indices:data/read/mget, indices:data/read/search, indices:data/write/delete, indices:data/write/index, indices:data/write/update
- transport_client
# The required permissions for the kibana 4 server
kibana4_server:
cluster:
- cluster:monitor/nodes/info
- cluster:monitor/health
- monitor
indices:
'.kibana':
privileges: indices:admin/create, indices:admin/exists, indices:admin/mapping/put, indices:admin/mappings/fields/get, indices:admin/refresh, indices:admin/validate/query, indices:data/read/get, indices:data/read/mget, indices:data/read/search, indices:data/write/delete, indices:data/write/index, indices:data/write/update
- names: '.kibana'
privileges:
- all
# The required role for logstash users
logstash:
cluster: indices:admin/template/get, indices:admin/template/put
cluster:
- manage_index_templates
indices:
'logstash-*':
privileges: indices:data/write/bulk, indices:data/write/delete, indices:data/write/update, indices:data/read/search, indices:data/read/scroll, create_index
- names: 'logstash-*'
privileges:
- write
- read
- create_index
# Monitoring user role. Assign to monitoring users.
# Marvel user role. Assign to marvel users.
monitoring_user:
indices:
'.monitoring-*':
privileges: read
'.kibana':
privileges: indices:admin/exists, indices:admin/mappings/fields/get, indices:admin/validate/query, indices:data/read/get, indices:data/read/mget, indices:data/read/search
- names:
- '.marvel-es-*'
- '.monitoring-*'
privileges: [ "read" ]
- names: '.kibana'
privileges:
- view_index_metadata
- read
# Monitoring remote agent role. Assign to the agent user on the remote monitoring cluster
# to which the monitoring agent will export all its data
# Marvel remote agent role. Assign to the agent user on the remote marvel cluster
# to which the marvel agent will export all its data
remote_monitoring_agent:
cluster: indices:admin/template/put, indices:admin/template/get
cluster: [ "manage_index_templates" ]
indices:
'.monitoring-*':
privileges: all
# Allows all operations required to manage ingest pipelines
ingest_admin:
cluster: manage_pipeline
- names:
- '.marvel-es-*'
- '.monitoring-*'
privileges: [ "all" ]

View File

@ -6,11 +6,15 @@
package org.elasticsearch.shield;
import org.elasticsearch.action.ActionModule;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.inject.Module;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
@ -35,6 +39,8 @@ import org.elasticsearch.shield.action.user.TransportPutUserAction;
import org.elasticsearch.shield.action.user.TransportDeleteUserAction;
import org.elasticsearch.shield.action.user.TransportGetUsersAction;
import org.elasticsearch.shield.audit.AuditTrailModule;
import org.elasticsearch.shield.audit.index.IndexAuditTrail;
import org.elasticsearch.shield.audit.index.IndexNameResolver;
import org.elasticsearch.shield.audit.logfile.LoggingAuditTrail;
import org.elasticsearch.shield.authc.AuthenticationModule;
import org.elasticsearch.shield.authc.Realms;
@ -71,6 +77,8 @@ import org.elasticsearch.shield.transport.filter.IPFilter;
import org.elasticsearch.shield.transport.netty.ShieldNettyHttpServerTransport;
import org.elasticsearch.shield.transport.netty.ShieldNettyTransport;
import org.elasticsearch.xpack.XPackPlugin;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import java.util.ArrayList;
import java.util.Arrays;
@ -101,7 +109,7 @@ public class Shield {
this.transportClientMode = XPackPlugin.transportClientMode(settings);
this.enabled = XPackPlugin.featureEnabled(settings, NAME, true);
if (enabled && !transportClientMode) {
failIfShieldQueryCacheIsNotActive(settings, true);
validateAutoCreateIndex(settings);
}
}
@ -163,7 +171,6 @@ public class Shield {
settingsBuilder.put(NetworkModule.HTTP_TYPE_SETTING.getKey(), Shield.NAME);
addUserSettings(settingsBuilder);
addTribeSettings(settingsBuilder);
addQueryCacheSettings(settingsBuilder);
return settingsBuilder.build();
}
@ -226,7 +233,11 @@ public class Shield {
}
if (transportClientMode == false) {
module.registerQueryCache(Shield.OPT_OUT_QUERY_CACHE, OptOutQueryCache::new);
failIfShieldQueryCacheIsNotActive(module.getSettings(), false);
/* We need to forcefully overwrite the query cache implementation to use Shield's opt out query cache implementation.
* This impl. disabled the query cache if field level security is used for a particular request. If we wouldn't do
* forcefully overwrite the query cache implementation then we leave the system vulnerable to leakages of data to
* unauthorized users. */
module.forceQueryCacheType(Shield.OPT_OUT_QUERY_CACHE);
}
}
@ -279,30 +290,6 @@ public class Shield {
}
}
public static void registerClusterPrivilege(String name, String... patterns) {
try {
ClusterPrivilege.addCustom(name, patterns);
} catch (Exception se) {
logger.warn("could not register cluster privilege [{}]", name);
// we need to prevent bubbling the shield exception here for the tests. In the tests
// we create multiple nodes in the same jvm and since the custom cluster is a static binding
// multiple nodes will try to add the same privileges multiple times.
}
}
public static void registerIndexPrivilege(String name, String... patterns) {
try {
IndexPrivilege.addCustom(name, patterns);
} catch (Exception se) {
logger.warn("could not register index privilege [{}]", name);
// we need to prevent bubbling the shield exception here for the tests. In the tests
// we create multiple nodes in the same jvm and since the custom cluster is a static binding
// multiple nodes will try to add the same privileges multiple times.
}
}
private void addUserSettings(Settings.Builder settingsBuilder) {
String authHeaderSettingName = ThreadContext.PREFIX + "." + UsernamePasswordToken.BASIC_AUTH_HEADER;
if (settings.get(authHeaderSettingName) != null) {
@ -377,16 +364,6 @@ public class Shield {
}
}
/**
* We need to forcefully overwrite the query cache implementation to use Shield's opt out query cache implementation.
* This impl. disabled the query cache if field level security is used for a particular request. If we wouldn't do
* forcefully overwrite the query cache implementation then we leave the system vulnerable to leakages of data to
* unauthorized users.
*/
private void addQueryCacheSettings(Settings.Builder settingsBuilder) {
settingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), OPT_OUT_QUERY_CACHE);
}
public static boolean enabled(Settings settings) {
return XPackPlugin.featureEnabled(settings, NAME, true);
}
@ -395,19 +372,72 @@ public class Shield {
return XPackPlugin.featureEnabled(settings, DLS_FLS_FEATURE, true);
}
private void failIfShieldQueryCacheIsNotActive(Settings settings, boolean nodeSettings) {
String queryCacheImplementation;
if (nodeSettings) {
// in case this are node settings then the plugin additional settings have not been applied yet,
// so we use 'opt_out_cache' as default. So in that case we only fail if the node settings contain
// another cache impl than 'opt_out_cache'.
queryCacheImplementation = settings.get(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), OPT_OUT_QUERY_CACHE);
} else {
queryCacheImplementation = settings.get(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey());
static void validateAutoCreateIndex(Settings settings) {
String value = settings.get("action.auto_create_index");
if (value == null) {
return;
}
if (OPT_OUT_QUERY_CACHE.equals(queryCacheImplementation) == false) {
throw new IllegalStateException("shield does not support a user specified query cache. remove the setting [" + IndexModule
.INDEX_QUERY_CACHE_TYPE_SETTING.getKey() + "] with value [" + queryCacheImplementation + "]");
final boolean indexAuditingEnabled = AuditTrailModule.indexAuditLoggingEnabled(settings);
final String auditIndex = indexAuditingEnabled ? "," + IndexAuditTrail.INDEX_NAME_PREFIX + "*" : "";
String errorMessage = LoggerMessageFormat.format("the [action.auto_create_index] setting value [{}] is too" +
" restrictive. disable [action.auto_create_index] or set it to " +
"[{}{}]", (Object) value, ShieldTemplateService.SECURITY_INDEX_NAME, auditIndex);
if (Booleans.isExplicitFalse(value)) {
throw new IllegalArgumentException(errorMessage);
}
if (Booleans.isExplicitTrue(value)) {
return;
}
String[] matches = Strings.commaDelimitedListToStringArray(value);
List<String> indices = new ArrayList<>();
indices.add(ShieldTemplateService.SECURITY_INDEX_NAME);
if (indexAuditingEnabled) {
DateTime now = new DateTime(DateTimeZone.UTC);
// just use daily rollover
indices.add(IndexNameResolver.resolve(IndexAuditTrail.INDEX_NAME_PREFIX, now, IndexNameResolver.Rollover.DAILY));
indices.add(IndexNameResolver.resolve(IndexAuditTrail.INDEX_NAME_PREFIX, now.plusDays(1), IndexNameResolver.Rollover.DAILY));
indices.add(IndexNameResolver.resolve(IndexAuditTrail.INDEX_NAME_PREFIX, now.plusMonths(1), IndexNameResolver.Rollover.DAILY));
indices.add(IndexNameResolver.resolve(IndexAuditTrail.INDEX_NAME_PREFIX, now.plusMonths(2), IndexNameResolver.Rollover.DAILY));
indices.add(IndexNameResolver.resolve(IndexAuditTrail.INDEX_NAME_PREFIX, now.plusMonths(3), IndexNameResolver.Rollover.DAILY));
indices.add(IndexNameResolver.resolve(IndexAuditTrail.INDEX_NAME_PREFIX, now.plusMonths(4), IndexNameResolver.Rollover.DAILY));
indices.add(IndexNameResolver.resolve(IndexAuditTrail.INDEX_NAME_PREFIX, now.plusMonths(5), IndexNameResolver.Rollover.DAILY));
indices.add(IndexNameResolver.resolve(IndexAuditTrail.INDEX_NAME_PREFIX, now.plusMonths(6), IndexNameResolver.Rollover.DAILY));
}
for (String index : indices) {
boolean matched = false;
for (String match : matches) {
char c = match.charAt(0);
if (c == '-') {
if (Regex.simpleMatch(match.substring(1), index)) {
throw new IllegalArgumentException(errorMessage);
}
} else if (c == '+') {
if (Regex.simpleMatch(match.substring(1), index)) {
matched = true;
break;
}
} else {
if (Regex.simpleMatch(match, index)) {
matched = true;
break;
}
}
}
if (!matched) {
throw new IllegalArgumentException(errorMessage);
}
}
if (indexAuditingEnabled) {
logger.warn("the [action.auto_create_index] setting is configured to be restrictive [{}]. " +
" for the next 6 months audit indices are allowed to be created, but please make sure" +
" that any future history indices after 6 months with the pattern " +
"[.shield_audit_log*] are allowed to be created", value);
}
}
}

View File

@ -14,7 +14,7 @@ import org.elasticsearch.client.ElasticsearchClient;
public class ClearRealmCacheAction extends Action<ClearRealmCacheRequest, ClearRealmCacheResponse, ClearRealmCacheRequestBuilder> {
public static final ClearRealmCacheAction INSTANCE = new ClearRealmCacheAction();
public static final String NAME = "cluster:admin/shield/realm/cache/clear";
public static final String NAME = "cluster:admin/xpack/security/realm/cache/clear";
protected ClearRealmCacheAction() {
super(NAME);

View File

@ -14,7 +14,7 @@ import org.elasticsearch.client.ElasticsearchClient;
public class ClearRolesCacheAction extends Action<ClearRolesCacheRequest, ClearRolesCacheResponse, ClearRolesCacheRequestBuilder> {
public static final ClearRolesCacheAction INSTANCE = new ClearRolesCacheAction();
public static final String NAME = "cluster:admin/shield/roles/cache/clear";
public static final String NAME = "cluster:admin/xpack/security/roles/cache/clear";
protected ClearRolesCacheAction() {
super(NAME);

View File

@ -14,7 +14,7 @@ import org.elasticsearch.client.ElasticsearchClient;
public class DeleteRoleAction extends Action<DeleteRoleRequest, DeleteRoleResponse, DeleteRoleRequestBuilder> {
public static final DeleteRoleAction INSTANCE = new DeleteRoleAction();
public static final String NAME = "cluster:admin/shield/role/delete";
public static final String NAME = "cluster:admin/xpack/security/role/delete";
protected DeleteRoleAction() {

View File

@ -14,7 +14,7 @@ import org.elasticsearch.client.ElasticsearchClient;
public class GetRolesAction extends Action<GetRolesRequest, GetRolesResponse, GetRolesRequestBuilder> {
public static final GetRolesAction INSTANCE = new GetRolesAction();
public static final String NAME = "cluster:admin/shield/role/get";
public static final String NAME = "cluster:admin/xpack/security/role/get";
protected GetRolesAction() {

View File

@ -14,7 +14,7 @@ import org.elasticsearch.client.ElasticsearchClient;
public class PutRoleAction extends Action<PutRoleRequest, PutRoleResponse, PutRoleRequestBuilder> {
public static final PutRoleAction INSTANCE = new PutRoleAction();
public static final String NAME = "cluster:admin/shield/role/put";
public static final String NAME = "cluster:admin/xpack/security/role/put";
protected PutRoleAction() {

View File

@ -14,7 +14,7 @@ import org.elasticsearch.client.ElasticsearchClient;
public class DeleteUserAction extends Action<DeleteUserRequest, DeleteUserResponse, DeleteUserRequestBuilder> {
public static final DeleteUserAction INSTANCE = new DeleteUserAction();
public static final String NAME = "cluster:admin/shield/user/delete";
public static final String NAME = "cluster:admin/xpack/security/user/delete";
protected DeleteUserAction() {
super(NAME);

View File

@ -14,7 +14,7 @@ import org.elasticsearch.client.ElasticsearchClient;
public class GetUsersAction extends Action<GetUsersRequest, GetUsersResponse, GetUsersRequestBuilder> {
public static final GetUsersAction INSTANCE = new GetUsersAction();
public static final String NAME = "cluster:admin/shield/user/get";
public static final String NAME = "cluster:admin/xpack/security/user/get";
protected GetUsersAction() {
super(NAME);

View File

@ -14,7 +14,7 @@ import org.elasticsearch.client.ElasticsearchClient;
public class PutUserAction extends Action<PutUserRequest, PutUserResponse, PutUserRequestBuilder> {
public static final PutUserAction INSTANCE = new PutUserAction();
public static final String NAME = "cluster:admin/shield/user/put";
public static final String NAME = "cluster:admin/xpack/security/user/put";
protected PutUserAction() {
super(NAME);

View File

@ -56,8 +56,7 @@ public class Realms extends AbstractLifecycleComponent<Realms> implements Iterab
}
if (internalRealms.isEmpty()) {
// lets create a default one so they can do something
internalRealms.add(factories.get(ESUsersRealm.TYPE).createDefault("default_" + ESUsersRealm.TYPE));
addInternalRealms(internalRealms);
}
this.internalRealmsOnly = Collections.unmodifiableList(internalRealms);
@ -130,16 +129,8 @@ public class Realms extends AbstractLifecycleComponent<Realms> implements Iterab
return realms;
}
// there is no "realms" configuration, go over all the factories and try to create defaults
// for all the internal realms
Realm.Factory indexRealmFactory = factories.get(ESNativeRealm.TYPE);
if (indexRealmFactory != null) {
realms.add(indexRealmFactory.createDefault("default_" + ESNativeRealm.TYPE));
}
Realm.Factory esUsersRealm = factories.get(ESUsersRealm.TYPE);
if (esUsersRealm != null) {
realms.add(esUsersRealm.createDefault("default_" + ESUsersRealm.TYPE));
}
// there is no "realms" configuration, add the defaults
addInternalRealms(realms);
return realms;
}
@ -168,4 +159,14 @@ public class Realms extends AbstractLifecycleComponent<Realms> implements Iterab
return result != null ? result : Settings.EMPTY;
}
// Adds a default instance of each available internal realm (the native index-backed
// realm and the esusers file-backed realm) to the given list. A realm is skipped if
// its factory was not registered — presumably when that realm type is disabled;
// TODO(review): confirm against factory registration.
private void addInternalRealms(List<Realm> realms) {
Realm.Factory indexRealmFactory = factories.get(ESNativeRealm.TYPE);
if (indexRealmFactory != null) {
realms.add(indexRealmFactory.createDefault("default_" + ESNativeRealm.TYPE));
}
Realm.Factory esUsersRealm = factories.get(ESUsersRealm.TYPE);
if (esUsersRealm != null) {
realms.add(esUsersRealm.createDefault("default_" + ESUsersRealm.TYPE));
}
}
}

View File

@ -60,6 +60,7 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.RemoteTransportException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
@ -148,7 +149,7 @@ public class ESNativeUsersStore extends AbstractComponent implements ClusterStat
if (t instanceof IndexNotFoundException) {
logger.trace("failed to retrieve user [{}] since security index does not exist", username);
} else {
logger.info("failed to retrieve user [{}]", t, username);
logger.debug("failed to retrieve user [{}]", t, username);
}
// We don't invoke the onFailure listener here, instead
@ -186,8 +187,12 @@ public class ESNativeUsersStore extends AbstractComponent implements ClusterStat
// This function is MADNESS! But it works, don't think about it too hard...
client.search(request, new ActionListener<SearchResponse>() {
private SearchResponse lastResponse = null;
@Override
public void onResponse(final SearchResponse resp) {
lastResponse = resp;
boolean hasHits = resp.getHits().getHits().length > 0;
if (hasHits) {
for (SearchHit hit : resp.getHits().getHits()) {
@ -200,19 +205,9 @@ public class ESNativeUsersStore extends AbstractComponent implements ClusterStat
.setScroll(scrollKeepAlive).request();
client.searchScroll(scrollRequest, this);
} else {
ClearScrollRequest clearScrollRequest = client.prepareClearScroll().addScrollId(resp.getScrollId()).request();
client.clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {
@Override
public void onResponse(ClearScrollResponse response) {
// cool, it cleared, we don't really care though...
}
@Override
public void onFailure(Throwable t) {
// Not really much to do here except for warn about it...
logger.warn("failed to clear scroll [{}] after retrieving all users", t, resp.getScrollId());
}
});
if (resp.getScrollId() != null) {
clearScrollResponse(resp.getScrollId());
}
// Finally, return the list of users
listener.onResponse(Collections.unmodifiableList(users));
}
@ -220,23 +215,28 @@ public class ESNativeUsersStore extends AbstractComponent implements ClusterStat
@Override
public void onFailure(Throwable t) {
// attempt to clear scroll response
if (lastResponse != null && lastResponse.getScrollId() != null) {
clearScrollResponse(lastResponse.getScrollId());
}
if (t instanceof IndexNotFoundException) {
logger.trace("could not retrieve users because security index does not exist");
// We don't invoke the onFailure listener here, instead just pass an empty list
listener.onResponse(Collections.emptyList());
} else {
logger.info("failed to retrieve users", t);
listener.onFailure(t);
}
// We don't invoke the onFailure listener here, instead
// we call the response with an empty list
listener.onResponse(Collections.emptyList());
}
});
} catch (Exception e) {
logger.error("unable to retrieve users", e);
logger.error("unable to retrieve users {}", e, Arrays.toString(usernames));
listener.onFailure(e);
}
}
private UserAndPassword getUserAndPassword(String username) {
private UserAndPassword getUserAndPassword(final String username) {
final AtomicReference<UserAndPassword> userRef = new AtomicReference<>(null);
final CountDownLatch latch = new CountDownLatch(1);
getUserAndPassword(username, new LatchedActionListener<>(new ActionListener<UserAndPassword>() {
@ -247,19 +247,23 @@ public class ESNativeUsersStore extends AbstractComponent implements ClusterStat
@Override
public void onFailure(Throwable t) {
logger.info("failed to retrieve user", t);
if (t instanceof IndexNotFoundException) {
logger.trace("failed to retrieve user [{}] since security index does not exist", t, username);
} else {
logger.error("failed to retrieve user [{}]", t, username);
}
}
}, latch));
try {
latch.await(30, TimeUnit.SECONDS);
} catch (InterruptedException e) {
logger.info("timed out retrieving user");
logger.error("timed out retrieving user [{}]", username);
return null;
}
return userRef.get();
}
private void getUserAndPassword(String user, final ActionListener<UserAndPassword> listener) {
private void getUserAndPassword(final String user, final ActionListener<UserAndPassword> listener) {
try {
GetRequest request = client.prepareGet(ShieldTemplateService.SECURITY_INDEX_NAME, USER_DOC_TYPE, user).request();
client.get(request, new ActionListener<GetResponse>() {
@ -271,9 +275,9 @@ public class ESNativeUsersStore extends AbstractComponent implements ClusterStat
@Override
public void onFailure(Throwable t) {
if (t instanceof IndexNotFoundException) {
logger.trace("could not retrieve user because security index does not exist", t);
logger.trace("could not retrieve user [{}] because security index does not exist", t, user);
} else {
logger.info("failed to retrieve user", t);
logger.error("failed to retrieve user [{}]", t, user);
}
// We don't invoke the onFailure listener here, instead
// we call the response with a null user
@ -281,10 +285,10 @@ public class ESNativeUsersStore extends AbstractComponent implements ClusterStat
}
});
} catch (IndexNotFoundException infe) {
logger.trace("could not retrieve user because security index does not exist");
logger.trace("could not retrieve user [{}] because security index does not exist", user);
listener.onResponse(null);
} catch (Exception e) {
logger.error("unable to retrieve user", e);
logger.error("unable to retrieve user [{}]", e, user);
listener.onFailure(e);
}
}
@ -501,6 +505,22 @@ public class ESNativeUsersStore extends AbstractComponent implements ClusterStat
listeners.add(listener);
}
// Fires a best-effort, asynchronous clear-scroll request for the given scroll id.
// Failures are only logged: releasing the server-side scroll context is an
// optimization, not a correctness requirement for the caller.
private void clearScrollResponse(String scrollId) {
ClearScrollRequest clearScrollRequest = client.prepareClearScroll().addScrollId(scrollId).request();
client.clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {
@Override
public void onResponse(ClearScrollResponse response) {
// cool, it cleared, we don't really care though...
}
@Override
public void onFailure(Throwable t) {
// Not really much to do here except for warn about it...
logger.warn("failed to clear scroll [{}]", t, scrollId);
}
});
}
private <Response> void clearRealmCache(String username, ActionListener<Response> listener, Response response) {
SecurityClient securityClient = new SecurityClient(client);
ClearRealmCacheRequest request = securityClient.prepareClearRealmCache()
@ -565,7 +585,7 @@ public class ESNativeUsersStore extends AbstractComponent implements ClusterStat
Map<String, Object> metadata = (Map<String, Object>) sourceMap.get(User.Fields.METADATA.getPreferredName());
return new UserAndPassword(new User(username, roles, fullName, email, metadata), password.toCharArray());
} catch (Exception e) {
logger.error("error in the format of get response for user", e);
logger.error("error in the format of data for user [{}]", e, username);
return null;
}
}

View File

@ -10,15 +10,19 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.shield.support.Validation;
import org.elasticsearch.xpack.common.xcontent.XContentUtils;
import java.io.IOException;
@ -132,48 +136,69 @@ public class RoleDescriptor implements ToXContent {
out.writeStringArray(descriptor.runAs);
}
public static RoleDescriptor parse(String name, BytesReference source) throws Exception {
public static RoleDescriptor parse(String name, BytesReference source) throws IOException {
assert name != null;
try (XContentParser parser = XContentHelper.createParser(source)) {
XContentParser.Token token = parser.nextToken(); // advancing to the START_OBJECT token
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("failed to parse role [{}]. expected an object but found [{}] instead", name, token);
}
String currentFieldName = null;
IndicesPrivileges[] indicesPrivileges = null;
String[] clusterPrivileges = null;
String[] runAsUsers = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.INDICES)) {
indicesPrivileges = parseIndices(name, parser);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.RUN_AS)) {
runAsUsers = XContentUtils.readStringArray(parser, true);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.CLUSTER)) {
clusterPrivileges = XContentUtils.readStringArray(parser, true);
} else {
throw new ElasticsearchParseException("failed to parse role [{}]. unexpected field [{}]", name, currentFieldName);
}
}
return new RoleDescriptor(name, clusterPrivileges, indicesPrivileges, runAsUsers);
return parse(name, parser);
}
}
private static RoleDescriptor.IndicesPrivileges[] parseIndices(String roleName, XContentParser parser) throws Exception {
public static RoleDescriptor parse(String name, XContentParser parser) throws IOException {
// validate name
Validation.Error validationError = Validation.Roles.validateRoleName(name);
if (validationError != null) {
ValidationException ve = new ValidationException();
ve.addValidationError(validationError.toString());
throw ve;
}
// advance to the START_OBJECT token if needed
XContentParser.Token token = parser.currentToken() == null ? parser.nextToken() : parser.currentToken();
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("failed to parse role [{}]. expected an object but found [{}] instead", name, token);
}
String currentFieldName = null;
IndicesPrivileges[] indicesPrivileges = null;
String[] clusterPrivileges = null;
String[] runAsUsers = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.INDICES)) {
indicesPrivileges = parseIndices(name, parser);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.RUN_AS)) {
runAsUsers = readStringArray(name, parser, true);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.CLUSTER)) {
clusterPrivileges = readStringArray(name, parser, true);
} else {
throw new ElasticsearchParseException("failed to parse role [{}]. unexpected field [{}]", name, currentFieldName);
}
}
return new RoleDescriptor(name, clusterPrivileges, indicesPrivileges, runAsUsers);
}
// Reads a string array from the parser, re-wrapping any parse failure so the
// resulting exception message identifies which role was being parsed.
private static String[] readStringArray(String roleName, XContentParser parser, boolean allowNull) throws IOException {
try {
return XContentUtils.readStringArray(parser, allowNull);
} catch (ElasticsearchParseException e) {
// re-wrap in order to add the role name
throw new ElasticsearchParseException("failed to parse role [{}]", e, roleName);
}
}
private static RoleDescriptor.IndicesPrivileges[] parseIndices(String roleName, XContentParser parser) throws IOException {
if (parser.currentToken() != XContentParser.Token.START_ARRAY) {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. expected field [{}] value " +
"to be an array, but found [{}] instead", roleName, parser.currentName(), parser.currentToken());
}
List<RoleDescriptor.IndicesPrivileges> privileges = new ArrayList<>();
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
privileges.add(parseIndex(roleName, parser));
}
return privileges.toArray(new IndicesPrivileges[privileges.size()]);
}
private static RoleDescriptor.IndicesPrivileges parseIndex(String roleName, XContentParser parser) throws Exception {
private static RoleDescriptor.IndicesPrivileges parseIndex(String roleName, XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. expected field [{}] value to " +
@ -191,7 +216,7 @@ public class RoleDescriptor implements ToXContent {
if (token == XContentParser.Token.VALUE_STRING) {
names = new String[] { parser.text() };
} else if (token == XContentParser.Token.START_ARRAY) {
names = XContentUtils.readStringArray(parser, false);
names = readStringArray(roleName, parser, false);
if (names.length == 0) {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. [{}] cannot be an empty " +
"array", roleName, currentFieldName);
@ -201,15 +226,21 @@ public class RoleDescriptor implements ToXContent {
"value to be a string or an array of strings, but found [{}] instead", roleName, currentFieldName, token);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.QUERY)) {
query = parser.textOrNull();
if (token == XContentParser.Token.START_OBJECT) {
XContentBuilder builder = JsonXContent.contentBuilder();
XContentHelper.copyCurrentStructure(builder.generator(), parser);
query = builder.string();
} else {
query = parser.textOrNull();
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.PRIVILEGES)) {
privileges = XContentUtils.readStringArray(parser, false);
privileges = readStringArray(roleName, parser, true);
if (names.length == 0) {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. [{}] cannot be an empty " +
"array", roleName, currentFieldName);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.FIELDS)) {
fields = XContentUtils.readStringArray(parser, true);
fields = readStringArray(roleName, parser, true);
} else {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. unexpected field [{}]",
roleName, currentFieldName);

View File

@ -44,7 +44,6 @@ import org.elasticsearch.shield.action.role.ClearRolesCacheRequest;
import org.elasticsearch.shield.action.role.ClearRolesCacheResponse;
import org.elasticsearch.shield.action.role.DeleteRoleRequest;
import org.elasticsearch.shield.action.role.PutRoleRequest;
import org.elasticsearch.shield.authc.AuthenticationService;
import org.elasticsearch.shield.authz.RoleDescriptor;
import org.elasticsearch.shield.authz.permission.Role;
import org.elasticsearch.shield.authz.store.RolesStore;
@ -52,6 +51,7 @@ import org.elasticsearch.shield.client.SecurityClient;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
@ -101,8 +101,7 @@ public class ESNativeRolesStore extends AbstractComponent implements RolesStore,
private volatile boolean shieldIndexExists = false;
@Inject
public ESNativeRolesStore(Settings settings, Provider<InternalClient> clientProvider,
Provider<AuthenticationService> authProvider, ThreadPool threadPool) {
public ESNativeRolesStore(Settings settings, Provider<InternalClient> clientProvider, ThreadPool threadPool) {
super(settings);
this.clientProvider = clientProvider;
this.threadPool = threadPool;
@ -192,8 +191,12 @@ public class ESNativeRolesStore extends AbstractComponent implements RolesStore,
// This function is MADNESS! But it works, don't think about it too hard...
client.search(request, new ActionListener<SearchResponse>() {
private SearchResponse lastResponse = null;
@Override
public void onResponse(SearchResponse resp) {
lastResponse = resp;
boolean hasHits = resp.getHits().getHits().length > 0;
if (hasHits) {
for (SearchHit hit : resp.getHits().getHits()) {
@ -206,19 +209,9 @@ public class ESNativeRolesStore extends AbstractComponent implements RolesStore,
.setScroll(scrollKeepAlive).request();
client.searchScroll(scrollRequest, this);
} else {
ClearScrollRequest clearScrollRequest = client.prepareClearScroll().addScrollId(resp.getScrollId()).request();
client.clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {
@Override
public void onResponse(ClearScrollResponse response) {
// cool, it cleared, we don't really care though...
}
@Override
public void onFailure(Throwable t) {
// Not really much to do here except for warn about it...
logger.warn("failed to clear scroll after retrieving all roles", t);
}
});
if (resp.getScrollId() != null) {
clearScollRequest(resp.getScrollId());
}
// Finally, return the list of users
listener.onResponse(Collections.unmodifiableList(roles));
}
@ -226,18 +219,22 @@ public class ESNativeRolesStore extends AbstractComponent implements RolesStore,
@Override
public void onFailure(Throwable t) {
// attempt to clear the scroll request
if (lastResponse != null && lastResponse.getScrollId() != null) {
clearScollRequest(lastResponse.getScrollId());
}
if (t instanceof IndexNotFoundException) {
logger.trace("could not retrieve roles because security index does not exist");
// since this is expected to happen at times, we just call the listener with an empty list
listener.onResponse(Collections.<RoleDescriptor>emptyList());
} else {
logger.info("failed to retrieve roles", t);
listener.onFailure(t);
}
// We don't invoke the onFailure listener here, instead
// we call the response with an empty list
listener.onResponse(Collections.emptyList());
}
});
} catch (Exception e) {
logger.error("unable to retrieve roles", e);
logger.error("unable to retrieve roles {}", e, Arrays.toString(names));
listener.onFailure(e);
}
}
@ -283,7 +280,7 @@ public class ESNativeRolesStore extends AbstractComponent implements RolesStore,
public void putRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener<Boolean> listener) {
if (state() != State.STARTED) {
logger.trace("attempted to put role before service was started");
logger.trace("attempted to put role [{}] before service was started", request.name());
listener.onResponse(false);
}
try {
@ -302,12 +299,12 @@ public class ESNativeRolesStore extends AbstractComponent implements RolesStore,
@Override
public void onFailure(Throwable e) {
logger.error("failed to put role to the index", e);
logger.error("failed to put role [{}]", e, request.name());
listener.onFailure(e);
}
});
} catch (Exception e) {
logger.error("unable to put role", e);
logger.error("unable to put role [{}]", e, request.name());
listener.onFailure(e);
}
@ -336,14 +333,18 @@ public class ESNativeRolesStore extends AbstractComponent implements RolesStore,
@Override
public void onFailure(Throwable t) {
logger.info("failed to retrieve role", t);
if (t instanceof IndexNotFoundException) {
logger.trace("failed to retrieve role [{}] since security index does not exist", t, roleId);
} else {
logger.error("failed to retrieve role [{}]", t, roleId);
}
}
}, latch));
try {
latch.await(30, TimeUnit.SECONDS);
} catch (InterruptedException e) {
logger.info("timed out retrieving role");
logger.error("timed out retrieving role [{}]", roleId);
}
GetResponse response = getRef.get();
@ -371,7 +372,7 @@ public class ESNativeRolesStore extends AbstractComponent implements RolesStore,
GetRequest request = client.prepareGet(ShieldTemplateService.SECURITY_INDEX_NAME, ROLE_DOC_TYPE, role).request();
client.get(request, listener);
} catch (IndexNotFoundException e) {
logger.trace("security index does not exist", e);
logger.trace("unable to retrieve role [{}] since security index does not exist", e, role);
listener.onResponse(new GetResponse(
new GetResult(ShieldTemplateService.SECURITY_INDEX_NAME, ROLE_DOC_TYPE, role, -1, false, null, null)));
} catch (Exception e) {
@ -380,6 +381,22 @@ public class ESNativeRolesStore extends AbstractComponent implements RolesStore,
}
}
private void clearScollRequest(final String scrollId) {
ClearScrollRequest clearScrollRequest = client.prepareClearScroll().addScrollId(scrollId).request();
client.clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {
@Override
public void onResponse(ClearScrollResponse response) {
// cool, it cleared, we don't really care though...
}
@Override
public void onFailure(Throwable t) {
// Not really much to do here except for warn about it...
logger.warn("failed to clear scroll [{}] after retrieving roles", t, scrollId);
}
});
}
// FIXME hack for testing
public void reset() {
final State state = state();
@ -452,7 +469,7 @@ public class ESNativeRolesStore extends AbstractComponent implements RolesStore,
try {
return RoleDescriptor.parse(name, sourceBytes);
} catch (Exception e) {
logger.warn("unable to deserialize role from response", e);
logger.error("error in the format of data for role [{}]", e, name);
return null;
}
}

View File

@ -78,6 +78,8 @@ public class DefaultIndicesAndAliasesResolver implements IndicesAndAliasesResolv
* the list of indices in there, if we do so it will result in an invalid request and the update will fail.
*/
indices = Collections.singleton(((PutMappingRequest) indicesRequest).getConcreteIndex().getName());
assert indicesRequest.indices() == null || indicesRequest.indices().length == 0
: "indices are: " + Arrays.toString(indicesRequest.indices()); // Arrays.toString() can handle null values - all good
} else {
if (indicesRequest.indicesOptions().expandWildcardsOpen() || indicesRequest.indicesOptions().expandWildcardsClosed()) {
if (indicesRequest instanceof IndicesRequest.Replaceable) {

View File

@ -68,7 +68,11 @@ public class Role extends GlobalPermission {
private Builder(RoleDescriptor rd) {
this.name = rd.getName();
this.cluster(ClusterPrivilege.get((new Privilege.Name(rd.getClusterPrivileges()))));
if (rd.getClusterPrivileges().length == 0) {
cluster = ClusterPermission.Core.NONE;
} else {
this.cluster(ClusterPrivilege.get((new Privilege.Name(rd.getClusterPrivileges()))));
}
for (RoleDescriptor.IndicesPrivileges iGroup : rd.getIndicesPrivileges()) {
this.add(iGroup.getFields() == null ? null : Arrays.asList(iGroup.getFields()),
iGroup.getQuery(),

View File

@ -6,8 +6,8 @@
package org.elasticsearch.shield.authz.privilege;
import dk.brics.automaton.Automaton;
import dk.brics.automaton.BasicAutomata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.shield.support.Automatons;
import java.util.Locale;
import java.util.Set;
@ -15,16 +15,30 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.function.Predicate;
import static org.elasticsearch.shield.support.Automatons.minusAndDeterminize;
import static org.elasticsearch.shield.support.Automatons.patterns;
/**
*
*/
public class ClusterPrivilege extends AbstractAutomatonPrivilege<ClusterPrivilege> {
public static final ClusterPrivilege NONE = new ClusterPrivilege(Name.NONE, BasicAutomata.makeEmpty());
public static final ClusterPrivilege ALL = new ClusterPrivilege(Name.ALL, "cluster:*", "indices:admin/template/*");
public static final ClusterPrivilege MONITOR = new ClusterPrivilege("monitor", "cluster:monitor/*");
public static final ClusterPrivilege MANAGE_SHIELD = new ClusterPrivilege("manage_shield", "cluster:admin/shield/*");
public static final ClusterPrivilege MANAGE_PIPELINE = new ClusterPrivilege("manage_pipeline", "cluster:admin/ingest/pipeline/*");
// shared automatons
private static final Automaton MANAGE_SECURITY_AUTOMATON = patterns("cluster:admin/xpack/security/*");
private static final Automaton MONITOR_AUTOMATON = patterns("cluster:monitor/*");
private static final Automaton ALL_CLUSTER_AUTOMATON = patterns("cluster:*", "indices:admin/template/*");
private static final Automaton MANAGE_AUTOMATON = minusAndDeterminize(ALL_CLUSTER_AUTOMATON, MANAGE_SECURITY_AUTOMATON);
private static final Automaton TRANSPORT_CLIENT_AUTOMATON = patterns("cluster:monitor/nodes/liveness", "cluster:monitor/state");
private static final Automaton MANAGE_IDX_TEMPLATE_AUTOMATON = patterns("indices:admin/template/*");
public static final ClusterPrivilege NONE = new ClusterPrivilege(Name.NONE, Automatons.EMPTY);
public static final ClusterPrivilege ALL = new ClusterPrivilege(Name.ALL, ALL_CLUSTER_AUTOMATON);
public static final ClusterPrivilege MONITOR = new ClusterPrivilege("monitor", MONITOR_AUTOMATON);
public static final ClusterPrivilege MANAGE = new ClusterPrivilege("manage", MANAGE_AUTOMATON);
public static final ClusterPrivilege MANAGE_IDX_TEMPLATES =
new ClusterPrivilege("manage_index_templates", MANAGE_IDX_TEMPLATE_AUTOMATON);
public static final ClusterPrivilege TRANSPORT_CLIENT = new ClusterPrivilege("transport_client", TRANSPORT_CLIENT_AUTOMATON);
public static final ClusterPrivilege MANAGE_SECURITY = new ClusterPrivilege("manage_security", MANAGE_SECURITY_AUTOMATON);
public final static Predicate<String> ACTION_MATCHER = ClusterPrivilege.ALL.predicate();
@ -34,8 +48,10 @@ public class ClusterPrivilege extends AbstractAutomatonPrivilege<ClusterPrivileg
values.add(NONE);
values.add(ALL);
values.add(MONITOR);
values.add(MANAGE_SHIELD);
values.add(MANAGE_PIPELINE);
values.add(MANAGE);
values.add(MANAGE_IDX_TEMPLATES);
values.add(TRANSPORT_CLIENT);
values.add(MANAGE_SECURITY);
}
static Set<ClusterPrivilege> values() {
@ -48,8 +64,8 @@ public class ClusterPrivilege extends AbstractAutomatonPrivilege<ClusterPrivileg
super(name, patterns);
}
private ClusterPrivilege(Name name, String... patterns) {
super(name, patterns);
private ClusterPrivilege(String name, Automaton automaton) {
super(new Name(name), automaton);
}
private ClusterPrivilege(Name name, Automaton automaton) {

View File

@ -6,14 +6,21 @@
package org.elasticsearch.shield.authz.privilege;
import dk.brics.automaton.Automaton;
import dk.brics.automaton.BasicAutomata;
import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction;
import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistAction;
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction;
import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
import org.elasticsearch.action.get.GetAction;
import org.elasticsearch.action.get.MultiGetAction;
import org.elasticsearch.action.search.MultiSearchAction;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.suggest.SuggestAction;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsAction;
import org.elasticsearch.action.admin.indices.exists.types.TypesExistsAction;
import org.elasticsearch.action.admin.indices.get.GetIndexAction;
import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsAction;
import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction;
import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryAction;
import org.elasticsearch.common.Strings;
import org.elasticsearch.shield.support.Automatons;
import java.util.Locale;
import java.util.Set;
@ -21,29 +28,41 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.function.Predicate;
import static org.elasticsearch.shield.support.Automatons.patterns;
import static org.elasticsearch.shield.support.Automatons.unionAndDeterminize;
/**
*
*/
public class IndexPrivilege extends AbstractAutomatonPrivilege<IndexPrivilege> {
public static final IndexPrivilege NONE = new IndexPrivilege(Name.NONE, BasicAutomata.makeEmpty());
public static final IndexPrivilege ALL = new IndexPrivilege(Name.ALL, "indices:*");
public static final IndexPrivilege MANAGE = new IndexPrivilege("manage", "indices:monitor/*", "indices:admin/*");
public static final IndexPrivilege CREATE_INDEX = new IndexPrivilege("create_index", CreateIndexAction.NAME);
public static final IndexPrivilege MANAGE_ALIASES = new IndexPrivilege("manage_aliases", "indices:admin/aliases*");
public static final IndexPrivilege MONITOR = new IndexPrivilege("monitor", "indices:monitor/*");
public static final IndexPrivilege DATA_ACCESS = new IndexPrivilege("data_access", "indices:data/*", "indices:admin/mapping/put");
public static final IndexPrivilege CRUD =
new IndexPrivilege("crud", "indices:data/write/*", "indices:data/read/*", "indices:admin/mapping/put");
public static final IndexPrivilege READ = new IndexPrivilege("read", "indices:data/read/*");
public static final IndexPrivilege SEARCH =
new IndexPrivilege("search", SearchAction.NAME + "*", MultiSearchAction.NAME + "*", SuggestAction.NAME + "*");
public static final IndexPrivilege GET = new IndexPrivilege("get", GetAction.NAME + "*", MultiGetAction.NAME + "*");
public static final IndexPrivilege SUGGEST = new IndexPrivilege("suggest", SuggestAction.NAME + "*");
public static final IndexPrivilege INDEX =
new IndexPrivilege("index", "indices:data/write/index*", "indices:data/write/update*", "indices:admin/mapping/put");
public static final IndexPrivilege DELETE = new IndexPrivilege("delete", "indices:data/write/delete*");
public static final IndexPrivilege WRITE = new IndexPrivilege("write", "indices:data/write/*", "indices:admin/mapping/put");
private static final Automaton ALL_AUTOMATON = patterns("indices:*");
private static final Automaton READ_AUTOMATON = patterns("indices:data/read/*");
private static final Automaton CREATE_AUTOMATON = patterns("indices:data/write/index*", PutMappingAction.NAME);
private static final Automaton INDEX_AUTOMATON =
patterns("indices:data/write/index*", "indices:data/write/update*", PutMappingAction.NAME);
private static final Automaton DELETE_AUTOMATON = patterns("indices:data/write/delete*");
private static final Automaton WRITE_AUTOMATON = patterns("indices:data/write/*", PutMappingAction.NAME);
private static final Automaton MONITOR_AUTOMATON = patterns("indices:monitor/*");
private static final Automaton MANAGE_AUTOMATON = unionAndDeterminize(MONITOR_AUTOMATON, patterns("indices:admin/*"));
private static final Automaton CREATE_INDEX_AUTOMATON = patterns(CreateIndexAction.NAME);
private static final Automaton DELETE_INDEX_AUTOMATON = patterns(DeleteIndexAction.NAME);
private static final Automaton VIEW_METADATA_AUTOMATON = patterns(GetAliasesAction.NAME, AliasesExistAction.NAME,
GetIndexAction.NAME, IndicesExistsAction.NAME, GetFieldMappingsAction.NAME, GetMappingsAction.NAME,
ClusterSearchShardsAction.NAME, TypesExistsAction.NAME, ValidateQueryAction.NAME, GetSettingsAction.NAME);
public static final IndexPrivilege NONE = new IndexPrivilege(Name.NONE, Automatons.EMPTY);
public static final IndexPrivilege ALL = new IndexPrivilege(Name.ALL, ALL_AUTOMATON);
public static final IndexPrivilege READ = new IndexPrivilege("read", READ_AUTOMATON);
public static final IndexPrivilege CREATE = new IndexPrivilege("create", CREATE_AUTOMATON);
public static final IndexPrivilege INDEX = new IndexPrivilege("index", INDEX_AUTOMATON);
public static final IndexPrivilege DELETE = new IndexPrivilege("delete", DELETE_AUTOMATON);
public static final IndexPrivilege WRITE = new IndexPrivilege("write", WRITE_AUTOMATON);
public static final IndexPrivilege MONITOR = new IndexPrivilege("monitor", MONITOR_AUTOMATON);
public static final IndexPrivilege MANAGE = new IndexPrivilege("manage", MANAGE_AUTOMATON);
public static final IndexPrivilege DELETE_INDEX = new IndexPrivilege("delete_index", DELETE_INDEX_AUTOMATON);
public static final IndexPrivilege CREATE_INDEX = new IndexPrivilege("create_index", CREATE_INDEX_AUTOMATON);
public static final IndexPrivilege VIEW_METADATA = new IndexPrivilege("view_index_metadata", VIEW_METADATA_AUTOMATON);
private static final Set<IndexPrivilege> values = new CopyOnWriteArraySet<>();
@ -52,17 +71,14 @@ public class IndexPrivilege extends AbstractAutomatonPrivilege<IndexPrivilege> {
values.add(ALL);
values.add(MANAGE);
values.add(CREATE_INDEX);
values.add(MANAGE_ALIASES);
values.add(MONITOR);
values.add(DATA_ACCESS);
values.add(CRUD);
values.add(READ);
values.add(SEARCH);
values.add(GET);
values.add(SUGGEST);
values.add(INDEX);
values.add(DELETE);
values.add(WRITE);
values.add(CREATE);
values.add(DELETE_INDEX);
values.add(VIEW_METADATA);
}
public static final Predicate<String> ACTION_MATCHER = ALL.predicate();
@ -78,8 +94,8 @@ public class IndexPrivilege extends AbstractAutomatonPrivilege<IndexPrivilege> {
super(name, patterns);
}
private IndexPrivilege(Name name, String... patterns) {
super(name, patterns);
private IndexPrivilege(String name, Automaton automaton) {
super(new Name(name), automaton);
}
private IndexPrivilege(Name name, Automaton automaton) {

View File

@ -7,28 +7,20 @@ package org.elasticsearch.shield.authz.store;
import com.fasterxml.jackson.dataformat.yaml.snakeyaml.error.YAMLException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.common.xcontent.yaml.YamlXContent;
import org.elasticsearch.env.Environment;
import org.elasticsearch.shield.Shield;
import org.elasticsearch.shield.SystemUser;
import org.elasticsearch.shield.XPackUser;
import org.elasticsearch.shield.authc.support.RefreshListener;
import org.elasticsearch.shield.authz.RoleDescriptor;
import org.elasticsearch.shield.authz.permission.Role;
import org.elasticsearch.shield.authz.privilege.ClusterPrivilege;
import org.elasticsearch.shield.authz.privilege.GeneralPrivilege;
import org.elasticsearch.shield.authz.privilege.IndexPrivilege;
import org.elasticsearch.shield.authz.privilege.Privilege;
import org.elasticsearch.shield.support.NoOpLogger;
import org.elasticsearch.shield.support.Validation;
import org.elasticsearch.watcher.FileChangesListener;
@ -41,10 +33,7 @@ import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -59,7 +48,6 @@ import static java.util.Collections.unmodifiableMap;
*/
public class FileRolesStore extends AbstractLifecycleComponent<RolesStore> implements RolesStore {
private static final Pattern COMMA_DELIM = Pattern.compile("\\s*,\\s*");
private static final Pattern IN_SEGMENT_LINE = Pattern.compile("^\\s+.+");
private static final Pattern SKIP_LINE = Pattern.compile("(^#.*|^\\s*)");
@ -174,226 +162,40 @@ public class FileRolesStore extends AbstractLifecycleComponent<RolesStore> imple
return null;
}
Role.Builder role = Role.builder(roleName);
if (resolvePermissions == false) {
return role.build();
return Role.builder(roleName).build();
}
token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if ("cluster".equals(currentFieldName)) {
Privilege.Name name = null;
if (token == XContentParser.Token.VALUE_STRING) {
String namesStr = parser.text().trim();
if (Strings.hasLength(namesStr)) {
String[] names = COMMA_DELIM.split(namesStr);
name = new Privilege.Name(names);
}
} else if (token == XContentParser.Token.START_ARRAY) {
Set<String> names = new HashSet<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
names.add(parser.text());
}
}
if (!names.isEmpty()) {
name = new Privilege.Name(names);
}
} else if (token == XContentParser.Token.VALUE_NULL) {
continue;
} else {
logger.error("invalid role definition [{}] in roles file [{}]. [cluster] field value can either " +
"be a string or a list of strings, but [{}] was found instead. skipping role...",
roleName, path.toAbsolutePath(), token);
return null;
}
if (name != null) {
try {
role.cluster(ClusterPrivilege.get(name));
} catch (IllegalArgumentException e) {
logger.error("invalid role definition [{}] in roles file [{}]. could not resolve cluster " +
"privileges [{}]. skipping role...", roleName, path.toAbsolutePath(), name);
return null;
}
}
} else if ("indices".equals(currentFieldName)) {
if (token == XContentParser.Token.START_OBJECT) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (Strings.hasLength(currentFieldName)) {
String[] indices = COMMA_DELIM.split(currentFieldName);
Privilege.Name name = null;
if (token == XContentParser.Token.VALUE_STRING) {
String namesStr = parser.text().trim();
if (Strings.hasLength(namesStr)) {
String[] names = COMMA_DELIM.split(parser.text());
name = new Privilege.Name(names);
}
} else if (token == XContentParser.Token.START_ARRAY) {
Set<String> names = new HashSet<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
names.add(parser.text());
} else {
logger.error("invalid role definition [{}] in roles file [{}]. could not parse " +
"[{}] as index privilege. privilege names must be strings. skipping " +
"role...", roleName, path.toAbsolutePath(), token);
return null;
}
}
if (!names.isEmpty()) {
name = new Privilege.Name(names);
}
} else if (token == XContentParser.Token.START_OBJECT) {
List<String> fields = null;
BytesReference query = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if ("fields".equals(currentFieldName)) {
if (token == XContentParser.Token.START_ARRAY) {
fields = (List) parser.list();
} else if (token.isValue()) {
String field = parser.text();
if (field.trim().isEmpty()) {
// The yaml parser doesn't emit null token if the key is empty...
fields = Collections.emptyList();
} else {
fields = Collections.singletonList(field);
}
} else if (token == XContentParser.Token.VALUE_NULL) {
fields = Collections.emptyList();
}
} else if ("query".equals(currentFieldName)) {
if (token == XContentParser.Token.START_OBJECT) {
XContentBuilder builder = JsonXContent.contentBuilder();
XContentHelper.copyCurrentStructure(builder.generator(), parser);
query = builder.bytes();
} else if (token == XContentParser.Token.VALUE_STRING) {
query = new BytesArray(parser.text());
}
} else if ("privileges".equals(currentFieldName)) {
if (token == XContentParser.Token.VALUE_STRING) {
String namesStr = parser.text().trim();
if (Strings.hasLength(namesStr)) {
String[] names = COMMA_DELIM.split(parser.text());
name = new Privilege.Name(names);
}
} else if (token == XContentParser.Token.START_ARRAY) {
Set<String> names = new HashSet<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
names.add(parser.text());
} else {
logger.error("invalid role definition [{}] in roles file [{}]. " +
"could not parse [{}] as index privilege. privilege " +
"names must be strings. skipping role...", roleName,
path.toAbsolutePath(), token);
return null;
}
}
if (!names.isEmpty()) {
name = new Privilege.Name(names);
}
}
}
}
if (name != null) {
if ((query != null || (fields != null && fields.isEmpty() == false)) &&
Shield.flsDlsEnabled(settings) == false) {
logger.error("invalid role definition [{}] in roles file [{}]. " +
"document and field level security is not enabled. " +
"set [{}] to [true] in the configuration file. skipping role...",
roleName, path.toAbsolutePath(),
XPackPlugin.featureEnabledSetting(Shield.DLS_FLS_FEATURE));
return null;
}
RoleDescriptor descriptor = RoleDescriptor.parse(roleName, parser);
try {
role.add(fields, query, IndexPrivilege.get(name), indices);
} catch (IllegalArgumentException e) {
logger.error("invalid role definition [{}] in roles file [{}]. could not " +
"resolve indices privileges [{}]. skipping role...", roleName,
path.toAbsolutePath(), name);
return null;
}
}
continue;
} else if (token == XContentParser.Token.VALUE_NULL) {
continue;
} else {
logger.error("invalid role definition [{}] in roles file [{}]. " +
"could not parse [{}] as index privileges. privilege lists must either " +
"be a comma delimited string or an array of strings. skipping role...", roleName,
path.toAbsolutePath(), token);
return null;
}
if (name != null) {
try {
role.add(IndexPrivilege.get(name), indices);
} catch (IllegalArgumentException e) {
logger.error("invalid role definition [{}] in roles file [{}]. could not resolve " +
"indices privileges [{}]. skipping role...", roleName, path.toAbsolutePath(),
name);
return null;
}
}
}
}
} else {
logger.error("invalid role definition [{}] in roles file [{}]. [indices] field value must be an array" +
" of indices-privileges mappings defined as a string" +
" in the form <comma-separated list of index name patterns>::<comma-separated list of" +
" privileges> , but [{}] was found instead. skipping role...",
roleName, path.toAbsolutePath(), token);
return null;
}
} else if ("run_as".equals(currentFieldName)) {
Set<String> names = new HashSet<>();
if (token == XContentParser.Token.VALUE_STRING) {
String namesStr = parser.text().trim();
if (Strings.hasLength(namesStr)) {
String[] namesArr = COMMA_DELIM.split(namesStr);
names.addAll(Arrays.asList(namesArr));
}
} else if (token == XContentParser.Token.START_ARRAY) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
names.add(parser.text());
}
}
} else {
logger.error("invalid role definition [{}] in roles file [{}]. [run_as] field value can either " +
"be a string or a list of strings, but [{}] was found instead. skipping role...",
roleName, path.toAbsolutePath(), token);
return null;
}
if (!names.isEmpty()) {
Privilege.Name name = new Privilege.Name(names);
try {
role.runAs(new GeneralPrivilege(new Privilege.Name(names),
names.toArray(new String[names.size()])));
} catch (IllegalArgumentException e) {
logger.error("invalid role definition [{}] in roles file [{}]. could not resolve run_as " +
"privileges [{}]. skipping role...", roleName, path.toAbsolutePath(), name);
return null;
}
}
} else {
logger.warn("unknown field [{}] found in role definition [{}] in roles file [{}]", currentFieldName,
roleName, path.toAbsolutePath());
// first check if FLS/DLS is enabled on the role...
for (RoleDescriptor.IndicesPrivileges privilege : descriptor.getIndicesPrivileges()) {
if ((privilege.getQuery() != null || privilege.getFields() != null)
&& Shield.flsDlsEnabled(settings) == false) {
logger.error("invalid role definition [{}] in roles file [{}]. document and field level security is not " +
"enabled. set [{}] to [true] in the configuration file. skipping role...", roleName, path
.toAbsolutePath(), XPackPlugin.featureEnabledSetting(Shield.DLS_FLS_FEATURE));
return null;
}
}
return role.build();
return Role.builder(descriptor).build();
} else {
logger.error("invalid role definition [{}] in roles file [{}]. skipping role...", roleName, path.toAbsolutePath());
return null;
}
logger.error("invalid role definition [{}] in roles file [{}]. skipping role...", roleName, path.toAbsolutePath());
}
}
logger.error("invalid role definition [{}] in roles file [{}]. skipping role...", roleName, path.toAbsolutePath());
} catch (ElasticsearchParseException e) {
assert roleName != null;
if (logger.isDebugEnabled()) {
logger.debug("parsing exception for role [{}]", e, roleName);
} else {
logger.error(e.getMessage() + ". skipping role...");
}
} catch (YAMLException | IOException e) {
if (roleName != null) {
logger.error("invalid role definition [{}] in roles file [{}]. skipping role...", e, roleName, path);

View File

@ -24,6 +24,8 @@ import static dk.brics.automaton.MinimizationOperations.minimize;
*/
public final class Automatons {
public static final Automaton EMPTY = BasicAutomata.makeEmpty();
static final char WILDCARD_STRING = '*'; // String equality with support for wildcards
static final char WILDCARD_CHAR = '?'; // Char equality with support for wildcards
static final char WILDCARD_ESCAPE = '\\'; // Escape character

View File

@ -7,7 +7,6 @@ package org.elasticsearch.shield.transport;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
@ -23,13 +22,11 @@ public class ShieldClientTransportService extends TransportService {
private final ClientTransportFilter clientFilter;
@Inject
public ShieldClientTransportService(Settings settings, Transport transport, ThreadPool threadPool, ClientTransportFilter clientFilter,
NamedWriteableRegistry namedWriteableRegistry) {
super(settings, transport, threadPool, namedWriteableRegistry);
public ShieldClientTransportService(Settings settings, Transport transport, ThreadPool threadPool, ClientTransportFilter clientFilter) {
super(settings, transport, threadPool);
this.clientFilter = clientFilter;
}
@Override
public <T extends TransportResponse> void sendRequest(DiscoveryNode node, String action, TransportRequest request,
TransportRequestOptions options, TransportResponseHandler<T> handler) {

View File

@ -63,9 +63,8 @@ public class ShieldServerTransportService extends TransportService {
AuthorizationService authzService,
ShieldActionMapper actionMapper,
ClientTransportFilter clientTransportFilter,
ShieldLicenseState licenseState,
NamedWriteableRegistry namedWriteableRegistry) {
super(settings, transport, threadPool, namedWriteableRegistry);
ShieldLicenseState licenseState) {
super(settings, transport, threadPool);
this.authcService = authcService;
this.authzService = authzService;
this.actionMapper = actionMapper;

View File

@ -8,11 +8,13 @@ package org.elasticsearch.bench;
import org.elasticsearch.common.Randomness;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.metrics.MeanMetric;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.shield.authc.support.Hasher;
import org.elasticsearch.shield.authc.support.SecuredString;
@SuppressForbidden(reason = "benchmark")
public class HasherBenchmark {
private static final int WARMING_ITERS = 1000;
@ -63,6 +65,7 @@ public class HasherBenchmark {
return metrics;
}
@SuppressForbidden(reason = "benchmark")
private static class Metrics {
final String name;

View File

@ -28,14 +28,15 @@ public class ClusterPrivilegeTests extends AbstractPrivilegeTestCase {
public static final String ROLES =
"role_a:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
"\n" +
"role_b:\n" +
" cluster: monitor\n" +
" cluster: [ monitor ]\n" +
"\n" +
"role_c:\n" +
" indices:\n" +
" 'someindex': all\n";
" - names: 'someindex'\n" +
" privileges: [ all ]\n";
public static final String USERS =
"user_a:" + USERS_PASSWD_HASHED + "\n" +

View File

@ -52,25 +52,25 @@ public class DocumentAndFieldLevelSecurityTests extends ShieldIntegTestCase {
protected String configRoles() {
return super.configRoles() +
"\nrole1:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: field1\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: [ field1 ]\n" +
" query: '{\"term\" : {\"field1\" : \"value1\"}}'\n" +
"role2:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: field2\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: [ field2 ]\n" +
" query: '{\"term\" : {\"field2\" : \"value2\"}}'\n" +
"role3:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: field1\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: [ field1 ]\n" +
" query: '{\"term\" : {\"field2\" : \"value2\"}}'\n";
}

View File

@ -60,10 +60,11 @@ public class DocumentLevelSecurityRandomTests extends ShieldIntegTestCase {
builder.append('\n');
for (int i = 1; i <= numberOfRoles; i++) {
builder.append("role").append(i).append(":\n");
builder.append(" cluster: all\n");
builder.append(" cluster: [ all ]\n");
builder.append(" indices:\n");
builder.append(" '*':\n");
builder.append(" privileges: ALL\n");
builder.append(" - names: '*'\n");
builder.append(" privileges:\n");
builder.append(" - all\n");
builder.append(" query: \n");
builder.append(" term: \n");
builder.append(" field1: value").append(i).append('\n');

View File

@ -73,18 +73,20 @@ public class DocumentLevelSecurityTests extends ShieldIntegTestCase {
protected String configRoles() {
return super.configRoles() +
"\nrole1:\n" +
" cluster: all\n" +
" cluster:\n" +
" - all\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" - names: '*'\n" +
" privileges:\n" +
" - all\n" +
" query: \n" +
" term: \n" +
" field1: value1\n" +
"role2:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" query: '{\"term\" : {\"field2\" : \"value2\"}}'"; // <-- query defined as json in a string
}

View File

@ -80,30 +80,32 @@ public class FieldLevelSecurityRandomTests extends ShieldIntegTestCase {
return super.configRoles() +
"\nrole1:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields:\n" + roleFields.toString() +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields:\n" +roleFields.toString() +
"role2:\n" +
" cluster: all\n" +
" cluster:\n" +
" - all\n" +
" indices:\n" +
" test:\n" +
" privileges: ALL\n" +
" - names: test\n" +
" privileges:\n" +
" - all\n" +
" fields:\n" +
" - field1\n" +
"role3:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" test:\n" +
" privileges: ALL\n" +
" - names: test\n" +
" privileges: [ ALL ]\n" +
" fields:\n" +
" - field2\n" +
"role4:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" test:\n" +
" privileges: ALL\n" +
" - names: test\n" +
" privileges: [ ALL ]\n" +
" fields:\n" +
" - field3\n";
}

View File

@ -80,41 +80,42 @@ public class FieldLevelSecurityTests extends ShieldIntegTestCase {
protected String configRoles() {
return super.configRoles() +
"\nrole1:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: field1\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: [ field1 ]\n" +
"role2:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: field2\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: [ field2 ]\n" +
"role3:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: \n" +
" - field1\n" +
" - field2\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields:\n" +
" - field1\n" +
" - field2\n" +
"role4:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields:\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: []\n" +
"role5:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*': ALL\n" +
" - names: '*'\n" +
" privileges: [ALL]\n" +
"role6:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: 'field*'\n";
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: [ 'field*' ]\n";
}
@Override

View File

@ -29,56 +29,71 @@ public class IndexPrivilegeTests extends AbstractPrivilegeTestCase {
public static final String ROLES =
"all_cluster_role:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
"all_indices_role:\n" +
" indices:\n" +
" '*': all\n" +
" - names: '*'\n" +
" privileges: [ all ]\n" +
"all_a_role:\n" +
" indices:\n" +
" 'a': all\n" +
" - names: 'a'\n" +
" privileges: [ all ]\n" +
"read_a_role:\n" +
" indices:\n" +
" 'a': read\n" +
" - names: 'a'\n" +
" privileges: [ read ]\n" +
"write_a_role:\n" +
" indices:\n" +
" 'a': write\n" +
" - names: 'a'\n" +
" privileges: [ write ]\n" +
"read_ab_role:\n" +
" indices:\n" +
" 'a': read\n" +
" 'b': read\n" +
" - names: [ 'a', 'b' ]\n" +
" privileges: [ read ]\n" +
"get_b_role:\n" +
" indices:\n" +
" 'b': get\n" +
" - names: 'b'\n" +
" privileges: [ get ]\n" +
"search_b_role:\n" +
" indices:\n" +
" 'b': search\n" +
" - names: 'b'\n" +
" privileges: [ search ]\n" +
"all_regex_ab_role:\n" +
" indices:\n" +
" '/a|b/': all\n" +
" - names: '/a|b/'\n" +
" privileges: [ all ]\n" +
"manage_starts_with_a_role:\n" +
" indices:\n" +
" 'a*': manage\n" +
" - names: 'a*'\n" +
" privileges: [ manage ]\n" +
"data_access_all_role:\n" +
" indices:\n" +
" '*': data_access\n" +
" - names: '*'\n" +
" privileges: [ data_access ]\n" +
"create_c_role:\n" +
" indices:\n" +
" 'c': create_index\n" +
" - names: 'c'\n" +
" privileges: [ create_index ]\n" +
"monitor_b_role:\n" +
" indices:\n" +
" 'b': monitor\n" +
" - names: 'b'\n" +
" privileges: [ monitor ]\n" +
"crud_a_role:\n" +
" indices:\n" +
" 'a': crud\n" +
" - names: 'a'\n" +
" privileges: [ crud ]\n" +
"delete_b_role:\n" +
" indices:\n" +
" 'b': delete\n" +
" - names: 'b'\n" +
" privileges: [ delete ]\n" +
"index_a_role:\n" +
" indices:\n" +
" 'a': index\n" +
" - names: 'a'\n" +
" privileges: [ index ]\n" +
"search_a_role:\n" +
" indices:\n" +
" 'a': search\n" +
" - names: 'a'\n" +
" privileges: [ search ]\n" +
"\n";
public static final String USERS =

View File

@ -44,17 +44,17 @@ public class IndicesPermissionsWithAliasesWildcardsAndRegexsTests extends Shield
protected String configRoles() {
return super.configRoles() +
"\nrole1:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" 't*':\n" +
" privileges: ALL\n" +
" fields: field1\n" +
" 'my_alias':\n" +
" privileges: ALL\n" +
" fields: field2\n" +
" '/an_.*/':\n" +
" privileges: ALL\n" +
" fields: field3\n";
" - names: 't*'\n" +
" privileges: [ALL]\n" +
" fields: [ field1 ]\n" +
" - names: 'my_alias'\n" +
" privileges: [ALL]\n" +
" fields: [field2]\n" +
" - names: '/an_.*/'\n" +
" privileges: [ALL]\n" +
" fields: [field3]\n";
}
@Override

View File

@ -56,20 +56,26 @@ import static org.hamcrest.Matchers.notNullValue;
public class LicensingTests extends ShieldIntegTestCase {
public static final String ROLES =
ShieldSettingsSource.DEFAULT_ROLE + ":\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*': manage\n" +
" '/.*/': write\n" +
" 'test': read\n" +
" 'test1': read\n" +
" - names: '*'\n" +
" privileges: [manage]\n" +
" - names: '/.*/'\n" +
" privileges: [write]\n" +
" - names: 'test'\n" +
" privileges: [read]\n" +
" - names: 'test1'\n" +
" privileges: [read]\n" +
"\n" +
"role_a:\n" +
" indices:\n" +
" 'a': all\n" +
" - names: 'a'\n" +
" privileges: [all]\n" +
"\n" +
"role_b:\n" +
" indices:\n" +
" 'b': all\n";
" - names: 'b'\n" +
" privileges: [all]\n";
public static final String USERS =
ShieldSettingsSource.CONFIG_STANDARD_USER +

View File

@ -34,20 +34,26 @@ public class MultipleIndicesPermissionsTests extends ShieldIntegTestCase {
@Override
protected String configRoles() {
return ShieldSettingsSource.DEFAULT_ROLE + ":\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*': manage\n" +
" '/.*/': write\n" +
" 'test': read\n" +
" 'test1': read\n" +
" - names: '*'\n" +
" privileges: [manage]\n" +
" - names: '/.*/'\n" +
" privileges: [write]\n" +
" - names: 'test'\n" +
" privileges: [read]\n" +
" - names: 'test1'\n" +
" privileges: [read]\n" +
"\n" +
"role_a:\n" +
" indices:\n" +
" 'a': all\n" +
" - names: 'a'\n" +
" privileges: [all]\n" +
"\n" +
"role_b:\n" +
" indices:\n" +
" 'b': all\n";
" - names: 'b'\n" +
" privileges: [all]\n";
}
@Override

View File

@ -38,9 +38,10 @@ public class PermissionPrecedenceTests extends ShieldIntegTestCase {
@Override
protected String configRoles() {
return "admin:\n" +
" cluster: all\n" +
" cluster: [ all ] \n" +
" indices:\n" +
" '*': all\n" +
" - names: '*'\n" +
" privileges: [ all ]" +
"\n" +
"transport_client:\n" +
" cluster:\n" +
@ -49,7 +50,8 @@ public class PermissionPrecedenceTests extends ShieldIntegTestCase {
"\n" +
"user:\n" +
" indices:\n" +
" 'test_*': all\n";
" - names: 'test_*'\n" +
" privileges: [ all ]";
}
@Override

View File

@ -1,196 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.integration;
import org.elasticsearch.ElasticsearchSecurityException;
import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.suggest.SuggestResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.suggest.SuggestBuilders;
import org.elasticsearch.shield.authc.support.Hasher;
import org.elasticsearch.shield.authc.support.SecuredString;
import org.elasticsearch.shield.authc.support.SecuredStringTests;
import org.elasticsearch.shield.authc.support.UsernamePasswordToken;
import org.elasticsearch.test.ShieldIntegTestCase;
import java.util.Map;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.client.Requests.searchRequest;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.ShieldTestsUtils.assertAuthorizationException;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
public class SearchGetAndSuggestPermissionsTests extends ShieldIntegTestCase {
protected static final String USERS_PASSWD_HASHED = new String(Hasher.BCRYPT.hash(new SecuredString("passwd".toCharArray())));
@Override
protected String configRoles() {
return super.configRoles() + "\n" +
"\n" +
"search_role:\n" +
" indices:\n" +
" 'a': search\n" +
"\n" +
"get_role:\n" +
" indices:\n" +
" 'a': get\n" +
"\n" +
"suggest_role:\n" +
" indices:\n" +
" 'a': suggest\n";
}
@Override
protected String configUsers() {
return super.configUsers() +
"search_user:" + USERS_PASSWD_HASHED + "\n" +
"get_user:" + USERS_PASSWD_HASHED + "\n" +
"suggest_user:" + USERS_PASSWD_HASHED + "\n";
}
@Override
protected String configUsersRoles() {
return super.configUsersRoles() +
"search_role:search_user\n" +
"get_role:get_user\n" +
"suggest_role:suggest_user\n";
}
/**
* testing both "search" and "suggest" privileges can execute the suggest API
*/
public void testSuggestAPI() throws Exception {
IndexResponse indexResponse = index("a", "type", jsonBuilder()
.startObject()
.field("name", "value")
.endObject());
assertThat(indexResponse.isCreated(), is(true));
refresh();
Client client = internalCluster().transportClient();
Map<String, String> headers = singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, userHeader("suggest_user", "passwd"));
SuggestResponse suggestResponse = client.filterWithHeader(headers)
.prepareSuggest("a")
.addSuggestion(SuggestBuilders.termSuggestion("name").field("name").text("val")).get();
assertNoFailures(suggestResponse);
assertThat(suggestResponse.getSuggest().size(), is(1));
suggestResponse = client
.filterWithHeader(singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, userHeader("search_user", "passwd")))
.prepareSuggest("a")
.addSuggestion(SuggestBuilders.termSuggestion("name").field("name").text("val")).get();
assertNoFailures(suggestResponse);
assertThat(suggestResponse.getSuggest().size(), is(1));
try {
client.filterWithHeader(singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, userHeader("suggest_user", "passwd")))
.prepareSearch("a")
.get();
fail("a user with only a suggest privilege cannot execute search");
} catch (ElasticsearchSecurityException e) {
logger.error("failed to search", e);
// expected
}
}
/**
* testing that "search" privilege cannot execute the get API
*/
public void testGetAPI() throws Exception {
IndexResponse indexResponse = index("a", "type", jsonBuilder()
.startObject()
.field("name", "value")
.endObject());
assertThat(indexResponse.isCreated(), is(true));
refresh();
Client client = internalCluster().transportClient();
try {
client.filterWithHeader(singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, userHeader("search_user", "passwd")))
.prepareGet("a", "type", indexResponse.getId())
.get();
fail("a user with only search privilege should not be authorized for a get request");
} catch (ElasticsearchSecurityException e) {
// expected
assertAuthorizationException(e);
logger.error("could not get document", e);
}
}
/**
* testing that "get" privilege can execute the mget API, and "search" privilege cannot execute mget
*/
public void testMultiGetAPI() throws Exception {
IndexResponse indexResponse = index("a", "type", jsonBuilder()
.startObject()
.field("name", "value")
.endObject());
assertThat(indexResponse.isCreated(), is(true));
refresh();
Client client = internalCluster().transportClient();
MultiGetResponse response = client
.filterWithHeader(singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, userHeader("get_user", "passwd")))
.prepareMultiGet().add("a", "type", indexResponse.getId())
.get();
assertNotNull(response);
assertThat(response.getResponses().length, is(1));
assertThat(response.getResponses()[0].getId(), equalTo(indexResponse.getId()));
try {
client.filterWithHeader(singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, userHeader("search_user", "passwd")))
.prepareMultiGet().add("a", "type", indexResponse.getId())
.get();
fail("a user with only a search privilege should not be able to execute the mget API");
} catch (ElasticsearchSecurityException e) {
// expected
assertAuthorizationException(e);
logger.error("could not mget documents", e);
}
}
/**
* testing that "search" privilege can execute the msearch API
*/
public void testMultiSearchAPI() throws Exception {
IndexResponse indexResponse = index("a", "type", jsonBuilder()
.startObject()
.field("name", "value")
.endObject());
assertThat(indexResponse.isCreated(), is(true));
refresh();
Client client = internalCluster().transportClient();
MultiSearchResponse response = client
.filterWithHeader(singletonMap(UsernamePasswordToken.BASIC_AUTH_HEADER, userHeader("search_user", "passwd")))
.prepareMultiSearch().add(searchRequest("a").types("type"))
.get();
assertNotNull(response);
assertThat(response.getResponses().length, is(1));
SearchResponse first = response.getResponses()[0].getResponse();
assertNotNull(first);
assertNoFailures(first);
}
private static String userHeader(String username, String password) {
return UsernamePasswordToken.basicAuthHeaderValue(username, SecuredStringTests.build(password));
}
}

View File

@ -58,7 +58,8 @@ public class ShieldClearScrollTests extends ShieldIntegTestCase {
" - cluster:admin/indices/scroll/clear_all \n" +
"denied_role:\n" +
" indices:\n" +
" '*': ALL\n";
" - names: '*'" +
" privileges: [ALL]\n";
}
@Before

View File

@ -93,21 +93,25 @@ abstract public class AbstractAdLdapRealmTestCase extends ShieldIntegTestCase {
return super.configRoles() +
"\n" +
"Avengers:\n" +
" cluster: NONE\n" +
" cluster: [ NONE ]\n" +
" indices:\n" +
" 'avengers': ALL\n" +
" - names: 'avengers'\n" +
" privileges: [ all ]\n" +
"SHIELD:\n" +
" cluster: NONE\n" +
" indices:\n " +
" '" + SHIELD_INDEX + "': ALL\n" +
" cluster: [ NONE ]\n" +
" indices:\n" +
" - names: '" + SHIELD_INDEX + "'\n" +
" privileges: [ all ]\n" +
"Gods:\n" +
" cluster: NONE\n" +
" cluster: [ NONE ]\n" +
" indices:\n" +
" '" + ASGARDIAN_INDEX + "': ALL\n" +
" - names: '" + ASGARDIAN_INDEX + "'\n" +
" privileges: [ all ]\n" +
"Philanthropists:\n" +
" cluster: NONE\n" +
" cluster: [ NONE ]\n" +
" indices:\n" +
" '" + PHILANTHROPISTS_INDEX + "': ALL\n";
" - names: '" + PHILANTHROPISTS_INDEX + "'\n" +
" privileges: [ all ]\n";
}
protected void assertAccessAllowed(String user, String index) throws IOException {

View File

@ -13,7 +13,7 @@ import java.io.IOException;
* This tests the group to role mappings from LDAP sources provided by the super class - available from super.realmConfig.
* The super class will provide appropriate group mappings via configGroupMappings()
*/
@Network
//@Network
public class GroupMappingTests extends AbstractAdLdapRealmTestCase {
public void testAuthcAuthz() throws IOException {
String avenger = realmConfig.loginWithCommonName ? "Natasha Romanoff" : "blackwidow";

View File

@ -19,9 +19,10 @@ public class MultiGroupMappingTests extends AbstractAdLdapRealmTestCase {
return super.configRoles() +
"\n" +
"MarvelCharacters:\n" +
" cluster: NONE\n" +
" cluster: [ NONE ]\n" +
" indices:\n" +
" 'marvel_comics': ALL\n";
" - names: 'marvel_comics'\n" +
" privileges: [ all ]\n";
}
@Override

View File

@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.node.MockNode;
import org.elasticsearch.node.Node;
import org.elasticsearch.shield.authc.esnative.ESNativeRealm;
@ -43,7 +42,6 @@ public class ShieldF {
settings.put("xpack.shield.enabled", "true");
// Disable Monitoring to prevent cluster activity
settings.put("xpack.monitoring.enabled", "false");
settings.put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), Shield.OPT_OUT_QUERY_CACHE);
settings.put("cluster.name", ShieldF.class.getSimpleName());
String homeDir = System.getProperty("es.path.home");

View File

@ -7,6 +7,7 @@ package org.elasticsearch.shield;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.shield.audit.index.IndexAuditTrail;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.xpack.XPackPlugin;
@ -16,6 +17,7 @@ import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.notNullValue;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.Matchers.arrayContaining;
import static org.hamcrest.Matchers.not;
public class ShieldPluginSettingsTests extends ESTestCase {
@ -132,4 +134,59 @@ public class ShieldPluginSettingsTests extends ESTestCase {
assertThat(additionalSettings.get("tribe.t2.shield.bar"), is("foo"));
assertThat(additionalSettings.getAsArray("tribe.t2.shield.something.else.here"), arrayContaining("foo", "bar"));
}
public void testValidAutoCreateIndex() {
Shield.validateAutoCreateIndex(Settings.EMPTY);
Shield.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", true).build());
try {
Shield.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", false).build());
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString(ShieldTemplateService.SECURITY_INDEX_NAME));
assertThat(e.getMessage(), not(containsString(IndexAuditTrail.INDEX_NAME_PREFIX)));
}
Shield.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".security").build());
Shield.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", "*s*").build());
Shield.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".s*").build());
try {
Shield.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", "foo").build());
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString(ShieldTemplateService.SECURITY_INDEX_NAME));
assertThat(e.getMessage(), not(containsString(IndexAuditTrail.INDEX_NAME_PREFIX)));
}
try {
Shield.validateAutoCreateIndex(Settings.builder().put("action.auto_create_index", ".shield_audit_log*").build());
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString(ShieldTemplateService.SECURITY_INDEX_NAME));
}
Shield.validateAutoCreateIndex(Settings.builder()
.put("action.auto_create_index", ".security")
.put("shield.audit.enabled", true)
.build());
try {
Shield.validateAutoCreateIndex(Settings.builder()
.put("action.auto_create_index", ".security")
.put("shield.audit.enabled", true)
.put("shield.audit.outputs", randomFrom("index", "logfile,index"))
.build());
fail("IllegalArgumentException expected");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString(ShieldTemplateService.SECURITY_INDEX_NAME));
assertThat(e.getMessage(), containsString(IndexAuditTrail.INDEX_NAME_PREFIX));
}
Shield.validateAutoCreateIndex(Settings.builder()
.put("action.auto_create_index", ".shield_audit_log*,.security")
.put("shield.audit.enabled", true)
.put("shield.audit.outputs", randomFrom("index", "logfile,index"))
.build());
}
}

View File

@ -8,6 +8,7 @@ package org.elasticsearch.shield.audit;
import org.elasticsearch.Version;
import org.elasticsearch.common.inject.Guice;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
@ -60,7 +61,7 @@ public class AuditTrailModuleTests extends ESTestCase {
settingsModule.registerSetting(Setting.boolSetting("shield.audit.enabled", true, Setting.Property.NodeScope));
Injector injector = Guice.createInjector(
settingsModule,
new NetworkModule(new NetworkService(settings), settings, false, null) {
new NetworkModule(new NetworkService(settings), settings, false, new NamedWriteableRegistry()) {
@Override
protected void configure() {
bind(Transport.class).to(LocalTransport.class).asEagerSingleton();

View File

@ -21,7 +21,6 @@ import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.search.SearchHit;
@ -168,14 +167,6 @@ public class IndexAuditTrailTests extends ShieldIntegTestCase {
Settings.Builder builder = Settings.builder()
.put(super.nodeSettings(nodeOrdinal))
.put(XPackPlugin.featureEnabledSetting(Shield.NAME), useShield);
// For tests we forcefully configure Shield's custom query cache because the test framework
// randomizes the query cache impl but if shield is disabled then we don't need to forcefully
// set the query cache
if (useShield == false) {
builder.remove(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey());
}
return builder.build();
}
};

View File

@ -50,7 +50,8 @@ public class AnonymousUserTests extends ShieldIntegTestCase {
return super.configRoles() + "\n" +
"anonymous:\n" +
" indices:\n" +
" '*': READ";
" - names: '*'" +
" privileges: [ READ ]\n";
}
public void testAnonymousViaHttp() throws Exception {

View File

@ -9,6 +9,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.env.Environment;
import org.elasticsearch.shield.User;
import org.elasticsearch.shield.authc.esnative.ESNativeRealm;
import org.elasticsearch.shield.authc.esusers.ESUsersRealm;
import org.elasticsearch.shield.authc.ldap.LdapRealm;
import org.elasticsearch.shield.license.ShieldLicenseState;
@ -26,6 +27,7 @@ import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.isOneOf;
import static org.hamcrest.Matchers.notNullValue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@ -40,7 +42,8 @@ public class RealmsTests extends ESTestCase {
@Before
public void init() throws Exception {
factories = new HashMap<>();
factories.put("esusers", new DummyRealm.Factory("esusers", true));
factories.put(ESUsersRealm.TYPE, new DummyRealm.Factory(ESUsersRealm.TYPE, true));
factories.put(ESNativeRealm.TYPE, new DummyRealm.Factory(ESNativeRealm.TYPE, true));
for (int i = 0; i < randomIntBetween(1, 5); i++) {
DummyRealm.Factory factory = new DummyRealm.Factory("type_" + i, rarely());
factories.put("type_" + i, factory);
@ -52,13 +55,13 @@ public class RealmsTests extends ESTestCase {
public void testWithSettings() throws Exception {
Settings.Builder builder = Settings.builder()
.put("path.home", createTempDir());
List<Integer> orders = new ArrayList<>(factories.size() - 1);
for (int i = 0; i < factories.size() - 1; i++) {
List<Integer> orders = new ArrayList<>(factories.size() - 2);
for (int i = 0; i < factories.size() - 2; i++) {
orders.add(i);
}
Collections.shuffle(orders, random());
Map<Integer, Integer> orderToIndex = new HashMap<>();
for (int i = 0; i < factories.size() - 1; i++) {
for (int i = 0; i < factories.size() - 2; i++) {
builder.put("shield.authc.realms.realm_" + i + ".type", "type_" + i);
builder.put("shield.authc.realms.realm_" + i + ".order", orders.get(i));
orderToIndex.put(orders.get(i), i);
@ -102,6 +105,10 @@ public class RealmsTests extends ESTestCase {
assertThat(iter.hasNext(), is(true));
Realm realm = iter.next();
assertThat(realm, notNullValue());
assertThat(realm.type(), equalTo(ESNativeRealm.TYPE));
assertThat(realm.name(), equalTo("default_" + ESNativeRealm.TYPE));
assertThat(iter.hasNext(), is(true));
realm = iter.next();
assertThat(realm.type(), equalTo(ESUsersRealm.TYPE));
assertThat(realm.name(), equalTo("default_" + ESUsersRealm.TYPE));
assertThat(iter.hasNext(), is(false));
@ -110,13 +117,13 @@ public class RealmsTests extends ESTestCase {
public void testUnlicensedWithOnlyCustomRealms() throws Exception {
Settings.Builder builder = Settings.builder()
.put("path.home", createTempDir());
List<Integer> orders = new ArrayList<>(factories.size() - 1);
for (int i = 0; i < factories.size() - 1; i++) {
List<Integer> orders = new ArrayList<>(factories.size() - 2);
for (int i = 0; i < factories.size() - 2; i++) {
orders.add(i);
}
Collections.shuffle(orders, random());
Map<Integer, Integer> orderToIndex = new HashMap<>();
for (int i = 0; i < factories.size() - 1; i++) {
for (int i = 0; i < factories.size() - 2; i++) {
builder.put("shield.authc.realms.realm_" + i + ".type", "type_" + i);
builder.put("shield.authc.realms.realm_" + i + ".order", orders.get(i));
orderToIndex.put(orders.get(i), i);
@ -138,10 +145,10 @@ public class RealmsTests extends ESTestCase {
i = 0;
when(shieldLicenseState.customRealmsEnabled()).thenReturn(false);
for (Realm realm : realms) {
assertThat(realm.type, is(ESUsersRealm.TYPE));
assertThat(realm.type, isOneOf(ESUsersRealm.TYPE, ESNativeRealm.TYPE));
i++;
}
assertThat(i, is(1));
assertThat(i, is(2));
}
public void testUnlicensedWithInternalRealms() throws Exception {
@ -178,13 +185,13 @@ public class RealmsTests extends ESTestCase {
public void testDisabledRealmsAreNotAdded() throws Exception {
Settings.Builder builder = Settings.builder()
.put("path.home", createTempDir());
List<Integer> orders = new ArrayList<>(factories.size() - 1);
for (int i = 0; i < factories.size() - 1; i++) {
List<Integer> orders = new ArrayList<>(factories.size() - 2);
for (int i = 0; i < factories.size() - 2; i++) {
orders.add(i);
}
Collections.shuffle(orders, random());
Map<Integer, Integer> orderToIndex = new HashMap<>();
for (int i = 0; i < factories.size() - 1; i++) {
for (int i = 0; i < factories.size() - 2; i++) {
builder.put("shield.authc.realms.realm_" + i + ".type", "type_" + i);
builder.put("shield.authc.realms.realm_" + i + ".order", orders.get(i));
boolean enabled = randomBoolean();
@ -205,7 +212,11 @@ public class RealmsTests extends ESTestCase {
Realm realm = iterator.next();
Integer index = orderToIndex.get(realm.order());
if (index == null) {
// Default realm is inserted when factories size is 1 and enabled is false
// Default realms are inserted when factories size is 1 and enabled is false
assertThat(realm.type(), equalTo(ESNativeRealm.TYPE));
assertThat(realm.name(), equalTo("default_" + ESNativeRealm.TYPE));
assertThat(iterator.hasNext(), is(true));
realm = iterator.next();
assertThat(realm.type(), equalTo(ESUsersRealm.TYPE));
assertThat(realm.name(), equalTo("default_" + ESUsersRealm.TYPE));
assertThat(iterator.hasNext(), is(false));
@ -264,8 +275,8 @@ public class RealmsTests extends ESTestCase {
@Override
public DummyRealm createDefault(String name) {
if (type().equals("esusers")) {
return new DummyRealm("esusers", new RealmConfig(name, Settings.EMPTY,
if (type().equals(ESNativeRealm.TYPE) || type().equals(ESUsersRealm.TYPE)) {
return new DummyRealm(type(), new RealmConfig(name, Settings.EMPTY,
Settings.builder().put("path.home", createTempDir()).build()));
}
return null;

View File

@ -36,9 +36,9 @@ public class RunAsIntegTests extends ShieldIntegTestCase {
static final String TRANSPORT_CLIENT_USER = "transport_user";
static final String ROLES =
"transport_client:\n" +
" cluster: cluster:monitor/nodes/liveness\n" +
" cluster: [ 'cluster:monitor/nodes/liveness' ]\n" +
"run_as_role:\n" +
" run_as: " + ShieldSettingsSource.DEFAULT_USER_NAME + ",idontexist\n";
" run_as: [ '" + ShieldSettingsSource.DEFAULT_USER_NAME + "', 'idontexist' ]\n";
@Override
public Settings nodeSettings(int nodeOrdinal) {

View File

@ -5,6 +5,7 @@
*/
package org.elasticsearch.shield.authc.ldap.support;
import com.unboundid.ldap.listener.InMemoryDirectoryServer;
import com.unboundid.ldap.sdk.LDAPConnection;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.shield.authc.RealmConfig;
@ -12,8 +13,10 @@ import org.elasticsearch.shield.authc.support.SecuredString;
import org.elasticsearch.shield.ssl.ClientSSLService;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.not;
@ -43,6 +46,7 @@ public class SessionFactoryLoadBalancingTests extends LdapTestCase {
public void testRoundRobinWithFailures() throws Exception {
assumeTrue("at least one ldap server should be present for this test", ldapServers.length > 1);
logger.debug("using [{}] ldap servers, urls {}", ldapServers.length, ldapUrls());
TestSessionFactory testSessionFactory = createSessionFactory(LdapLoadBalancing.ROUND_ROBIN);
// create a list of ports
@ -50,19 +54,31 @@ public class SessionFactoryLoadBalancingTests extends LdapTestCase {
for (int i = 0; i < ldapServers.length; i++) {
ports.add(ldapServers[i].getListenPort());
}
logger.debug("list of all ports {}", ports);
int numberToKill = randomIntBetween(1, numberOfLdapServers - 1);
for (int i = 0; i < numberToKill; i++) {
int index = randomIntBetween(0, numberOfLdapServers - 1);
ports.remove(Integer.valueOf(ldapServers[index].getListenPort()));
final int numberToKill = randomIntBetween(1, numberOfLdapServers - 1);
logger.debug("killing [{}] servers", numberToKill);
        // get a subset to kill
final List<InMemoryDirectoryServer> ldapServersToKill = randomSubsetOf(numberToKill, ldapServers);
final List<InMemoryDirectoryServer> ldapServersList = Arrays.asList(ldapServers);
for (InMemoryDirectoryServer ldapServerToKill : ldapServersToKill) {
final int index = ldapServersList.indexOf(ldapServerToKill);
assertThat(index, greaterThanOrEqualTo(0));
final Integer port = Integer.valueOf(ldapServers[index].getListenPort());
logger.debug("shutting down server index [{}] listening on [{}]", index, port);
assertTrue(ports.remove(port));
ldapServers[index].shutDown(true);
assertThat(ldapServers[index].getListenPort(), is(-1));
}
final int numberOfIterations = randomIntBetween(1, 5);
for (int iteration = 0; iteration < numberOfIterations; iteration++) {
logger.debug("iteration [{}]", iteration);
for (Integer port : ports) {
LDAPConnection connection = null;
try {
logger.debug("attempting connection with expected port [{}]", port);
connection = testSessionFactory.getServerSet().getConnection();
assertThat(connection.getConnectedPort(), is(port));
} finally {
@ -76,6 +92,7 @@ public class SessionFactoryLoadBalancingTests extends LdapTestCase {
public void testFailover() throws Exception {
assumeTrue("at least one ldap server should be present for this test", ldapServers.length > 1);
logger.debug("using [{}] ldap servers, urls {}", ldapServers.length, ldapUrls());
TestSessionFactory testSessionFactory = createSessionFactory(LdapLoadBalancing.FAILOVER);
// first test that there is no round robin stuff going on
@ -92,32 +109,46 @@ public class SessionFactoryLoadBalancingTests extends LdapTestCase {
}
}
List<Integer> stoppedServers = new ArrayList<>();
// now we should kill some servers including the first one
int numberToKill = randomIntBetween(1, numberOfLdapServers - 1);
// always kill the first one, but don't add to the list
logger.debug("shutting down server index [0] listening on [{}]", ldapServers[0].getListenPort());
// always kill the first one
ldapServers[0].shutDown(true);
stoppedServers.add(0);
for (int i = 0; i < numberToKill - 1; i++) {
int index = randomIntBetween(1, numberOfLdapServers - 1);
ldapServers[index].shutDown(true);
stoppedServers.add(index);
assertThat(ldapServers[0].getListenPort(), is(-1));
// now randomly shutdown some others
if (ldapServers.length > 2) {
// kill at least one other server, but we need at least one good one. Hence the upper bound is number - 2 since we need at least
// one server to use!
final int numberToKill = randomIntBetween(1, numberOfLdapServers - 2);
InMemoryDirectoryServer[] allButFirstServer = Arrays.copyOfRange(ldapServers, 1, ldapServers.length);
            // get a subset to kill
final List<InMemoryDirectoryServer> ldapServersToKill = randomSubsetOf(numberToKill, allButFirstServer);
final List<InMemoryDirectoryServer> ldapServersList = Arrays.asList(ldapServers);
for (InMemoryDirectoryServer ldapServerToKill : ldapServersToKill) {
final int index = ldapServersList.indexOf(ldapServerToKill);
assertThat(index, greaterThanOrEqualTo(1));
final Integer port = Integer.valueOf(ldapServers[index].getListenPort());
logger.debug("shutting down server index [{}] listening on [{}]", index, port);
ldapServers[index].shutDown(true);
assertThat(ldapServers[index].getListenPort(), is(-1));
}
}
int firstNonStoppedPort = -1;
// now we find the first that isn't stopped
for (int i = 0; i < numberOfLdapServers; i++) {
if (stoppedServers.contains(i) == false) {
if (ldapServers[i].getListenPort() != -1) {
firstNonStoppedPort = ldapServers[i].getListenPort();
break;
}
}
logger.debug("first non stopped port [{}]", firstNonStoppedPort);
assertThat(firstNonStoppedPort, not(-1));
final int numberOfIterations = randomIntBetween(1, 5);
for (int iteration = 0; iteration < numberOfIterations; iteration++) {
LDAPConnection connection = null;
try {
logger.debug("attempting connection with expected port [{}] iteration [{}]", firstNonStoppedPort, iteration);
connection = testSessionFactory.getServerSet().getConnection();
assertThat(connection.getConnectedPort(), is(firstNonStoppedPort));
} finally {

View File

@ -40,7 +40,8 @@ public class AnalyzeTests extends ShieldIntegTestCase {
//role that has analyze indices privileges only
"analyze_indices:\n" +
" indices:\n" +
" 'test_*': indices:admin/analyze\n" +
" - names: 'test_*'\n" +
" privileges: [ 'indices:admin/analyze' ]\n" +
"analyze_cluster:\n" +
" cluster:\n" +
" - cluster:admin/analyze\n";

View File

@ -58,25 +58,32 @@ public class IndexAliasesTests extends ShieldIntegTestCase {
//role that has create index only privileges
"create_only:\n" +
" indices:\n" +
" '*': create_index\n" +
//role that has create index and managa aliases on test_*, not enough to manage aliases outside of test_* namespace
" - names: '*'\n" +
" privileges: [ create_index ]\n" +
                //role that has create index and manage_aliases on test_*, not enough to manage aliases outside of test_* namespace
"create_test_aliases_test:\n" +
" indices:\n" +
" 'test_*': create_index,manage_aliases\n" +
//role that has create index on test_* and manage aliases on alias_*, can't create aliases pointing to test_* though
" - names: 'test_*'\n" +
" privileges: [ create_index, 'indices:admin/aliases*' ]\n" +
//role that has create index on test_* and manage_aliases on alias_*, can't create aliases pointing to test_* though
"create_test_aliases_alias:\n" +
" indices:\n" +
" 'test_*': create_index\n" +
" 'alias_*': manage_aliases\n" +
" - names: 'test_*'\n" +
" privileges: [ create_index ]\n" +
" - names: 'alias_*'\n" +
" privileges: [ 'indices:admin/aliases*' ]\n" +
//role that has create index on test_* and manage_aliases on both alias_* and test_*
"create_test_aliases_test_alias:\n" +
" indices:\n" +
" 'test_*': create_index\n" +
" 'alias_*,test_*': manage_aliases\n" +
" - names: 'test_*'\n" +
" privileges: [ create_index ]\n" +
" - names: [ 'alias_*', 'test_*' ]\n" +
" privileges: [ 'indices:admin/aliases*' ]\n" +
//role that has manage_aliases only on both test_* and alias_*
"aliases_only:\n" +
" indices:\n" +
" 'alias_*,test_*': manage_aliases\n";
" - names: [ 'alias_*', 'test_*']\n" +
" privileges: [ 'indices:admin/aliases*' ]\n";
}
@Before
@ -361,7 +368,7 @@ public class IndexAliasesTests extends ShieldIntegTestCase {
assertAcked(client.admin().indices().prepareCreate("test_1"));
try {
//fails: user doesn't have manage aliases on test_1
            //fails: user doesn't have manage_aliases on test_1
client.admin().indices().prepareAliases().addAlias("test_1", "test_alias").get();
fail("add alias should have failed due to missing manage_aliases privileges on test_alias and test_1");
} catch(ElasticsearchSecurityException e) {
@ -370,7 +377,7 @@ public class IndexAliasesTests extends ShieldIntegTestCase {
}
try {
//fails: user doesn't have manage aliases on test_1
            //fails: user doesn't have manage_aliases on test_1
client.admin().indices().prepareAliases().addAlias("test_1", "alias_1").get();
fail("add alias should have failed due to missing manage_aliases privileges on test_1");
} catch(ElasticsearchSecurityException e) {
@ -379,7 +386,7 @@ public class IndexAliasesTests extends ShieldIntegTestCase {
}
try {
//fails: user doesn't have manage aliases on test_*, no matching indices to replace wildcards
            //fails: user doesn't have manage_aliases on test_*, no matching indices to replace wildcards
client.admin().indices().prepareAliases().addAlias("test_*", "alias_1").get();
fail("add alias should have failed due to missing manage_aliases privileges on test_1");
} catch(IndexNotFoundException e) {
@ -458,7 +465,7 @@ public class IndexAliasesTests extends ShieldIntegTestCase {
assertAcked(client.admin().indices().prepareCreate("test_1"));
try {
//fails: user doesn't have manage aliases on test_1, nor test_alias
            //fails: user doesn't have manage_aliases on test_1, nor test_alias
client.admin().indices().prepareGetAliases().setAliases("test_alias").setIndices("test_1").get();
fail("get alias should have failed due to missing manage_aliases privileges on test_alias and test_1");
} catch(ElasticsearchSecurityException e) {
@ -467,7 +474,7 @@ public class IndexAliasesTests extends ShieldIntegTestCase {
}
try {
//fails: user doesn't have manage aliases on test_*, no matching indices to replace wildcards
            //fails: user doesn't have manage_aliases on test_*, no matching indices to replace wildcards
client.admin().indices().prepareGetAliases().setIndices("test_*").setAliases("test_alias").get();
fail("get alias should have failed due to missing manage_aliases privileges on test_*");
} catch(IndexNotFoundException e) {

View File

@ -302,7 +302,7 @@ public class InternalAuthorizationServiceTests extends ESTestCase {
User user = new User("test user", "a_star", "b");
ClusterState state = mock(ClusterState.class);
when(rolesStore.role("a_star")).thenReturn(Role.builder("a_star").add(IndexPrivilege.ALL, "a*").build());
when(rolesStore.role("b")).thenReturn(Role.builder("a_star").add(IndexPrivilege.SEARCH, "b").build());
when(rolesStore.role("b")).thenReturn(Role.builder("a_star").add(IndexPrivilege.READ, "b").build());
when(clusterService.state()).thenReturn(state);
Settings indexSettings = Settings.builder().put("index.version.created", Version.CURRENT).build();
when(state.metaData()).thenReturn(MetaData.builder()

View File

@ -30,10 +30,12 @@ public class IndicesAndAliasesResolverIntegrationTests extends ShieldIntegTestCa
@Override
protected String configRoles() {
return ShieldSettingsSource.DEFAULT_ROLE + ":\n" +
" cluster: ALL\n" +
" cluster: [ ALL ]\n" +
" indices:\n" +
" '*': manage,write\n" +
" '/test.*/': read\n";
" - names: '*'\n" +
" privileges: [ manage, write ]\n" +
" - names: '/test.*/'\n" +
" privileges: [ read ]\n";
}
public void testSearchForAll() {

View File

@ -18,7 +18,6 @@ import java.util.function.Predicate;
import static org.elasticsearch.shield.authz.privilege.IndexPrivilege.MONITOR;
import static org.elasticsearch.shield.authz.privilege.IndexPrivilege.READ;
import static org.elasticsearch.shield.authz.privilege.IndexPrivilege.SEARCH;
import static org.elasticsearch.shield.authz.privilege.IndexPrivilege.union;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
@ -33,7 +32,7 @@ public class PermissionTests extends ESTestCase {
@Before
public void init() {
Role.Builder builder = Role.builder("test");
builder.add(union(SEARCH, MONITOR), "test_*", "/foo.*/");
builder.add(union(MONITOR), "test_*", "/foo.*/");
builder.add(union(READ), "baz_*foo", "/fool.*bar/");
builder.add(union(MONITOR), "/bar.*/");
permission = builder.build();

View File

@ -5,15 +5,10 @@
*/
package org.elasticsearch.shield.authz.privilege;
import org.elasticsearch.action.get.GetAction;
import org.elasticsearch.action.get.MultiGetAction;
import org.elasticsearch.action.ingest.DeletePipelineAction;
import org.elasticsearch.action.ingest.GetPipelineAction;
import org.elasticsearch.action.ingest.PutPipelineAction;
import org.elasticsearch.action.ingest.SimulatePipelineAction;
import org.elasticsearch.action.search.MultiSearchAction;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.suggest.SuggestAction;
import org.elasticsearch.shield.support.AutomatonPredicate;
import org.elasticsearch.shield.support.Automatons;
import org.elasticsearch.test.ESTestCase;
@ -77,16 +72,6 @@ public class PrivilegeTests extends ESTestCase {
assertThat(cluster, is(cluster2));
}
public void testIngestPrivilege() throws Exception {
Privilege.Name name = new Privilege.Name("manage_pipeline");
ClusterPrivilege cluster = ClusterPrivilege.get(name);
assertThat(cluster, is(ClusterPrivilege.MANAGE_PIPELINE));
assertThat(cluster.predicate().test(PutPipelineAction.NAME), is(true));
assertThat(cluster.predicate().test(DeletePipelineAction.NAME), is(true));
assertThat(cluster.predicate().test(GetPipelineAction.NAME), is(true));
assertThat(cluster.predicate().test(SimulatePipelineAction.NAME), is(true));
}
public void testClusterTemplateActions() throws Exception {
Privilege.Name name = new Privilege.Name("indices:admin/template/delete");
ClusterPrivilege cluster = ClusterPrivilege.get(name);
@ -262,27 +247,4 @@ public class PrivilegeTests extends ESTestCase {
assertThat(predicate.test("indices:admin/mapping/put"), is(false));
assertThat(predicate.test("indices:admin/mapping/whatever"), is(false));
}
public void testSearchPrivilege() throws Exception {
Predicate<String> predicate = IndexPrivilege.SEARCH.predicate();
assertThat(predicate.test(SearchAction.NAME), is(true));
assertThat(predicate.test(SearchAction.NAME + "/whatever"), is(true));
assertThat(predicate.test(MultiSearchAction.NAME), is(true));
assertThat(predicate.test(MultiSearchAction.NAME + "/whatever"), is(true));
assertThat(predicate.test(SuggestAction.NAME), is(true));
assertThat(predicate.test(SuggestAction.NAME + "/whatever"), is(true));
assertThat(predicate.test(GetAction.NAME), is(false));
assertThat(predicate.test(GetAction.NAME + "/whatever"), is(false));
assertThat(predicate.test(MultiGetAction.NAME), is(false));
assertThat(predicate.test(MultiGetAction.NAME + "/whatever"), is(false));
}
public void testGetPrivilege() throws Exception {
Predicate<String> predicate = IndexPrivilege.GET.predicate();
assertThat(predicate.test(GetAction.NAME), is(true));
assertThat(predicate.test(GetAction.NAME + "/whatever"), is(true));
assertThat(predicate.test(MultiGetAction.NAME), is(true));
assertThat(predicate.test(MultiGetAction.NAME + "/whatever"), is(true));
}
}

View File

@ -57,7 +57,7 @@ public class FileRolesStoreTests extends ESTestCase {
.put(XPackPlugin.featureEnabledSetting(Shield.DLS_FLS_FEATURE), true)
.build());
assertThat(roles, notNullValue());
assertThat(roles.size(), is(10));
assertThat(roles.size(), is(9));
Role role = roles.get("role1");
assertThat(role, notNullValue());
@ -82,7 +82,8 @@ public class FileRolesStoreTests extends ESTestCase {
assertThat(group.indices().length, is(1));
assertThat(group.indices()[0], equalTo("idx3"));
assertThat(group.privilege(), notNullValue());
assertThat(group.privilege(), is(IndexPrivilege.CRUD));
assertThat(group.privilege().implies(IndexPrivilege.READ), is(true));
assertThat(group.privilege().implies(IndexPrivilege.WRITE),is(true));
role = roles.get("role1.ab");
assertThat(role, notNullValue());
@ -121,12 +122,7 @@ public class FileRolesStoreTests extends ESTestCase {
assertThat(group.privilege().isAlias(IndexPrivilege.union(IndexPrivilege.READ, IndexPrivilege.WRITE)), is(true));
role = roles.get("role4");
assertThat(role, notNullValue());
assertThat(role.name(), equalTo("role4"));
assertThat(role.cluster(), notNullValue());
assertThat(role.cluster(), is(ClusterPermission.Core.NONE));
assertThat(role.indices(), is(IndicesPermission.Core.NONE));
assertThat(role.runAs(), is(RunAsPermission.Core.NONE));
assertThat(role, nullValue());
role = roles.get("role_run_as");
assertThat(role, notNullValue());
@ -214,7 +210,7 @@ public class FileRolesStoreTests extends ESTestCase {
.put(XPackPlugin.featureEnabledSetting(Shield.DLS_FLS_FEATURE), false)
.build());
assertThat(roles, notNullValue());
assertThat(roles.size(), is(7));
assertThat(roles.size(), is(6));
assertThat(roles.get("role_fields"), nullValue());
assertThat(roles.get("role_query"), nullValue());
assertThat(roles.get("role_query_fields"), nullValue());
@ -233,6 +229,7 @@ public class FileRolesStoreTests extends ESTestCase {
* This test is mainly to make sure we can read the default roles.yml config
*/
public void testDefaultRolesFile() throws Exception {
// TODO we should add the config dir to the resources so we don't copy this stuff around...
Path path = getDataPath("default_roles.yml");
Map<String, Role> roles = FileRolesStore.parseFile(path, logger, Settings.EMPTY);
assertThat(roles, notNullValue());
@ -241,11 +238,11 @@ public class FileRolesStoreTests extends ESTestCase {
assertThat(roles, hasKey("admin"));
assertThat(roles, hasKey("power_user"));
assertThat(roles, hasKey("user"));
assertThat(roles, hasKey("kibana3"));
assertThat(roles, hasKey("kibana4"));
assertThat(roles, hasKey("transport_client"));
assertThat(roles, hasKey("kibana4_server"));
assertThat(roles, hasKey("logstash"));
assertThat(roles, hasKey("monitoring_user"));
assertThat(roles, hasKey("monitoring_agent"));
assertThat(roles, hasKey("remote_monitoring_agent"));
}
public void testAutoReload() throws Exception {
@ -288,7 +285,8 @@ public class FileRolesStoreTests extends ESTestCase {
writer.newLine();
writer.newLine();
writer.append("role5:").append(System.lineSeparator());
writer.append(" cluster: 'MONITOR'");
writer.append(" cluster:").append(System.lineSeparator());
writer.append(" - 'MONITOR'");
}
if (!latch.await(5, TimeUnit.SECONDS)) {
@ -327,24 +325,22 @@ public class FileRolesStoreTests extends ESTestCase {
assertThat(role.name(), equalTo("valid_role"));
List<CapturingLogger.Msg> entries = logger.output(CapturingLogger.Level.ERROR);
assertThat(entries, hasSize(5));
assertThat(entries, hasSize(6));
assertThat(entries.get(0).text, startsWith("invalid role definition [$dlk39] in roles file [" + path.toAbsolutePath() +
"]. invalid role name"));
assertThat(entries.get(1).text, startsWith("invalid role definition [role1] in roles file [" + path.toAbsolutePath() + "]"));
assertThat(entries.get(2).text, startsWith("invalid role definition [role2] in roles file [" + path.toAbsolutePath() +
"]. could not resolve cluster privileges [blkjdlkd]"));
assertThat(entries.get(3).text, startsWith("invalid role definition [role3] in roles file [" + path.toAbsolutePath() +
"]. [indices] field value must be an array"));
assertThat(entries.get(4).text, startsWith("invalid role definition [role4] in roles file [" + path.toAbsolutePath() +
"]. could not resolve indices privileges [al;kjdlkj;lkj]"));
assertThat(entries.get(2).text, startsWith("failed to parse role [role2]"));
assertThat(entries.get(3).text, startsWith("failed to parse role [role3]"));
assertThat(entries.get(4).text, startsWith("failed to parse role [role4]"));
assertThat(entries.get(5).text, startsWith("failed to parse indices privileges for role [role5]"));
}
public void testThatRoleNamesDoesNotResolvePermissions() throws Exception {
Path path = getDataPath("invalid_roles.yml");
CapturingLogger logger = new CapturingLogger(CapturingLogger.Level.ERROR);
Set<String> roleNames = FileRolesStore.parseFileForRoleNames(path, logger);
assertThat(roleNames.size(), is(5));
assertThat(roleNames, containsInAnyOrder("valid_role", "role1", "role2", "role3", "role4"));
assertThat(roleNames.size(), is(6));
assertThat(roleNames, containsInAnyOrder("valid_role", "role1", "role2", "role3", "role4", "role5"));
List<CapturingLogger.Msg> entries = logger.output(CapturingLogger.Level.ERROR);
assertThat(entries, hasSize(1));

View File

@ -309,9 +309,9 @@ public class TransportFilterTests extends ESIntegTestCase {
@Inject
public InternalPluginServerTransportService(Settings settings, Transport transport, ThreadPool threadPool,
AuthenticationService authcService, AuthorizationService authzService, ShieldActionMapper actionMapper,
ClientTransportFilter clientTransportFilter, NamedWriteableRegistry namedWriteableRegistry) {
ClientTransportFilter clientTransportFilter) {
super(settings, transport, threadPool, authcService, authzService, actionMapper, clientTransportFilter,
mock(ShieldLicenseState.class), namedWriteableRegistry);
mock(ShieldLicenseState.class));
when(licenseState.securityEnabled()).thenReturn(true);
}

View File

@ -9,10 +9,8 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.marvel.Marvel;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.shield.Shield;
import org.elasticsearch.shield.authc.esusers.ESUsersRealm;
import org.elasticsearch.shield.authc.esnative.ESNativeRealm;
import org.elasticsearch.shield.authc.support.Hasher;
@ -67,13 +65,13 @@ public class ShieldSettingsSource extends ClusterDiscoveryConfiguration.UnicastZ
public static final String CONFIG_ROLE_ALLOW_ALL =
DEFAULT_ROLE + ":\n" +
" cluster: ALL\n" +
" cluster: [ ALL ]\n" +
" indices:\n" +
" '*': ALL\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
DEFAULT_TRANSPORT_CLIENT_ROLE + ":\n" +
" cluster:\n" +
" - cluster:monitor/nodes/info\n" +
" - cluster:monitor/state";
" - transport_client";
private final Path parentFolder;
private final String subfolderPrefix;
@ -135,9 +133,6 @@ public class ShieldSettingsSource extends ClusterDiscoveryConfiguration.UnicastZ
.put("shield.authc.realms.index.type", ESNativeRealm.TYPE)
.put("shield.authc.realms.index.order", "1")
.put("shield.authz.store.files.roles", writeFile(folder, "roles.yml", configRoles()))
// Test framework sometimes randomly selects the 'index' or 'none' cache and that makes the
// validation in ShieldPlugin fail.
.put(IndexModule.INDEX_QUERY_CACHE_TYPE_SETTING.getKey(), Shield.OPT_OUT_QUERY_CACHE)
.put(getNodeSSLSettings());
return builder.build();

View File

@ -1,48 +1,71 @@
admin:
cluster: all
cluster:
- all
indices:
'*': all
- names: '*'
privileges:
- all
# monitoring cluster privileges
# All operations on all indices
power_user:
cluster: monitor
cluster:
- monitor
indices:
'*': all
- names: '*'
privileges:
- all
# Only operations on indices
# Read-only operations on indices
user:
indices:
'*': read
- names: '*'
privileges:
- read
# The required role for kibana 3 users
kibana3:
cluster: cluster:monitor/nodes/info
indices:
'*': indices:data/read/search, indices:data/read/get, indices:admin/get
'kibana-int': indices:data/read/search, indices:data/read/get, indices:data/write/delete, indices:data/write/index, create_index
# Defines the required permissions for transport clients
transport_client:
cluster:
- transport_client
# The required role for kibana 4 users
kibana4:
cluster: cluster:monitor/nodes/info
# The required permissions for the kibana 4 server
kibana4_server:
cluster:
- monitor
indices:
'*': indices:data/read/search, indices:data/read/get, indices:admin/get
'.kibana': indices:data/read/search, indices:data/read/get, indices:data/write/delete, indices:data/write/index, create_index
- names: '.kibana'
privileges:
- all
# The required role for logstash users
logstash:
cluster: indices:admin/template/get, indices:admin/template/put
cluster:
- manage_index_templates
indices:
'logstash-*': indices:data/write/bulk, indices:data/write/delete, indices:data/write/update, create_index
- names: 'logstash-*'
privileges:
- write
- read
- create_index
# Monitoring role, allowing all operations
# on the monitoring indices
# Marvel user role. Assign to marvel users.
monitoring_user:
indices:
'.monitoring-*': all
- names:
- '.marvel-es-*'
- '.monitoring-*'
privileges: [ "read" ]
- names: '.kibana'
privileges:
- view_index_metadata
- read
# Monitoring Agent users
monitoring_agent:
cluster: indices:admin/template/get, indices:admin/template/put
# Marvel remote agent role. Assign to the agent user on the remote marvel cluster
# to which the marvel agent will export all its data
remote_monitoring_agent:
cluster: [ "manage_index_templates" ]
indices:
'.monitoring-*': indices:data/write/bulk, create_index
- names:
- '.marvel-es-*'
- '.monitoring-*'
privileges: [ "all" ]

View File

@ -1,7 +1,10 @@
valid_role:
cluster: ALL
cluster:
- ALL
indices:
idx: ALL
- names: idx
privileges:
- ALL
"$dlk39":
cluster: all
@ -24,4 +27,21 @@ role3:
role4:
cluster: ALL
indices:
'*': al;kjdlkj;lkj
'*': al;kjdlkj;lkj
#dadfad
# role won't be available since empty privileges...
role5:
cluster:
indices:
- names:
#adfldkkd
- idx2
privileges:
- names:
- ''
privileges:
- READ
- names:
- 'idx1'
privileges: []

View File

@ -1,14 +1,22 @@
admin:
cluster: all
cluster:
- all
indices:
'*': all
- names: '*'
privileges: [ all ]
__es_system_role:
cluster: all
cluster:
- all
indices:
'*' : all
- names: '*'
privileges:
- all
__es_internal_role:
cluster: all
cluster:
- all
indices:
'*' : all
- names: '*'
privileges:
- all

View File

@ -1,31 +1,38 @@
role1:
cluster: ALL
cluster:
- ALL
indices:
'idx1,idx2': READ
idx3: crud
- names:
- idx1
- idx2
privileges:
- READ
- names: idx3
privileges:
- READ
- WRITE
role1.ab:
cluster: ALL
cluster:
- ALL
role2:
cluster: ALL, MONITOR
cluster:
- ALL
- MONITOR
role3:
indices:
'/.*_.*/': READ, WRITE
#dadfad
role4:
cluster:
indices:
#adfldkkd
'idx2':
'': READ
'idx1': []
- names: '/.*_.*/'
privileges:
- READ
- WRITE
# role with run_as permissions only
role_run_as:
run_as: "user1,user2"
run_as:
- user1
- user2
# role with more than run_as
role_run_as1:
@ -33,23 +40,31 @@ role_run_as1:
role_fields:
indices:
'field_idx':
privileges: READ
- names:
#23456789ohbh
- 'field_idx'
privileges:
- READ
fields:
- foo
- boo
role_query:
indices:
'query_idx':
privileges: READ
- names:
- 'query_idx'
privileges:
- READ
query: '{ "match_all": {} }'
role_query_fields:
indices:
'query_fields_idx':
privileges: READ
query: '{ "match_all": {} }'
- names:
- 'query_fields_idx'
privileges:
- READ
query:
match_all:
fields:
- foo
- boo

View File

@ -54,7 +54,7 @@ indices:monitor/upgrade
indices:data/read/explain
indices:data/read/field_stats
indices:data/read/get
indices:data/read/graph/explore
indices:data/read/xpack/graph/explore
indices:data/read/mget
indices:data/read/mpercolate
indices:data/read/msearch
@ -72,17 +72,17 @@ indices:data/write/index
indices:data/write/script/delete
indices:data/write/script/put
indices:data/write/update
cluster:admin/plugin/license/get
cluster:admin/plugin/license/delete
cluster:admin/plugin/license/put
cluster:admin/shield/realm/cache/clear
cluster:admin/shield/roles/cache/clear
cluster:admin/shield/user/put
cluster:admin/shield/user/delete
cluster:admin/shield/user/get
cluster:admin/shield/role/put
cluster:admin/shield/role/delete
cluster:admin/shield/role/get
cluster:monitor/xpack/license/get
cluster:admin/xpack/license/delete
cluster:admin/xpack/license/put
cluster:admin/xpack/security/realm/cache/clear
cluster:admin/xpack/security/roles/cache/clear
cluster:admin/xpack/security/user/put
cluster:admin/xpack/security/user/delete
cluster:admin/xpack/security/user/get
cluster:admin/xpack/security/role/put
cluster:admin/xpack/security/role/delete
cluster:admin/xpack/security/role/get
internal:indices/admin/upgrade
cluster:admin/ingest/pipeline/delete
cluster:admin/ingest/pipeline/get

View File

@ -8,16 +8,16 @@ cluster:monitor/nodes/liveness
cluster:monitor/nodes/stats[n]
cluster:monitor/stats[n]
cluster:monitor/tasks/lists[n]
cluster:admin/shield/realm/cache/clear
cluster:admin/shield/realm/cache/clear[n]
cluster:admin/shield/roles/cache/clear
cluster:admin/shield/roles/cache/clear[n]
cluster:admin/shield/role/put
cluster:admin/shield/role/delete
cluster:admin/shield/role/get
cluster:admin/shield/user/put
cluster:admin/shield/user/delete
cluster:admin/shield/user/get
cluster:admin/xpack/security/realm/cache/clear
cluster:admin/xpack/security/realm/cache/clear[n]
cluster:admin/xpack/security/roles/cache/clear
cluster:admin/xpack/security/roles/cache/clear[n]
cluster:admin/xpack/security/role/put
cluster:admin/xpack/security/role/delete
cluster:admin/xpack/security/role/get
cluster:admin/xpack/security/user/put
cluster:admin/xpack/security/user/delete
cluster:admin/xpack/security/user/get
indices:admin/analyze[s]
indices:admin/cache/clear[n]
indices:admin/forcemerge[n]
@ -36,7 +36,7 @@ indices:admin/validate/query[s]
indices:data/read/explain[s]
indices:data/read/field_stats[s]
indices:data/read/get[s]
indices:data/read/graph/explore
indices:data/read/xpack/graph/explore
indices:data/read/mget[shard][s]
indices:data/read/mpercolate[shard][s]
indices:data/read/mtv[shard][s]

View File

@ -4,7 +4,7 @@
"methods": [ "GET" ],
"url": {
"path": "/_shield/role/{name}",
"paths": [ "/_shield/role/{name}" ],
"paths": [ "/_shield/role/{name}", "/_shield/role" ],
"parts": {
"name": {
"type" : "string",

View File

@ -4,7 +4,7 @@
"methods": [ "GET" ],
"url": {
"path": "/_shield/user/{username}",
"paths": [ "/_shield/user/{username}" ],
"paths": [ "/_shield/user/{username}", "/_shield/user" ],
"parts": {
"username": {
"type" : "list",

View File

@ -21,7 +21,6 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.shield.Shield;
import org.elasticsearch.watcher.actions.WatcherActionModule;
import org.elasticsearch.watcher.actions.email.service.EmailService;
import org.elasticsearch.watcher.actions.email.service.InternalEmailService;
@ -120,12 +119,6 @@ public class Watcher {
transportClient = "transport".equals(settings.get(Client.CLIENT_TYPE_SETTING_S.getKey()));
enabled = enabled(settings);
validAutoCreateIndex(settings);
// adding the watcher privileges to shield
if (Shield.enabled(settings)) {
Shield.registerClusterPrivilege("manage_watcher", "cluster:admin/watcher/*", "cluster:monitor/watcher/*");
Shield.registerClusterPrivilege("monitor_watcher", "cluster:monitor/watcher/*");
}
}
public Collection<Module> nodeModules() {
@ -271,7 +264,7 @@ public class Watcher {
String errorMessage = LoggerMessageFormat.format("the [action.auto_create_index] setting value [{}] is too" +
" restrictive. disable [action.auto_create_index] or set it to " +
"[.watches,.triggered_watches,.watcher-history*]", (Object) settings);
"[.watches,.triggered_watches,.watcher-history*]", (Object) value);
if (Booleans.isExplicitFalse(value)) {
throw new IllegalArgumentException(errorMessage);
}

View File

@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.search.suggest.Suggesters;
import org.elasticsearch.watcher.input.Input;
import org.elasticsearch.watcher.support.SearchRequestEquivalence;
import org.elasticsearch.watcher.support.WatcherDateTimeUtils;
@ -111,7 +112,8 @@ public class SearchInput implements Input {
return builder;
}
public static SearchInput parse(String watchId, XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers)
public static SearchInput parse(String watchId, XContentParser parser, QueryParseContext context,
AggregatorParsers aggParsers, Suggesters suggesters)
throws IOException {
SearchRequest request = null;
Set<String> extract = null;
@ -125,7 +127,8 @@ public class SearchInput implements Input {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.REQUEST)) {
try {
request = WatcherUtils.readSearchRequest(parser, ExecutableSearchInput.DEFAULT_SEARCH_TYPE, context, aggParsers);
request = WatcherUtils.readSearchRequest(parser, ExecutableSearchInput.DEFAULT_SEARCH_TYPE, context,
aggParsers, suggesters);
} catch (ElasticsearchParseException srpe) {
throw new ElasticsearchParseException("could not parse [{}] input for watch [{}]. failed to parse [{}]", srpe, TYPE,
watchId, currentFieldName);

View File

@ -13,6 +13,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.search.suggest.Suggesters;
import org.elasticsearch.watcher.input.InputFactory;
import org.elasticsearch.watcher.input.simple.ExecutableSimpleInput;
import org.elasticsearch.watcher.support.init.proxy.WatcherClientProxy;
@ -26,16 +27,18 @@ public class SearchInputFactory extends InputFactory<SearchInput, SearchInput.Re
private final WatcherClientProxy client;
private final TimeValue defaultTimeout;
private IndicesQueriesRegistry queryRegistry;
private AggregatorParsers aggParsers;
private final IndicesQueriesRegistry queryRegistry;
private final AggregatorParsers aggParsers;
private final Suggesters suggesters;
@Inject
public SearchInputFactory(Settings settings, WatcherClientProxy client, IndicesQueriesRegistry queryRegistry,
AggregatorParsers aggParsers) {
AggregatorParsers aggParsers, Suggesters suggesters) {
super(Loggers.getLogger(ExecutableSimpleInput.class, settings));
this.client = client;
this.queryRegistry = queryRegistry;
this.aggParsers = aggParsers;
this.suggesters = suggesters;
this.defaultTimeout = settings.getAsTime("watcher.input.search.default_timeout", null);
}
@ -48,7 +51,7 @@ public class SearchInputFactory extends InputFactory<SearchInput, SearchInput.Re
public SearchInput parseInput(String watchId, XContentParser parser) throws IOException {
QueryParseContext context = new QueryParseContext(queryRegistry);
context.reset(parser);
return SearchInput.parse(watchId, parser, context, aggParsers);
return SearchInput.parse(watchId, parser, context, aggParsers, suggesters);
}
@Override

View File

@ -22,6 +22,7 @@ import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.script.Template;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.suggest.Suggesters;
import org.elasticsearch.watcher.execution.WatchExecutionContext;
import org.elasticsearch.watcher.watch.Payload;
import org.joda.time.DateTime;
@ -101,7 +102,7 @@ public final class WatcherUtils {
* Reads a new search request instance for the specified parser.
*/
public static SearchRequest readSearchRequest(XContentParser parser, SearchType searchType, QueryParseContext context,
AggregatorParsers aggParsers)
AggregatorParsers aggParsers, Suggesters suggesters)
throws IOException {
IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS;
SearchRequest searchRequest = new SearchRequest();
@ -112,7 +113,7 @@ public final class WatcherUtils {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
if (ParseFieldMatcher.STRICT.match(currentFieldName, BODY_FIELD)) {
searchRequest.source(SearchSourceBuilder.parseSearchSource(parser, context, aggParsers));
searchRequest.source(SearchSourceBuilder.parseSearchSource(parser, context, aggParsers, suggesters));
}
} else if (token == XContentParser.Token.START_ARRAY) {
if (ParseFieldMatcher.STRICT.match(currentFieldName, INDICES_FIELD)) {

View File

@ -281,7 +281,7 @@ public class HttpClient extends AbstractLifecycleComponent<HttpClient> {
if (keyStore == null) {
return null;
}
Path path = env.binFile().getParent().resolve(keyStore);
Path path = env.configFile().resolve(keyStore);
if (Files.notExists(path)) {
return null;
}
@ -304,7 +304,7 @@ public class HttpClient extends AbstractLifecycleComponent<HttpClient> {
// Load TrustStore
KeyStore ks = null;
if (trustStore != null) {
Path trustStorePath = env.binFile().getParent().resolve(trustStore);
Path trustStorePath = env.configFile().resolve(trustStore);
if (Files.exists(trustStorePath)) {
ks = readKeystore(trustStorePath, trustStorePassword);
}

View File

@ -15,6 +15,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.search.suggest.Suggesters;
import org.elasticsearch.watcher.support.SearchRequestEquivalence;
import org.elasticsearch.watcher.support.WatcherDateTimeUtils;
import org.elasticsearch.watcher.support.WatcherUtils;
@ -93,7 +94,8 @@ public class SearchTransform implements Transform {
return builder;
}
public static SearchTransform parse(String watchId, XContentParser parser, QueryParseContext context, AggregatorParsers aggParsers)
public static SearchTransform parse(String watchId, XContentParser parser, QueryParseContext context,
AggregatorParsers aggParsers, Suggesters suggesters)
throws IOException {
SearchRequest request = null;
TimeValue timeout = null;
@ -106,7 +108,8 @@ public class SearchTransform implements Transform {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.REQUEST)) {
try {
request = WatcherUtils.readSearchRequest(parser, ExecutableSearchTransform.DEFAULT_SEARCH_TYPE, context, aggParsers);
request = WatcherUtils.readSearchRequest(parser, ExecutableSearchTransform.DEFAULT_SEARCH_TYPE, context,
aggParsers, suggesters);
} catch (ElasticsearchParseException srpe) {
throw new ElasticsearchParseException("could not parse [{}] transform for watch [{}]. failed to parse [{}]", srpe,
TYPE, watchId, currentFieldName);

View File

@ -14,6 +14,7 @@ import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.watcher.support.init.proxy.WatcherClientProxy;
import org.elasticsearch.search.suggest.Suggesters;
import org.elasticsearch.watcher.transform.TransformFactory;
import java.io.IOException;
@ -25,16 +26,18 @@ public class SearchTransformFactory extends TransformFactory<SearchTransform, Se
protected final WatcherClientProxy client;
private final TimeValue defaultTimeout;
private IndicesQueriesRegistry queryRegistry;
private AggregatorParsers aggParsers;
private final IndicesQueriesRegistry queryRegistry;
private final AggregatorParsers aggParsers;
private final Suggesters suggesters;
@Inject
public SearchTransformFactory(Settings settings, WatcherClientProxy client, IndicesQueriesRegistry queryRegistry,
AggregatorParsers aggParsers) {
AggregatorParsers aggParsers, Suggesters suggesters) {
super(Loggers.getLogger(ExecutableSearchTransform.class, settings));
this.client = client;
this.queryRegistry = queryRegistry;
this.aggParsers = aggParsers;
this.suggesters = suggesters;
this.defaultTimeout = settings.getAsTime("watcher.transform.search.default_timeout", null);
}
@ -47,7 +50,7 @@ public class SearchTransformFactory extends TransformFactory<SearchTransform, Se
public SearchTransform parseTransform(String watchId, XContentParser parser) throws IOException {
QueryParseContext context = new QueryParseContext(queryRegistry);
context.reset(parser);
return SearchTransform.parse(watchId, parser, context, aggParsers);
return SearchTransform.parse(watchId, parser, context, aggParsers, suggesters);
}
@Override

Some files were not shown because too many files have changed in this diff Show More