security: file parsing only supports the new format

This commit removes the pre-existing file parsing code and replaces it with the updated
parsing code in the RoleDescriptor class. This unifies role parsing between the roles file and the API.
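
For illustration, here is the same admin role in the legacy format that this commit stops accepting and in the structured format that RoleDescriptor parses (taken from the first roles file updated below):

Legacy format (no longer parsed):

    admin:
      cluster: all
      indices:
        '*':
          privileges: all
      run_as: '*'

New format:

    admin:
      cluster:
        - all
      indices:
        - names: '*'
          privileges: [ all ]
      run_as:
        - '*'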

Closes elastic/elasticsearch#1596

Original commit: elastic/x-pack-elasticsearch@9e0b58fcf1
jaymode 2016-03-15 13:10:40 -04:00
parent 1161edca2c
commit 9e08579d4f
36 changed files with 658 additions and 546 deletions

View File

@ -1,51 +1,77 @@
admin:
cluster: all
cluster:
- all
indices:
'*':
privileges: all
run_as: '*'
- names: '*'
privileges: [ all ]
run_as:
- '*'
# Search and write on both source and destination indices. It should work if you could just search on the source and
# write to the destination but that isn't how shield works.
minimal:
indices:
source:
privileges: search, write, create_index, indices:admin/refresh
dest:
privileges: search, write, create_index, indices:admin/refresh
- names: source
privileges:
- search
- write
- create_index
- indices:admin/refresh
- names: dest
privileges:
- search
- write
- create_index
- indices:admin/refresh
# Read only operations on indices
readonly:
indices:
'*':
privileges: search
- names: '*'
privileges: [ search ]
# Write operations on destination index, none on source index
dest_only:
indices:
dest:
privileges: write
- names: dest
privileges: [ write ]
# Search and write on both source and destination indices with document level security filtering out some docs.
can_not_see_hidden_docs:
indices:
source:
privileges: search, write, create_index, indices:admin/refresh
- names: source
privileges:
- search
- write
- create_index
- indices:admin/refresh
query:
bool:
must_not:
match:
hidden: true
dest:
privileges: search, write, create_index, indices:admin/refresh
- names: dest
privileges:
- search
- write
- create_index
- indices:admin/refresh
# Search and write on both source and destination indices with field level security.
can_not_see_hidden_fields:
indices:
source:
privileges: search, write, create_index, indices:admin/refresh
- names: source
privileges:
- search
- write
- create_index
- indices:admin/refresh
fields:
- foo
- bar
dest:
privileges: search, write, create_index, indices:admin/refresh
- names: dest
privileges:
- search
- write
- create_index
- indices:admin/refresh

View File

@ -1,17 +1,30 @@
admin:
cluster: all
cluster:
- all
indices:
'*': all
- names: '*'
privileges:
- all
graph_explorer:
cluster: cluster:monitor/health
cluster:
- cluster:monitor/health
indices:
'*':
privileges: graph, indices:data/write/index, indices:admin/refresh, indices:admin/create
- names: '*'
privileges:
- graph
- indices:data/write/index
- indices:admin/refresh
- indices:admin/create
no_graph_explorer:
cluster: cluster:monitor/health
cluster:
- cluster:monitor/health
indices:
'*':
privileges: indices:data/read/search, indices:data/write/index, indices:admin/refresh, indices:admin/create
- names: '*'
privileges:
- indices:data/read/search
- indices:data/write/index
- indices:admin/refresh
- indices:admin/create

View File

@ -1,18 +1,31 @@
admin:
cluster: all
cluster:
- all
indices:
'*': all
- names: '*'
privileges:
- all
watcher_manager:
cluster: manage_watcher, cluster:monitor/nodes/info, cluster:monitor/health
cluster:
- manage_watcher
- cluster:monitor/nodes/info
- cluster:monitor/health
indices:
'.watcher-history-*': all
run_as: powerless_user, watcher_manager
- names: '.watcher-history-*'
privileges:
- all
run_as:
- powerless_user
- watcher_manager
watcher_monitor:
cluster: monitor_watcher
cluster:
- monitor_watcher
indices:
'.watcher-history-*': read
- names: '.watcher-history-*'
privileges:
- read
crappy_role:
cluster:

View File

@ -436,20 +436,21 @@ public abstract class MarvelIntegTestCase extends ESIntegTestCase {
public static final String ROLES =
"test:\n" + // a user for the test infra.
" cluster: cluster:monitor/nodes/info, cluster:monitor/nodes/stats, cluster:monitor/state, " +
"cluster:monitor/health, cluster:monitor/stats, cluster:monitor/task, cluster:admin/settings/update, " +
"cluster:admin/repository/delete, cluster:monitor/nodes/liveness, indices:admin/template/get, " +
"indices:admin/template/put, indices:admin/template/delete\n" +
" cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/state', 'cluster:monitor/health', 'cluster:monitor/stats'," +
" 'cluster:admin/settings/update', 'cluster:admin/repository/delete', 'cluster:monitor/nodes/liveness'," +
" 'indices:admin/template/get', 'indices:admin/template/put', 'indices:admin/template/delete'," +
" 'cluster:monitor/task']\n" +
" indices:\n" +
" '*': all\n" +
" - names: '*'\n" +
" privileges: [ all ]\n" +
"\n" +
"admin:\n" +
" cluster: cluster:monitor/nodes/info, cluster:monitor/nodes/liveness\n" +
" cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n" +
"transport_client:\n" +
" cluster: cluster:monitor/nodes/info, cluster:monitor/nodes/liveness\n" +
" cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n" +
"\n" +
"monitor:\n" +
" cluster: cluster:monitor/nodes/info, cluster:monitor/nodes/liveness\n"
" cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n"
;

View File

@ -1,22 +1,27 @@
admin:
cluster: all
cluster:
- all
indices:
'*':
privileges: all
- names: '*'
privileges:
- all
# monitoring cluster privileges
# All operations on all indices
power_user:
cluster: monitor
cluster:
- monitor
indices:
'*':
privileges: all
- names: '*'
privileges:
- all
# Read-only operations on indices
user:
indices:
'*':
privileges: read
- names: '*'
privileges:
- read
# Defines the required permissions for transport clients
transport_client:
@ -31,10 +36,27 @@ kibana4:
- cluster:monitor/nodes/info
- cluster:monitor/health
indices:
'*':
privileges: indices:admin/mappings/fields/get, indices:admin/validate/query, indices:data/read/search, indices:data/read/msearch, indices:data/read/field_stats, indices:admin/get
'.kibana':
privileges: indices:admin/exists, indices:admin/mapping/put, indices:admin/mappings/fields/get, indices:admin/refresh, indices:admin/validate/query, indices:data/read/get, indices:data/read/mget, indices:data/read/search, indices:data/write/delete, indices:data/write/index, indices:data/write/update
- names: '*'
privileges:
- indices:admin/mappings/fields/get
- indices:admin/validate/query
- indices:data/read/search
- indices:data/read/msearch
- indices:data/read/field_stats
- indices:admin/get
- names: '.kibana'
privileges:
- indices:admin/exists
- indices:admin/mapping/put
- indices:admin/mappings/fields/get
- indices:admin/refresh
- indices:admin/validate/query
- indices:data/read/get
- indices:data/read/mget
- indices:data/read/search
- indices:data/write/delete
- indices:data/write/index
- indices:data/write/update
# The required permissions for the kibana 4 server
kibana4_server:
@ -42,32 +64,63 @@ kibana4_server:
- cluster:monitor/nodes/info
- cluster:monitor/health
indices:
'.kibana':
privileges: indices:admin/create, indices:admin/exists, indices:admin/mapping/put, indices:admin/mappings/fields/get, indices:admin/refresh, indices:admin/validate/query, indices:data/read/get, indices:data/read/mget, indices:data/read/search, indices:data/write/delete, indices:data/write/index, indices:data/write/update
- names: '.kibana'
privileges:
- indices:admin/create
- indices:admin/exists
- indices:admin/mapping/put
- indices:admin/mappings/fields/get
- indices:admin/refresh
- indices:admin/validate/query
- indices:data/read/get
- indices:data/read/mget
- indices:data/read/search
- indices:data/write/delete
- indices:data/write/index
- indices:data/write/update
# The required role for logstash users
logstash:
cluster: indices:admin/template/get, indices:admin/template/put
cluster:
- indices:admin/template/get
- indices:admin/template/put
indices:
'logstash-*':
privileges: indices:data/write/bulk, indices:data/write/delete, indices:data/write/update, indices:data/read/search, indices:data/read/scroll, create_index
- names: 'logstash-*'
privileges:
- indices:data/write/bulk
- indices:data/write/delete
- indices:data/write/update
- indices:data/read/search
- indices:data/read/scroll
- create_index
# Monitoring user role. Assign to monitoring users.
monitoring_user:
indices:
'.monitoring-*':
privileges: read
'.kibana':
privileges: indices:admin/exists, indices:admin/mappings/fields/get, indices:admin/validate/query, indices:data/read/get, indices:data/read/mget, indices:data/read/search
- names: '.monitoring-*'
privileges:
- read
- names: '.kibana'
privileges:
- indices:admin/exists
- indices:admin/mappings/fields/get
- indices:admin/validate/query
- indices:data/read/get
- indices:data/read/mget
- indices:data/read/search
# Monitoring remote agent role. Assign to the agent user on the remote monitoring cluster
# to which the monitoring agent will export all its data
remote_monitoring_agent:
cluster: indices:admin/template/put, indices:admin/template/get
cluster:
- indices:admin/template/put
- indices:admin/template/get
indices:
'.monitoring-*':
privileges: all
- names: '.monitoring-*'
privileges:
- all
# Allows all operations required to manage ingest pipelines
ingest_admin:
cluster: manage_pipeline
cluster:
- manage_pipeline

View File

@ -10,15 +10,19 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.shield.support.Validation;
import org.elasticsearch.xpack.common.xcontent.XContentUtils;
import java.io.IOException;
@ -132,48 +136,69 @@ public class RoleDescriptor implements ToXContent {
out.writeStringArray(descriptor.runAs);
}
public static RoleDescriptor parse(String name, BytesReference source) throws Exception {
public static RoleDescriptor parse(String name, BytesReference source) throws IOException {
assert name != null;
try (XContentParser parser = XContentHelper.createParser(source)) {
XContentParser.Token token = parser.nextToken(); // advancing to the START_OBJECT token
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("failed to parse role [{}]. expected an object but found [{}] instead", name, token);
}
String currentFieldName = null;
IndicesPrivileges[] indicesPrivileges = null;
String[] clusterPrivileges = null;
String[] runAsUsers = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.INDICES)) {
indicesPrivileges = parseIndices(name, parser);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.RUN_AS)) {
runAsUsers = XContentUtils.readStringArray(parser, true);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.CLUSTER)) {
clusterPrivileges = XContentUtils.readStringArray(parser, true);
} else {
throw new ElasticsearchParseException("failed to parse role [{}]. unexpected field [{}]", name, currentFieldName);
}
}
return new RoleDescriptor(name, clusterPrivileges, indicesPrivileges, runAsUsers);
return parse(name, parser);
}
}
private static RoleDescriptor.IndicesPrivileges[] parseIndices(String roleName, XContentParser parser) throws Exception {
public static RoleDescriptor parse(String name, XContentParser parser) throws IOException {
// validate name
Validation.Error validationError = Validation.Roles.validateRoleName(name);
if (validationError != null) {
ValidationException ve = new ValidationException();
ve.addValidationError(validationError.toString());
throw ve;
}
// advance to the START_OBJECT token if needed
XContentParser.Token token = parser.currentToken() == null ? parser.nextToken() : parser.currentToken();
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("failed to parse role [{}]. expected an object but found [{}] instead", name, token);
}
String currentFieldName = null;
IndicesPrivileges[] indicesPrivileges = null;
String[] clusterPrivileges = null;
String[] runAsUsers = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.INDICES)) {
indicesPrivileges = parseIndices(name, parser);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.RUN_AS)) {
runAsUsers = readStringArray(name, parser, true);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.CLUSTER)) {
clusterPrivileges = readStringArray(name, parser, true);
} else {
throw new ElasticsearchParseException("failed to parse role [{}]. unexpected field [{}]", name, currentFieldName);
}
}
return new RoleDescriptor(name, clusterPrivileges, indicesPrivileges, runAsUsers);
}
private static String[] readStringArray(String roleName, XContentParser parser, boolean allowNull) throws IOException {
try {
return XContentUtils.readStringArray(parser, allowNull);
} catch (ElasticsearchParseException e) {
// re-wrap in order to add the role name
throw new ElasticsearchParseException("failed to parse role [{}]", e, roleName);
}
}
private static RoleDescriptor.IndicesPrivileges[] parseIndices(String roleName, XContentParser parser) throws IOException {
if (parser.currentToken() != XContentParser.Token.START_ARRAY) {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. expected field [{}] value " +
"to be an array, but found [{}] instead", roleName, parser.currentName(), parser.currentToken());
}
List<RoleDescriptor.IndicesPrivileges> privileges = new ArrayList<>();
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
privileges.add(parseIndex(roleName, parser));
}
return privileges.toArray(new IndicesPrivileges[privileges.size()]);
}
private static RoleDescriptor.IndicesPrivileges parseIndex(String roleName, XContentParser parser) throws Exception {
private static RoleDescriptor.IndicesPrivileges parseIndex(String roleName, XContentParser parser) throws IOException {
XContentParser.Token token = parser.currentToken();
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. expected field [{}] value to " +
@ -191,7 +216,7 @@ public class RoleDescriptor implements ToXContent {
if (token == XContentParser.Token.VALUE_STRING) {
names = new String[] { parser.text() };
} else if (token == XContentParser.Token.START_ARRAY) {
names = XContentUtils.readStringArray(parser, false);
names = readStringArray(roleName, parser, false);
if (names.length == 0) {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. [{}] cannot be an empty " +
"array", roleName, currentFieldName);
@ -201,15 +226,21 @@ public class RoleDescriptor implements ToXContent {
"value to be a string or an array of strings, but found [{}] instead", roleName, currentFieldName, token);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.QUERY)) {
query = parser.textOrNull();
if (token == XContentParser.Token.START_OBJECT) {
XContentBuilder builder = JsonXContent.contentBuilder();
XContentHelper.copyCurrentStructure(builder.generator(), parser);
query = builder.string();
} else {
query = parser.textOrNull();
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.PRIVILEGES)) {
privileges = XContentUtils.readStringArray(parser, false);
privileges = readStringArray(roleName, parser, true);
if (names.length == 0) {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. [{}] cannot be an empty " +
"array", roleName, currentFieldName);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.FIELDS)) {
fields = XContentUtils.readStringArray(parser, true);
fields = readStringArray(roleName, parser, true);
} else {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. unexpected field [{}]",
roleName, currentFieldName);
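
As the parsing change above shows, the query in an indices entry may be supplied either as a JSON string or as a structured object, which the parser copies back into a JSON string. A minimal sketch under the new format, assuming document and field level security is enabled; the role and index names are hypothetical:

    # role and index names are hypothetical examples
    dls_as_string:
      indices:
        - names: 'events-*'
          privileges: [ read ]
          # document level security query as an inline JSON string
          query: '{ "term": { "visible": true } }'
    dls_as_object:
      indices:
        - names: 'events-*'
          privileges: [ read ]
          # the same query expressed as a structured object
          query:
            term:
              visible: true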

View File

@ -68,7 +68,11 @@ public class Role extends GlobalPermission {
private Builder(RoleDescriptor rd) {
this.name = rd.getName();
this.cluster(ClusterPrivilege.get((new Privilege.Name(rd.getClusterPrivileges()))));
if (rd.getClusterPrivileges().length == 0) {
cluster = ClusterPermission.Core.NONE;
} else {
this.cluster(ClusterPrivilege.get((new Privilege.Name(rd.getClusterPrivileges()))));
}
for (RoleDescriptor.IndicesPrivileges iGroup : rd.getIndicesPrivileges()) {
this.add(iGroup.getFields() == null ? null : Arrays.asList(iGroup.getFields()),
iGroup.getQuery(),

View File

@ -7,28 +7,20 @@ package org.elasticsearch.shield.authz.store;
import com.fasterxml.jackson.dataformat.yaml.snakeyaml.error.YAMLException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.common.xcontent.yaml.YamlXContent;
import org.elasticsearch.env.Environment;
import org.elasticsearch.shield.Shield;
import org.elasticsearch.shield.SystemUser;
import org.elasticsearch.shield.XPackUser;
import org.elasticsearch.shield.authc.support.RefreshListener;
import org.elasticsearch.shield.authz.RoleDescriptor;
import org.elasticsearch.shield.authz.permission.Role;
import org.elasticsearch.shield.authz.privilege.ClusterPrivilege;
import org.elasticsearch.shield.authz.privilege.GeneralPrivilege;
import org.elasticsearch.shield.authz.privilege.IndexPrivilege;
import org.elasticsearch.shield.authz.privilege.Privilege;
import org.elasticsearch.shield.support.NoOpLogger;
import org.elasticsearch.shield.support.Validation;
import org.elasticsearch.watcher.FileChangesListener;
@ -41,10 +33,7 @@ import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -59,7 +48,6 @@ import static java.util.Collections.unmodifiableMap;
*/
public class FileRolesStore extends AbstractLifecycleComponent<RolesStore> implements RolesStore {
private static final Pattern COMMA_DELIM = Pattern.compile("\\s*,\\s*");
private static final Pattern IN_SEGMENT_LINE = Pattern.compile("^\\s+.+");
private static final Pattern SKIP_LINE = Pattern.compile("(^#.*|^\\s*)");
@ -174,226 +162,40 @@ public class FileRolesStore extends AbstractLifecycleComponent<RolesStore> imple
return null;
}
Role.Builder role = Role.builder(roleName);
if (resolvePermissions == false) {
return role.build();
return Role.builder(roleName).build();
}
token = parser.nextToken();
if (token == XContentParser.Token.START_OBJECT) {
String currentFieldName = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if ("cluster".equals(currentFieldName)) {
Privilege.Name name = null;
if (token == XContentParser.Token.VALUE_STRING) {
String namesStr = parser.text().trim();
if (Strings.hasLength(namesStr)) {
String[] names = COMMA_DELIM.split(namesStr);
name = new Privilege.Name(names);
}
} else if (token == XContentParser.Token.START_ARRAY) {
Set<String> names = new HashSet<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
names.add(parser.text());
}
}
if (!names.isEmpty()) {
name = new Privilege.Name(names);
}
} else if (token == XContentParser.Token.VALUE_NULL) {
continue;
} else {
logger.error("invalid role definition [{}] in roles file [{}]. [cluster] field value can either " +
"be a string or a list of strings, but [{}] was found instead. skipping role...",
roleName, path.toAbsolutePath(), token);
return null;
}
if (name != null) {
try {
role.cluster(ClusterPrivilege.get(name));
} catch (IllegalArgumentException e) {
logger.error("invalid role definition [{}] in roles file [{}]. could not resolve cluster " +
"privileges [{}]. skipping role...", roleName, path.toAbsolutePath(), name);
return null;
}
}
} else if ("indices".equals(currentFieldName)) {
if (token == XContentParser.Token.START_OBJECT) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (Strings.hasLength(currentFieldName)) {
String[] indices = COMMA_DELIM.split(currentFieldName);
Privilege.Name name = null;
if (token == XContentParser.Token.VALUE_STRING) {
String namesStr = parser.text().trim();
if (Strings.hasLength(namesStr)) {
String[] names = COMMA_DELIM.split(parser.text());
name = new Privilege.Name(names);
}
} else if (token == XContentParser.Token.START_ARRAY) {
Set<String> names = new HashSet<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
names.add(parser.text());
} else {
logger.error("invalid role definition [{}] in roles file [{}]. could not parse " +
"[{}] as index privilege. privilege names must be strings. skipping " +
"role...", roleName, path.toAbsolutePath(), token);
return null;
}
}
if (!names.isEmpty()) {
name = new Privilege.Name(names);
}
} else if (token == XContentParser.Token.START_OBJECT) {
List<String> fields = null;
BytesReference query = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if ("fields".equals(currentFieldName)) {
if (token == XContentParser.Token.START_ARRAY) {
fields = (List) parser.list();
} else if (token.isValue()) {
String field = parser.text();
if (field.trim().isEmpty()) {
// The yaml parser doesn't emit null token if the key is empty...
fields = Collections.emptyList();
} else {
fields = Collections.singletonList(field);
}
} else if (token == XContentParser.Token.VALUE_NULL) {
fields = Collections.emptyList();
}
} else if ("query".equals(currentFieldName)) {
if (token == XContentParser.Token.START_OBJECT) {
XContentBuilder builder = JsonXContent.contentBuilder();
XContentHelper.copyCurrentStructure(builder.generator(), parser);
query = builder.bytes();
} else if (token == XContentParser.Token.VALUE_STRING) {
query = new BytesArray(parser.text());
}
} else if ("privileges".equals(currentFieldName)) {
if (token == XContentParser.Token.VALUE_STRING) {
String namesStr = parser.text().trim();
if (Strings.hasLength(namesStr)) {
String[] names = COMMA_DELIM.split(parser.text());
name = new Privilege.Name(names);
}
} else if (token == XContentParser.Token.START_ARRAY) {
Set<String> names = new HashSet<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
names.add(parser.text());
} else {
logger.error("invalid role definition [{}] in roles file [{}]. " +
"could not parse [{}] as index privilege. privilege " +
"names must be strings. skipping role...", roleName,
path.toAbsolutePath(), token);
return null;
}
}
if (!names.isEmpty()) {
name = new Privilege.Name(names);
}
}
}
}
if (name != null) {
if ((query != null || (fields != null && fields.isEmpty() == false)) &&
Shield.flsDlsEnabled(settings) == false) {
logger.error("invalid role definition [{}] in roles file [{}]. " +
"document and field level security is not enabled. " +
"set [{}] to [true] in the configuration file. skipping role...",
roleName, path.toAbsolutePath(),
XPackPlugin.featureEnabledSetting(Shield.DLS_FLS_FEATURE));
return null;
}
RoleDescriptor descriptor = RoleDescriptor.parse(roleName, parser);
try {
role.add(fields, query, IndexPrivilege.get(name), indices);
} catch (IllegalArgumentException e) {
logger.error("invalid role definition [{}] in roles file [{}]. could not " +
"resolve indices privileges [{}]. skipping role...", roleName,
path.toAbsolutePath(), name);
return null;
}
}
continue;
} else if (token == XContentParser.Token.VALUE_NULL) {
continue;
} else {
logger.error("invalid role definition [{}] in roles file [{}]. " +
"could not parse [{}] as index privileges. privilege lists must either " +
"be a comma delimited string or an array of strings. skipping role...", roleName,
path.toAbsolutePath(), token);
return null;
}
if (name != null) {
try {
role.add(IndexPrivilege.get(name), indices);
} catch (IllegalArgumentException e) {
logger.error("invalid role definition [{}] in roles file [{}]. could not resolve " +
"indices privileges [{}]. skipping role...", roleName, path.toAbsolutePath(),
name);
return null;
}
}
}
}
} else {
logger.error("invalid role definition [{}] in roles file [{}]. [indices] field value must be an array" +
" of indices-privileges mappings defined as a string" +
" in the form <comma-separated list of index name patterns>::<comma-separated list of" +
" privileges> , but [{}] was found instead. skipping role...",
roleName, path.toAbsolutePath(), token);
return null;
}
} else if ("run_as".equals(currentFieldName)) {
Set<String> names = new HashSet<>();
if (token == XContentParser.Token.VALUE_STRING) {
String namesStr = parser.text().trim();
if (Strings.hasLength(namesStr)) {
String[] namesArr = COMMA_DELIM.split(namesStr);
names.addAll(Arrays.asList(namesArr));
}
} else if (token == XContentParser.Token.START_ARRAY) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token == XContentParser.Token.VALUE_STRING) {
names.add(parser.text());
}
}
} else {
logger.error("invalid role definition [{}] in roles file [{}]. [run_as] field value can either " +
"be a string or a list of strings, but [{}] was found instead. skipping role...",
roleName, path.toAbsolutePath(), token);
return null;
}
if (!names.isEmpty()) {
Privilege.Name name = new Privilege.Name(names);
try {
role.runAs(new GeneralPrivilege(new Privilege.Name(names),
names.toArray(new String[names.size()])));
} catch (IllegalArgumentException e) {
logger.error("invalid role definition [{}] in roles file [{}]. could not resolve run_as " +
"privileges [{}]. skipping role...", roleName, path.toAbsolutePath(), name);
return null;
}
}
} else {
logger.warn("unknown field [{}] found in role definition [{}] in roles file [{}]", currentFieldName,
roleName, path.toAbsolutePath());
// first check if FLS/DLS is enabled on the role...
for (RoleDescriptor.IndicesPrivileges privilege : descriptor.getIndicesPrivileges()) {
if ((privilege.getQuery() != null || privilege.getFields() != null)
&& Shield.flsDlsEnabled(settings) == false) {
logger.error("invalid role definition [{}] in roles file [{}]. document and field level security is not " +
"enabled. set [{}] to [true] in the configuration file. skipping role...", roleName, path
.toAbsolutePath(), XPackPlugin.featureEnabledSetting(Shield.DLS_FLS_FEATURE));
return null;
}
}
return role.build();
return Role.builder(descriptor).build();
} else {
logger.error("invalid role definition [{}] in roles file [{}]. skipping role...", roleName, path.toAbsolutePath());
return null;
}
logger.error("invalid role definition [{}] in roles file [{}]. skipping role...", roleName, path.toAbsolutePath());
}
}
logger.error("invalid role definition [{}] in roles file [{}]. skipping role...", roleName, path.toAbsolutePath());
} catch (ElasticsearchParseException e) {
assert roleName != null;
if (logger.isDebugEnabled()) {
logger.debug("parsing exception for role [{}]", e, roleName);
} else {
logger.error(e.getMessage() + ". skipping role...");
}
} catch (YAMLException | IOException e) {
if (roleName != null) {
logger.error("invalid role definition [{}] in roles file [{}]. skipping role...", e, roleName, path);

View File

@ -28,14 +28,15 @@ public class ClusterPrivilegeTests extends AbstractPrivilegeTestCase {
public static final String ROLES =
"role_a:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
"\n" +
"role_b:\n" +
" cluster: monitor\n" +
" cluster: [ monitor ]\n" +
"\n" +
"role_c:\n" +
" indices:\n" +
" 'someindex': all\n";
" - names: 'someindex'\n" +
" privileges: [ all ]\n";
public static final String USERS =
"user_a:" + USERS_PASSWD_HASHED + "\n" +

View File

@ -52,25 +52,25 @@ public class DocumentAndFieldLevelSecurityTests extends ShieldIntegTestCase {
protected String configRoles() {
return super.configRoles() +
"\nrole1:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: field1\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: [ field1 ]\n" +
" query: '{\"term\" : {\"field1\" : \"value1\"}}'\n" +
"role2:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: field2\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: [ field2 ]\n" +
" query: '{\"term\" : {\"field2\" : \"value2\"}}'\n" +
"role3:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: field1\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: [ field1 ]\n" +
" query: '{\"term\" : {\"field2\" : \"value2\"}}'\n";
}

View File

@ -60,10 +60,10 @@ public class DocumentLevelSecurityRandomTests extends ShieldIntegTestCase {
builder.append('\n');
for (int i = 1; i <= numberOfRoles; i++) {
builder.append("role").append(i).append(":\n");
builder.append(" cluster: all\n");
builder.append(" cluster: [ all ]\n");
builder.append(" indices:\n");
builder.append(" '*':\n");
builder.append(" privileges: ALL\n");
builder.append(" - names: '*'\n");
builder.append(" privileges: [ ALL ]\n");
builder.append(" query: \n");
builder.append(" term: \n");
builder.append(" field1: value").append(i).append('\n');

View File

@ -73,18 +73,18 @@ public class DocumentLevelSecurityTests extends ShieldIntegTestCase {
protected String configRoles() {
return super.configRoles() +
"\nrole1:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" query: \n" +
" term: \n" +
" field1: value1\n" +
"role2:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" query: '{\"term\" : {\"field2\" : \"value2\"}}'"; // <-- query defined as json in a string
}

View File

@ -80,30 +80,30 @@ public class FieldLevelSecurityRandomTests extends ShieldIntegTestCase {
return super.configRoles() +
"\nrole1:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields:\n" + roleFields.toString() +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields:\n" +roleFields.toString() +
"role2:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" test:\n" +
" privileges: ALL\n" +
" - names: test\n" +
" privileges: [ ALL ]\n" +
" fields:\n" +
" - field1\n" +
"role3:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" test:\n" +
" privileges: ALL\n" +
" - names: test\n" +
" privileges: [ ALL ]\n" +
" fields:\n" +
" - field2\n" +
"role4:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" test:\n" +
" privileges: ALL\n" +
" - names: test\n" +
" privileges: [ ALL ]\n" +
" fields:\n" +
" - field3\n";
}

View File

@ -80,41 +80,42 @@ public class FieldLevelSecurityTests extends ShieldIntegTestCase {
protected String configRoles() {
return super.configRoles() +
"\nrole1:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: field1\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: [ field1 ]\n" +
"role2:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: field2\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: [ field2 ]\n" +
"role3:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: \n" +
" - field1\n" +
" - field2\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields:\n" +
" - field1\n" +
" - field2\n" +
"role4:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields:\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: []\n" +
"role5:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*': ALL\n" +
" - names: '*'\n" +
" privileges: [ALL]\n" +
"role6:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*':\n" +
" privileges: ALL\n" +
" fields: 'field*'\n";
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
" fields: [ 'field*' ]\n";
}
@Override

View File

@ -29,56 +29,71 @@ public class IndexPrivilegeTests extends AbstractPrivilegeTestCase {
public static final String ROLES =
"all_cluster_role:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
"all_indices_role:\n" +
" indices:\n" +
" '*': all\n" +
" - names: '*'\n" +
" privileges: [ all ]\n" +
"all_a_role:\n" +
" indices:\n" +
" 'a': all\n" +
" - names: 'a'\n" +
" privileges: [ all ]\n" +
"read_a_role:\n" +
" indices:\n" +
" 'a': read\n" +
" - names: 'a'\n" +
" privileges: [ read ]\n" +
"write_a_role:\n" +
" indices:\n" +
" 'a': write\n" +
" - names: 'a'\n" +
" privileges: [ write ]\n" +
"read_ab_role:\n" +
" indices:\n" +
" 'a': read\n" +
" 'b': read\n" +
" - names: [ 'a', 'b' ]\n" +
" privileges: [ read ]\n" +
"get_b_role:\n" +
" indices:\n" +
" 'b': get\n" +
" - names: 'b'\n" +
" privileges: [ get ]\n" +
"search_b_role:\n" +
" indices:\n" +
" 'b': search\n" +
" - names: 'b'\n" +
" privileges: [ search ]\n" +
"all_regex_ab_role:\n" +
" indices:\n" +
" '/a|b/': all\n" +
" - names: '/a|b/'\n" +
" privileges: [ all ]\n" +
"manage_starts_with_a_role:\n" +
" indices:\n" +
" 'a*': manage\n" +
" - names: 'a*'\n" +
" privileges: [ manage ]\n" +
"data_access_all_role:\n" +
" indices:\n" +
" '*': data_access\n" +
" - names: '*'\n" +
" privileges: [ data_access ]\n" +
"create_c_role:\n" +
" indices:\n" +
" 'c': create_index\n" +
" - names: 'c'\n" +
" privileges: [ create_index ]\n" +
"monitor_b_role:\n" +
" indices:\n" +
" 'b': monitor\n" +
" - names: 'b'\n" +
" privileges: [ monitor ]\n" +
"crud_a_role:\n" +
" indices:\n" +
" 'a': crud\n" +
" - names: 'a'\n" +
" privileges: [ crud ]\n" +
"delete_b_role:\n" +
" indices:\n" +
" 'b': delete\n" +
" - names: 'b'\n" +
" privileges: [ delete ]\n" +
"index_a_role:\n" +
" indices:\n" +
" 'a': index\n" +
" - names: 'a'\n" +
" privileges: [ index ]\n" +
"search_a_role:\n" +
" indices:\n" +
" 'a': search\n" +
" - names: 'a'\n" +
" privileges: [ search ]\n" +
"\n";
public static final String USERS =

View File

@ -44,17 +44,17 @@ public class IndicesPermissionsWithAliasesWildcardsAndRegexsTests extends Shield
protected String configRoles() {
return super.configRoles() +
"\nrole1:\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" 't*':\n" +
" privileges: ALL\n" +
" fields: field1\n" +
" 'my_alias':\n" +
" privileges: ALL\n" +
" fields: field2\n" +
" '/an_.*/':\n" +
" privileges: ALL\n" +
" fields: field3\n";
" - names: 't*'\n" +
" privileges: [ALL]\n" +
" fields: [ field1 ]\n" +
" - names: 'my_alias'\n" +
" privileges: [ALL]\n" +
" fields: [field2]\n" +
" - names: '/an_.*/'\n" +
" privileges: [ALL]\n" +
" fields: [field3]\n";
}
@Override

View File

@ -56,20 +56,26 @@ import static org.hamcrest.Matchers.notNullValue;
public class LicensingTests extends ShieldIntegTestCase {
public static final String ROLES =
ShieldSettingsSource.DEFAULT_ROLE + ":\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*': manage\n" +
" '/.*/': write\n" +
" 'test': read\n" +
" 'test1': read\n" +
" - names: '*'\n" +
" privileges: [manage]\n" +
" - names: '/.*/'\n" +
" privileges: [write]\n" +
" - names: 'test'\n" +
" privileges: [read]\n" +
" - names: 'test1'\n" +
" privileges: [read]\n" +
"\n" +
"role_a:\n" +
" indices:\n" +
" 'a': all\n" +
" - names: 'a'\n" +
" privileges: [all]\n" +
"\n" +
"role_b:\n" +
" indices:\n" +
" 'b': all\n";
" - names: 'b'\n" +
" privileges: [all]\n";
public static final String USERS =
ShieldSettingsSource.CONFIG_STANDARD_USER +

View File

@ -34,20 +34,26 @@ public class MultipleIndicesPermissionsTests extends ShieldIntegTestCase {
@Override
protected String configRoles() {
return ShieldSettingsSource.DEFAULT_ROLE + ":\n" +
" cluster: all\n" +
" cluster: [ all ]\n" +
" indices:\n" +
" '*': manage\n" +
" '/.*/': write\n" +
" 'test': read\n" +
" 'test1': read\n" +
" - names: '*'\n" +
" privileges: [manage]\n" +
" - names: '/.*/'\n" +
" privileges: [write]\n" +
" - names: 'test'\n" +
" privileges: [read]\n" +
" - names: 'test1'\n" +
" privileges: [read]\n" +
"\n" +
"role_a:\n" +
" indices:\n" +
" 'a': all\n" +
" - names: 'a'\n" +
" privileges: [all]\n" +
"\n" +
"role_b:\n" +
" indices:\n" +
" 'b': all\n";
" - names: 'b'\n" +
" privileges: [all]\n";
}
@Override

View File

@ -38,9 +38,10 @@ public class PermissionPrecedenceTests extends ShieldIntegTestCase {
@Override
protected String configRoles() {
return "admin:\n" +
" cluster: all\n" +
" cluster: [ all ] \n" +
" indices:\n" +
" '*': all\n" +
" - names: '*'\n" +
" privileges: [ all ]" +
"\n" +
"transport_client:\n" +
" cluster:\n" +
@ -49,7 +50,8 @@ public class PermissionPrecedenceTests extends ShieldIntegTestCase {
"\n" +
"user:\n" +
" indices:\n" +
" 'test_*': all\n";
" - names: 'test_*'\n" +
" privileges: [ all ]";
}
@Override

View File

@ -38,15 +38,18 @@ public class SearchGetAndSuggestPermissionsTests extends ShieldIntegTestCase {
"\n" +
"search_role:\n" +
" indices:\n" +
" 'a': search\n" +
" - names: 'a'\n" +
" privileges: [ search ]\n" +
"\n" +
"get_role:\n" +
" indices:\n" +
" 'a': get\n" +
" - names: 'a'\n" +
" privileges: [ get ]\n" +
"\n" +
"suggest_role:\n" +
" indices:\n" +
" 'a': suggest\n";
" - names: 'a'\n" +
" privileges: [ suggest ]\n";
}
@Override

View File

@ -58,7 +58,8 @@ public class ShieldClearScrollTests extends ShieldIntegTestCase {
" - cluster:admin/indices/scroll/clear_all \n" +
"denied_role:\n" +
" indices:\n" +
" '*': ALL\n";
" - names: '*'" +
" privileges: [ALL]\n";
}
@Before

View File

@ -93,21 +93,25 @@ abstract public class AbstractAdLdapRealmTestCase extends ShieldIntegTestCase {
return super.configRoles() +
"\n" +
"Avengers:\n" +
" cluster: NONE\n" +
" cluster: [ NONE ]\n" +
" indices:\n" +
" 'avengers': ALL\n" +
" - names: 'avengers'\n" +
" privileges: [ all ]\n" +
"SHIELD:\n" +
" cluster: NONE\n" +
" indices:\n " +
" '" + SHIELD_INDEX + "': ALL\n" +
" cluster: [ NONE ]\n" +
" indices:\n" +
" - names: '" + SHIELD_INDEX + "'\n" +
" privileges: [ all ]\n" +
"Gods:\n" +
" cluster: NONE\n" +
" cluster: [ NONE ]\n" +
" indices:\n" +
" '" + ASGARDIAN_INDEX + "': ALL\n" +
" - names: '" + ASGARDIAN_INDEX + "'\n" +
" privileges: [ all ]\n" +
"Philanthropists:\n" +
" cluster: NONE\n" +
" cluster: [ NONE ]\n" +
" indices:\n" +
" '" + PHILANTHROPISTS_INDEX + "': ALL\n";
" - names: '" + PHILANTHROPISTS_INDEX + "'\n" +
" privileges: [ all ]\n";
}
protected void assertAccessAllowed(String user, String index) throws IOException {

View File

@ -13,7 +13,7 @@ import java.io.IOException;
* This tests the group to role mappings from LDAP sources provided by the super class - available from super.realmConfig.
* The super class will provide appropriate group mappings via configGroupMappings()
*/
@Network
//@Network
public class GroupMappingTests extends AbstractAdLdapRealmTestCase {
public void testAuthcAuthz() throws IOException {
String avenger = realmConfig.loginWithCommonName ? "Natasha Romanoff" : "blackwidow";

View File

@ -19,9 +19,10 @@ public class MultiGroupMappingTests extends AbstractAdLdapRealmTestCase {
return super.configRoles() +
"\n" +
"MarvelCharacters:\n" +
" cluster: NONE\n" +
" cluster: [ NONE ]\n" +
" indices:\n" +
" 'marvel_comics': ALL\n";
" - names: 'marvel_comics'\n" +
" privileges: [ all ]\n";
}
@Override

View File

@ -50,7 +50,8 @@ public class AnonymousUserTests extends ShieldIntegTestCase {
return super.configRoles() + "\n" +
"anonymous:\n" +
" indices:\n" +
" '*': READ";
" - names: '*'" +
" privileges: [ READ ]\n";
}
public void testAnonymousViaHttp() throws Exception {

View File

@ -36,9 +36,9 @@ public class RunAsIntegTests extends ShieldIntegTestCase {
static final String TRANSPORT_CLIENT_USER = "transport_user";
static final String ROLES =
"transport_client:\n" +
" cluster: cluster:monitor/nodes/liveness\n" +
" cluster: [ 'cluster:monitor/nodes/liveness' ]\n" +
"run_as_role:\n" +
" run_as: " + ShieldSettingsSource.DEFAULT_USER_NAME + ",idontexist\n";
" run_as: [ '" + ShieldSettingsSource.DEFAULT_USER_NAME + "', 'idontexist' ]\n";
@Override
public Settings nodeSettings(int nodeOrdinal) {

View File

@ -40,7 +40,8 @@ public class AnalyzeTests extends ShieldIntegTestCase {
//role that has analyze indices privileges only
"analyze_indices:\n" +
" indices:\n" +
" 'test_*': indices:admin/analyze\n" +
" - names: 'test_*'\n" +
" privileges: [ 'indices:admin/analyze' ]\n" +
"analyze_cluster:\n" +
" cluster:\n" +
" - cluster:admin/analyze\n";

View File

@ -58,25 +58,32 @@ public class IndexAliasesTests extends ShieldIntegTestCase {
//role that has create index only privileges
"create_only:\n" +
" indices:\n" +
" '*': create_index\n" +
" - names: '*'\n" +
" privileges: [ create_index ]\n" +
//role that has create index and managa aliases on test_*, not enough to manage aliases outside of test_* namespace
"create_test_aliases_test:\n" +
" indices:\n" +
" 'test_*': create_index,manage_aliases\n" +
" - names: 'test_*'\n" +
" privileges: [ create_index, manage_aliases ]\n" +
//role that has create index on test_* and manage aliases on alias_*, can't create aliases pointing to test_* though
"create_test_aliases_alias:\n" +
" indices:\n" +
" 'test_*': create_index\n" +
" 'alias_*': manage_aliases\n" +
" - names: 'test_*'\n" +
" privileges: [ create_index ]\n" +
" - names: 'alias_*'\n" +
" privileges: [ manage_aliases ]\n" +
//role that has create index on test_* and manage_aliases on both alias_* and test_*
"create_test_aliases_test_alias:\n" +
" indices:\n" +
" 'test_*': create_index\n" +
" 'alias_*,test_*': manage_aliases\n" +
" - names: 'test_*'\n" +
" privileges: [ create_index ]\n" +
" - names: [ 'alias_*', 'test_*' ]\n" +
" privileges: [ manage_aliases ]\n" +
//role that has manage_aliases only on both test_* and alias_*
"aliases_only:\n" +
" indices:\n" +
" 'alias_*,test_*': manage_aliases\n";
" - names: [ 'alias_*', 'test_*']\n" +
" privileges: [ manage_aliases ]\n";
}
@Before

View File

@ -30,10 +30,12 @@ public class IndicesAndAliasesResolverIntegrationTests extends ShieldIntegTestCa
@Override
protected String configRoles() {
return ShieldSettingsSource.DEFAULT_ROLE + ":\n" +
" cluster: ALL\n" +
" cluster: [ ALL ]\n" +
" indices:\n" +
" '*': manage,write\n" +
" '/test.*/': read\n";
" - names: '*'\n" +
" privileges: [ manage, write ]\n" +
" - names: '/test.*/'\n" +
" privileges: [ read ]\n";
}
public void testSearchForAll() {

View File

@ -57,7 +57,7 @@ public class FileRolesStoreTests extends ESTestCase {
.put(XPackPlugin.featureEnabledSetting(Shield.DLS_FLS_FEATURE), true)
.build());
assertThat(roles, notNullValue());
assertThat(roles.size(), is(10));
assertThat(roles.size(), is(9));
Role role = roles.get("role1");
assertThat(role, notNullValue());
@ -121,12 +121,7 @@ public class FileRolesStoreTests extends ESTestCase {
assertThat(group.privilege().isAlias(IndexPrivilege.union(IndexPrivilege.READ, IndexPrivilege.WRITE)), is(true));
role = roles.get("role4");
assertThat(role, notNullValue());
assertThat(role.name(), equalTo("role4"));
assertThat(role.cluster(), notNullValue());
assertThat(role.cluster(), is(ClusterPermission.Core.NONE));
assertThat(role.indices(), is(IndicesPermission.Core.NONE));
assertThat(role.runAs(), is(RunAsPermission.Core.NONE));
assertThat(role, nullValue());
role = roles.get("role_run_as");
assertThat(role, notNullValue());
@ -214,7 +209,7 @@ public class FileRolesStoreTests extends ESTestCase {
.put(XPackPlugin.featureEnabledSetting(Shield.DLS_FLS_FEATURE), false)
.build());
assertThat(roles, notNullValue());
assertThat(roles.size(), is(7));
assertThat(roles.size(), is(6));
assertThat(roles.get("role_fields"), nullValue());
assertThat(roles.get("role_query"), nullValue());
assertThat(roles.get("role_query_fields"), nullValue());
@ -236,16 +231,18 @@ public class FileRolesStoreTests extends ESTestCase {
Path path = getDataPath("default_roles.yml");
Map<String, Role> roles = FileRolesStore.parseFile(path, logger, Settings.EMPTY);
assertThat(roles, notNullValue());
assertThat(roles.size(), is(8));
assertThat(roles.size(), is(10));
assertThat(roles, hasKey("admin"));
assertThat(roles, hasKey("power_user"));
assertThat(roles, hasKey("user"));
assertThat(roles, hasKey("kibana3"));
assertThat(roles, hasKey("kibana4"));
assertThat(roles, hasKey("kibana4_server"));
assertThat(roles, hasKey("logstash"));
assertThat(roles, hasKey("monitoring_user"));
assertThat(roles, hasKey("monitoring_agent"));
assertThat(roles, hasKey("remote_monitoring_agent"));
assertThat(roles, hasKey("ingest_admin"));
assertThat(roles, hasKey("transport_client"));
}
public void testAutoReload() throws Exception {
@ -288,7 +285,8 @@ public class FileRolesStoreTests extends ESTestCase {
writer.newLine();
writer.newLine();
writer.append("role5:").append(System.lineSeparator());
writer.append(" cluster: 'MONITOR'");
writer.append(" cluster:").append(System.lineSeparator());
writer.append(" - 'MONITOR'");
}
if (!latch.await(5, TimeUnit.SECONDS)) {
@ -327,24 +325,22 @@ public class FileRolesStoreTests extends ESTestCase {
assertThat(role.name(), equalTo("valid_role"));
List<CapturingLogger.Msg> entries = logger.output(CapturingLogger.Level.ERROR);
assertThat(entries, hasSize(5));
assertThat(entries, hasSize(6));
assertThat(entries.get(0).text, startsWith("invalid role definition [$dlk39] in roles file [" + path.toAbsolutePath() +
"]. invalid role name"));
assertThat(entries.get(1).text, startsWith("invalid role definition [role1] in roles file [" + path.toAbsolutePath() + "]"));
assertThat(entries.get(2).text, startsWith("invalid role definition [role2] in roles file [" + path.toAbsolutePath() +
"]. could not resolve cluster privileges [blkjdlkd]"));
assertThat(entries.get(3).text, startsWith("invalid role definition [role3] in roles file [" + path.toAbsolutePath() +
"]. [indices] field value must be an array"));
assertThat(entries.get(4).text, startsWith("invalid role definition [role4] in roles file [" + path.toAbsolutePath() +
"]. could not resolve indices privileges [al;kjdlkj;lkj]"));
assertThat(entries.get(2).text, startsWith("failed to parse role [role2]"));
assertThat(entries.get(3).text, startsWith("failed to parse role [role3]"));
assertThat(entries.get(4).text, startsWith("failed to parse role [role4]"));
assertThat(entries.get(5).text, startsWith("failed to parse indices privileges for role [role5]"));
}
public void testThatRoleNamesDoesNotResolvePermissions() throws Exception {
Path path = getDataPath("invalid_roles.yml");
CapturingLogger logger = new CapturingLogger(CapturingLogger.Level.ERROR);
Set<String> roleNames = FileRolesStore.parseFileForRoleNames(path, logger);
assertThat(roleNames.size(), is(5));
assertThat(roleNames, containsInAnyOrder("valid_role", "role1", "role2", "role3", "role4"));
assertThat(roleNames.size(), is(6));
assertThat(roleNames, containsInAnyOrder("valid_role", "role1", "role2", "role3", "role4", "role5"));
List<CapturingLogger.Msg> entries = logger.output(CapturingLogger.Level.ERROR);
assertThat(entries, hasSize(1));

View File

@ -67,9 +67,10 @@ public class ShieldSettingsSource extends ClusterDiscoveryConfiguration.UnicastZ
public static final String CONFIG_ROLE_ALLOW_ALL =
DEFAULT_ROLE + ":\n" +
" cluster: ALL\n" +
" cluster: [ ALL ]\n" +
" indices:\n" +
" '*': ALL\n" +
" - names: '*'\n" +
" privileges: [ ALL ]\n" +
DEFAULT_TRANSPORT_CLIENT_ROLE + ":\n" +
" cluster:\n" +
" - cluster:monitor/nodes/info\n" +

View File

@ -1,48 +1,126 @@
admin:
cluster: all
cluster:
- all
indices:
'*': all
- names: '*'
privileges:
- all
# monitoring cluster privileges
# All operations on all indices
power_user:
cluster: monitor
cluster:
- monitor
indices:
'*': all
- names: '*'
privileges:
- all
# Only operations on indices
# Read-only operations on indices
user:
indices:
'*': read
- names: '*'
privileges:
- read
# The required role for kibana 3 users
kibana3:
cluster: cluster:monitor/nodes/info
indices:
'*': indices:data/read/search, indices:data/read/get, indices:admin/get
'kibana-int': indices:data/read/search, indices:data/read/get, indices:data/write/delete, indices:data/write/index, create_index
# Defines the required permissions for transport clients
transport_client:
cluster:
- cluster:monitor/nodes/liveness
#uncomment the following for sniffing
#- cluster:monitor/state
# The required role for kibana 4 users
# The required permissions for kibana 4 users.
kibana4:
cluster: cluster:monitor/nodes/info
cluster:
- cluster:monitor/nodes/info
- cluster:monitor/health
indices:
'*': indices:data/read/search, indices:data/read/get, indices:admin/get
'.kibana': indices:data/read/search, indices:data/read/get, indices:data/write/delete, indices:data/write/index, create_index
- names: '*'
privileges:
- indices:admin/mappings/fields/get
- indices:admin/validate/query
- indices:data/read/search
- indices:data/read/msearch
- indices:data/read/field_stats
- indices:admin/get
- names: '.kibana'
privileges:
- indices:admin/exists
- indices:admin/mapping/put
- indices:admin/mappings/fields/get
- indices:admin/refresh
- indices:admin/validate/query
- indices:data/read/get
- indices:data/read/mget
- indices:data/read/search
- indices:data/write/delete
- indices:data/write/index
- indices:data/write/update
# The required permissions for the kibana 4 server
kibana4_server:
cluster:
- cluster:monitor/nodes/info
- cluster:monitor/health
indices:
- names: '.kibana'
privileges:
- indices:admin/create
- indices:admin/exists
- indices:admin/mapping/put
- indices:admin/mappings/fields/get
- indices:admin/refresh
- indices:admin/validate/query
- indices:data/read/get
- indices:data/read/mget
- indices:data/read/search
- indices:data/write/delete
- indices:data/write/index
- indices:data/write/update
# The required role for logstash users
logstash:
cluster: indices:admin/template/get, indices:admin/template/put
cluster:
- indices:admin/template/get
- indices:admin/template/put
indices:
'logstash-*': indices:data/write/bulk, indices:data/write/delete, indices:data/write/update, create_index
- names: 'logstash-*'
privileges:
- indices:data/write/bulk
- indices:data/write/delete
- indices:data/write/update
- indices:data/read/search
- indices:data/read/scroll
- create_index
# Monitoring role, allowing all operations
# on the monitoring indices
# Monitoring user role. Assign to monitoring users.
monitoring_user:
indices:
'.monitoring-*': all
- names: '.monitoring-*'
privileges:
- read
- names: '.kibana'
privileges:
- indices:admin/exists
- indices:admin/mappings/fields/get
- indices:admin/validate/query
- indices:data/read/get
- indices:data/read/mget
- indices:data/read/search
# Monitoring Agent users
monitoring_agent:
cluster: indices:admin/template/get, indices:admin/template/put
# Monitoring remote agent role. Assign to the agent user on the remote monitoring cluster
# to which the monitoring agent will export all its data
remote_monitoring_agent:
cluster:
- indices:admin/template/put
- indices:admin/template/get
indices:
'.monitoring-*': indices:data/write/bulk, create_index
- names: '.monitoring-*'
privileges:
- all
# Allows all operations required to manage ingest pipelines
ingest_admin:
cluster:
- manage_pipeline

View File

@ -1,7 +1,10 @@
valid_role:
cluster: ALL
cluster:
- ALL
indices:
idx: ALL
- names: idx
privileges:
- ALL
"$dlk39":
cluster: all
@ -24,4 +27,21 @@ role3:
role4:
cluster: ALL
indices:
'*': al;kjdlkj;lkj
'*': al;kjdlkj;lkj
#dadfad
# role won't be available since empty privileges...
role5:
cluster:
indices:
- names:
#adfldkkd
- idx2
privileges:
- names:
- ''
privileges:
- READ
- names:
- 'idx1'
privileges: []

View File

@ -1,14 +1,22 @@
admin:
cluster: all
cluster:
- all
indices:
'*': all
- names: '*'
privileges: [ all ]
__es_system_role:
cluster: all
cluster:
- all
indices:
'*' : all
- names: '*'
privileges:
- all
__es_internal_role:
cluster: all
cluster:
- all
indices:
'*' : all
- names: '*'
privileges:
- all

View File

@ -1,31 +1,37 @@
role1:
cluster: ALL
cluster:
- ALL
indices:
'idx1,idx2': READ
idx3: crud
- names:
- idx1
- idx2
privileges:
- READ
- names: idx3
privileges:
- CRUD
role1.ab:
cluster: ALL
cluster:
- ALL
role2:
cluster: ALL, MONITOR
cluster:
- ALL
- MONITOR
role3:
indices:
'/.*_.*/': READ, WRITE
#dadfad
role4:
cluster:
indices:
#adfldkkd
'idx2':
'': READ
'idx1': []
- names: '/.*_.*/'
privileges:
- READ
- WRITE
# role with run_as permissions only
role_run_as:
run_as: "user1,user2"
run_as:
- user1
- user2
# role with more than run_as
role_run_as1:
@ -33,23 +39,31 @@ role_run_as1:
role_fields:
indices:
'field_idx':
privileges: READ
- names:
#23456789ohbh
- 'field_idx'
privileges:
- READ
fields:
- foo
- boo
role_query:
indices:
'query_idx':
privileges: READ
- names:
- 'query_idx'
privileges:
- READ
query: '{ "match_all": {} }'
role_query_fields:
indices:
'query_fields_idx':
privileges: READ
query: '{ "match_all": {} }'
- names:
- 'query_fields_idx'
privileges:
- READ
query:
match_all:
fields:
- foo
- boo

View File

@ -685,19 +685,20 @@ public abstract class AbstractWatcherIntegrationTestCase extends ESIntegTestCase
public static final String ROLES =
"test:\n" + // a user for the test infra.
" cluster: cluster:monitor/nodes/info, cluster:monitor/state, cluster:monitor/health, cluster:monitor/stats, " +
"cluster:admin/settings/update, cluster:admin/repository/delete, cluster:monitor/nodes/liveness, " +
"indices:admin/template/get, indices:admin/template/put, indices:admin/template/delete\n" +
" cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/state', 'cluster:monitor/health', 'cluster:monitor/stats'," +
" 'cluster:admin/settings/update', 'cluster:admin/repository/delete', 'cluster:monitor/nodes/liveness'," +
" 'indices:admin/template/get', 'indices:admin/template/put', 'indices:admin/template/delete' ]\n" +
" indices:\n" +
" '*': all\n" +
" - names: '*'\n" +
" privileges: [ all ]\n" +
"\n" +
"admin:\n" +
" cluster: manage_watcher, cluster:monitor/nodes/info, cluster:monitor/nodes/liveness\n" +
" cluster: [ 'manage_watcher', 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n" +
"transport_client:\n" +
" cluster: cluster:monitor/nodes/info, cluster:monitor/nodes/liveness\n" +
" cluster: [ 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n" +
"\n" +
"monitor:\n" +
" cluster: monitor_watcher, cluster:monitor/nodes/info, cluster:monitor/nodes/liveness\n"
" cluster: [ 'monitor_watcher', 'cluster:monitor/nodes/info', 'cluster:monitor/nodes/liveness' ]\n"
;