Adjust required permissions for system schema (#7579)

* Adjust required permissions for system schema

* PR comments, fix current_size handling

* Checkstyle

* Set curr_size instead of current_size

* Adjust information schema docs

* Fix merge conflict

* Update tests
Jonathan Wei 2019-05-02 07:18:02 -07:00 committed by Fangjin Yang
parent ec8562c885
commit a013350018
12 changed files with 595 additions and 63 deletions


@@ -310,6 +310,20 @@ For information on what HTTP methods are supported on a particular request endpoint
GET requires READ permission, while POST and DELETE require WRITE permission.
### SQL Permissions
Queries on Druid datasources require DATASOURCE READ permissions for the specified datasource.
Queries on the [INFORMATION_SCHEMA tables](../../querying/sql.html#information-schema) will
return information about datasources that the caller has DATASOURCE READ access to. Other
datasources will be omitted.
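As a minimal illustration (assuming a caller whose only grant is DATASOURCE READ on `auth_test`), a metadata query such as the following returns rows for `auth_test` and silently omits every other datasource:

```sql
-- Lists the caller-visible datasources in the 'druid' schema; tables backed by
-- datasources the caller cannot read are omitted rather than raising an error.
SELECT TABLE_SCHEMA, TABLE_NAME
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_SCHEMA = 'druid';
```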
Queries on the [system schema tables](../../querying/sql.html#system-schema) require the following permissions (example queries follow the list):
- `segments`: Segments will be filtered based on DATASOURCE READ permissions.
- `servers`: The user requires STATE READ permissions.
- `server_segments`: The user requires STATE READ permissions and segments will be filtered based on DATASOURCE READ permissions.
- `tasks`: Tasks will be filtered based on DATASOURCE READ permissions.
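For illustration, these are the queries exercised by the integration test in this patch (`auth_test` is the test datasource), annotated with the permission each table requires:

```sql
-- sys.segments and sys.tasks: rows are filtered by DATASOURCE READ;
-- entries for unreadable datasources are silently dropped.
SELECT * FROM sys.segments WHERE datasource IN ('auth_test');
SELECT * FROM sys.tasks WHERE datasource IN ('auth_test');

-- sys.servers: requires STATE READ; without it the query fails with 403 (Forbidden).
SELECT * FROM sys.servers;

-- sys.server_segments: requires STATE READ, and rows are additionally filtered
-- by DATASOURCE READ.
SELECT * FROM sys.server_segments WHERE segment_id LIKE 'auth_test%';
```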
## Configuration Propagation
To prevent excessive load on the Coordinator, the Authenticator and Authorizer user/role database state is cached on each Druid process.


@@ -739,3 +739,7 @@ Broker will emit the following metrics for SQL.
|`sqlQuery/time`|Milliseconds taken to complete a SQL.|id, nativeQueryIds, dataSource, remoteAddress, success.|< 1s|
|`sqlQuery/bytes`|number of bytes returned in SQL response.|id, nativeQueryIds, dataSource, remoteAddress, success.| |
## Authorization Permissions
Please see [Defining SQL permissions](../../development/extensions-core/druid-basic-security.html#sql-permissions) for information on what permissions are needed for making SQL queries in a secured cluster.


@@ -18,3 +18,5 @@ INSERT INTO druid_segments (id,dataSource,created_date,start,end,partitioned,ver…
INSERT INTO druid_segments (id,dataSource,created_date,start,end,partitioned,version,used,payload) VALUES ('twitterstream_2013-01-03T00:00:00.000Z_2013-01-04T00:00:00.000Z_2013-01-04T04:09:13.590Z_v9','twitterstream','2013-05-13T00:03:48.807Z','2013-01-03T00:00:00.000Z','2013-01-04T00:00:00.000Z',0,'2013-01-04T04:09:13.590Z_v9',1,'{\"dataSource\":\"twitterstream\",\"interval\":\"2013-01-03T00:00:00.000Z/2013-01-04T00:00:00.000Z\",\"version\":\"2013-01-04T04:09:13.590Z_v9\",\"loadSpec\":{\"type\":\"s3_zip\",\"bucket\":\"static.druid.io\",\"key\":\"data/segments/twitterstream/2013-01-03T00:00:00.000Z_2013-01-04T00:00:00.000Z/2013-01-04T04:09:13.590Z_v9/0/index.zip\"},\"dimensions\":\"has_links,first_hashtag,user_time_zone,user_location,has_mention,user_lang,rt_name,user_name,is_retweet,is_viral,has_geo,url_domain,user_mention_name,reply_to_name\",\"metrics\":\"count,tweet_length,num_followers,num_links,num_mentions,num_hashtags,num_favorites,user_total_tweets\",\"shardSpec\":{\"type\":\"none\"},\"binaryVersion\":9,\"size\":411651320,\"identifier\":\"twitterstream_2013-01-03T00:00:00.000Z_2013-01-04T00:00:00.000Z_2013-01-04T04:09:13.590Z_v9\"}');
INSERT INTO druid_segments (id,dataSource,created_date,start,end,partitioned,version,used,payload) VALUES ('wikipedia_editstream_2012-12-29T00:00:00.000Z_2013-01-10T08:00:00.000Z_2013-01-10T08:13:47.830Z_v9','wikipedia_editstream','2013-03-15T20:49:52.348Z','2012-12-29T00:00:00.000Z','2013-01-10T08:00:00.000Z',0,'2013-01-10T08:13:47.830Z_v9',1,'{\"dataSource\":\"wikipedia_editstream\",\"interval\":\"2012-12-29T00:00:00.000Z/2013-01-10T08:00:00.000Z\",\"version\":\"2013-01-10T08:13:47.830Z_v9\",\"loadSpec\":{\"type\":\"s3_zip\",\"bucket\":\"static.druid.io\",\"key\":\"data/segments/wikipedia_editstream/2012-12-29T00:00:00.000Z_2013-01-10T08:00:00.000Z/2013-01-10T08:13:47.830Z_v9/0/index.zip\"},\"dimensions\":\"anonymous,area_code,city,continent_code,country_name,dma_code,geo,language,namespace,network,newpage,page,postal_code,region_lookup,robot,unpatrolled,user\",\"metrics\":\"added,count,deleted,delta,delta_hist,unique_users,variation\",\"shardSpec\":{\"type\":\"none\"},\"binaryVersion\":9,\"size\":446027801,\"identifier\":\"wikipedia_editstream_2012-12-29T00:00:00.000Z_2013-01-10T08:00:00.000Z_2013-01-10T08:13:47.830Z_v9\"}');
INSERT INTO druid_segments (id, dataSource, created_date, start, end, partitioned, version, used, payload) VALUES ('wikipedia_2013-08-01T00:00:00.000Z_2013-08-02T00:00:00.000Z_2013-08-08T21:22:48.989Z', 'wikipedia', '2013-08-08T21:26:23.799Z', '2013-08-01T00:00:00.000Z', '2013-08-02T00:00:00.000Z', '0', '2013-08-08T21:22:48.989Z', '1', '{\"dataSource\":\"wikipedia\",\"interval\":\"2013-08-01T00:00:00.000Z/2013-08-02T00:00:00.000Z\",\"version\":\"2013-08-08T21:22:48.989Z\",\"loadSpec\":{\"type\":\"s3_zip\",\"bucket\":\"static.druid.io\",\"key\":\"data/segments/wikipedia/20130801T000000.000Z_20130802T000000.000Z/2013-08-08T21_22_48.989Z/0/index.zip\"},\"dimensions\":\"dma_code,continent_code,geo,area_code,robot,country_name,network,city,namespace,anonymous,unpatrolled,page,postal_code,language,newpage,user,region_lookup\",\"metrics\":\"count,delta,variation,added,deleted\",\"shardSpec\":{\"type\":\"none\"},\"binaryVersion\":9,\"size\":24664730,\"identifier\":\"wikipedia_2013-08-01T00:00:00.000Z_2013-08-02T00:00:00.000Z_2013-08-08T21:22:48.989Z\"}');
INSERT INTO druid_tasks (id, created_date, datasource, payload, status_payload, active) VALUES ('index_auth_test_2030-04-30T01:13:31.893Z', '2030-04-30T01:13:31.893Z', 'auth_test', '{\"id\":\"index_auth_test_2030-04-30T01:13:31.893Z\",\"created_date\":\"2030-04-30T01:13:31.893Z\",\"datasource\":\"auth_test\",\"active\":0}', '{\"id\":\"index_auth_test_2030-04-30T01:13:31.893Z\",\"status\":\"SUCCESS\",\"duration\":1}', 0);
INSERT INTO druid_segments (id,dataSource,created_date,start,end,partitioned,version,used,payload) VALUES ('auth_test_2012-12-29T00:00:00.000Z_2013-01-10T08:00:00.000Z_2013-01-10T08:13:47.830Z_v9','auth_test','2013-03-15T20:49:52.348Z','2012-12-29T00:00:00.000Z','2013-01-10T08:00:00.000Z',0,'2013-01-10T08:13:47.830Z_v9',1,'{\"dataSource\":\"auth_test\",\"interval\":\"2012-12-29T00:00:00.000Z/2013-01-10T08:00:00.000Z\",\"version\":\"2013-01-10T08:13:47.830Z_v9\",\"loadSpec\":{\"type\":\"s3_zip\",\"bucket\":\"static.druid.io\",\"key\":\"data/segments/wikipedia_editstream/2012-12-29T00:00:00.000Z_2013-01-10T08:00:00.000Z/2013-01-10T08:13:47.830Z_v9/0/index.zip\"},\"dimensions\":\"anonymous,area_code,city,continent_code,country_name,dma_code,geo,language,namespace,network,newpage,page,postal_code,region_lookup,robot,unpatrolled,user\",\"metrics\":\"added,count,deleted,delta,delta_hist,unique_users,variation\",\"shardSpec\":{\"type\":\"none\"},\"binaryVersion\":9,\"size\":446027801,\"identifier\":\"auth_test_2012-12-29T00:00:00.000Z_2013-01-10T08:00:00.000Z_2013-01-10T08:13:47.830Z_v9\"}');


@@ -21,6 +21,9 @@ package org.apache.druid.tests.security;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.inject.Inject;
import org.apache.calcite.avatica.AvaticaSqlException;
import org.apache.druid.guice.annotations.Client;
@@ -40,10 +43,14 @@ import org.apache.druid.server.security.ResourceAction;
import org.apache.druid.server.security.ResourceType;
import org.apache.druid.sql.avatica.DruidAvaticaHandler;
import org.apache.druid.testing.IntegrationTestingConfig;
import org.apache.druid.testing.clients.CoordinatorResourceTestClient;
import org.apache.druid.testing.guice.DruidTestModuleFactory;
import org.apache.druid.testing.utils.RetryUtil;
import org.apache.druid.testing.utils.TestQueryHelper;
import org.jboss.netty.handler.codec.http.HttpMethod;
import org.jboss.netty.handler.codec.http.HttpResponseStatus;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Guice;
import org.testng.annotations.Test;
@@ -55,9 +62,11 @@ import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
@Guice(moduleFactory = DruidTestModuleFactory.class)
public class ITBasicAuthConfigurationTest
@@ -69,6 +78,32 @@ public class ITBasicAuthConfigurationTest
{
};
private static final TypeReference SYS_SCHEMA_RESULTS_TYPE_REFERENCE =
new TypeReference<List<Map<String, Object>>>()
{
};
private static final String SYSTEM_SCHEMA_SEGMENTS_RESULTS_RESOURCE =
"/results/auth_test_sys_schema_segments.json";
private static final String SYSTEM_SCHEMA_SERVER_SEGMENTS_RESULTS_RESOURCE =
"/results/auth_test_sys_schema_server_segments.json";
private static final String SYSTEM_SCHEMA_SERVERS_RESULTS_RESOURCE =
"/results/auth_test_sys_schema_servers.json";
private static final String SYSTEM_SCHEMA_TASKS_RESULTS_RESOURCE =
"/results/auth_test_sys_schema_tasks.json";
private static final String SYS_SCHEMA_SEGMENTS_QUERY =
"SELECT * FROM sys.segments WHERE datasource IN ('auth_test')";
private static final String SYS_SCHEMA_SERVERS_QUERY =
"SELECT * FROM sys.servers";
private static final String SYS_SCHEMA_SERVER_SEGMENTS_QUERY =
"SELECT * FROM sys.server_segments WHERE segment_id LIKE 'auth_test%'";
private static final String SYS_SCHEMA_TASKS_QUERY =
"SELECT * FROM sys.tasks WHERE datasource IN ('auth_test')";
@Inject
IntegrationTestingConfig config;
@@ -81,6 +116,264 @@ public class ITBasicAuthConfigurationTest
StatusResponseHandler responseHandler = new StatusResponseHandler(StandardCharsets.UTF_8);
@Inject
private CoordinatorResourceTestClient coordinatorClient;
@BeforeMethod
public void before() throws Exception
{
// ensure that auth_test segments are loaded completely; we use them for testing system schema tables
RetryUtil.retryUntilTrue(
() -> coordinatorClient.areSegmentsLoaded("auth_test"), "auth_test segment load"
);
}
@Test
public void testSystemSchemaAccess() throws Exception
{
HttpClient adminClient = new CredentialedHttpClient(
new BasicCredentials("admin", "priest"),
httpClient
);
// check that admin access works on all nodes
checkNodeAccess(adminClient);
// create a new user+role that can only read 'auth_test'
List<ResourceAction> readDatasourceOnlyPermissions = Collections.singletonList(
new ResourceAction(
new Resource("auth_test", ResourceType.DATASOURCE),
Action.READ
)
);
createUserAndRoleWithPermissions(
adminClient,
"datasourceOnlyUser",
"helloworld",
"datasourceOnlyRole",
readDatasourceOnlyPermissions
);
HttpClient datasourceOnlyUserClient = new CredentialedHttpClient(
new BasicCredentials("datasourceOnlyUser", "helloworld"),
httpClient
);
// create a new user+role that can only read 'auth_test' + STATE read access
List<ResourceAction> readDatasourceWithStatePermissions = ImmutableList.of(
new ResourceAction(
new Resource("auth_test", ResourceType.DATASOURCE),
Action.READ
),
new ResourceAction(
new Resource(".*", ResourceType.STATE),
Action.READ
)
);
createUserAndRoleWithPermissions(
adminClient,
"datasourceWithStateUser",
"helloworld",
"datasourceWithStateRole",
readDatasourceWithStatePermissions
);
HttpClient datasourceWithStateUserClient = new CredentialedHttpClient(
new BasicCredentials("datasourceWithStateUser", "helloworld"),
httpClient
);
// create a new user+role with only STATE read access
List<ResourceAction> stateOnlyPermissions = ImmutableList.of(
new ResourceAction(
new Resource(".*", ResourceType.STATE),
Action.READ
)
);
createUserAndRoleWithPermissions(
adminClient,
"stateOnlyUser",
"helloworld",
"stateOnlyRole",
stateOnlyPermissions
);
HttpClient stateOnlyUserClient = new CredentialedHttpClient(
new BasicCredentials("stateOnlyUser", "helloworld"),
httpClient
);
// check that we can access a datasource-permission restricted resource on the broker
makeRequest(
datasourceOnlyUserClient,
HttpMethod.GET,
config.getBrokerUrl() + "/druid/v2/datasources/auth_test",
null
);
// check that we can access a state-permission restricted resource on the broker
makeRequest(datasourceWithStateUserClient, HttpMethod.GET, config.getBrokerUrl() + "/status", null);
makeRequest(stateOnlyUserClient, HttpMethod.GET, config.getBrokerUrl() + "/status", null);
// initial setup is done now, run the system schema response content tests
final List<Map<String, Object>> adminSegments = jsonMapper.readValue(
TestQueryHelper.class.getResourceAsStream(SYSTEM_SCHEMA_SEGMENTS_RESULTS_RESOURCE),
SYS_SCHEMA_RESULTS_TYPE_REFERENCE
);
final List<Map<String, Object>> adminServerSegments = jsonMapper.readValue(
TestQueryHelper.class.getResourceAsStream(SYSTEM_SCHEMA_SERVER_SEGMENTS_RESULTS_RESOURCE),
SYS_SCHEMA_RESULTS_TYPE_REFERENCE
);
final List<Map<String, Object>> adminServers = getServersWithoutCurrentSize(
jsonMapper.readValue(
TestQueryHelper.class.getResourceAsStream(SYSTEM_SCHEMA_SERVERS_RESULTS_RESOURCE),
SYS_SCHEMA_RESULTS_TYPE_REFERENCE
)
);
final List<Map<String, Object>> adminTasks = jsonMapper.readValue(
TestQueryHelper.class.getResourceAsStream(SYSTEM_SCHEMA_TASKS_RESULTS_RESOURCE),
SYS_SCHEMA_RESULTS_TYPE_REFERENCE
);
// as admin
LOG.info("Checking sys.segments query as admin...");
verifySystemSchemaQuery(
adminClient,
SYS_SCHEMA_SEGMENTS_QUERY,
adminSegments
);
LOG.info("Checking sys.servers query as admin...");
verifySystemSchemaServerQuery(
adminClient,
SYS_SCHEMA_SERVERS_QUERY,
getServersWithoutCurrentSize(adminServers)
);
LOG.info("Checking sys.server_segments query as admin...");
verifySystemSchemaQuery(
adminClient,
SYS_SCHEMA_SERVER_SEGMENTS_QUERY,
adminServerSegments
);
LOG.info("Checking sys.tasks query as admin...");
verifySystemSchemaQuery(
adminClient,
SYS_SCHEMA_TASKS_QUERY,
adminTasks
);
// as user that can only read auth_test
LOG.info("Checking sys.segments query as datasourceOnlyUser...");
verifySystemSchemaQuery(
datasourceOnlyUserClient,
SYS_SCHEMA_SEGMENTS_QUERY,
adminSegments.stream()
.filter((segmentEntry) -> {
return "auth_test".equals(segmentEntry.get("datasource"));
})
.collect(Collectors.toList())
);
LOG.info("Checking sys.servers query as datasourceOnlyUser...");
verifySystemSchemaQueryFailure(
datasourceOnlyUserClient,
SYS_SCHEMA_SERVERS_QUERY,
HttpResponseStatus.FORBIDDEN,
"{\"Access-Check-Result\":\"Insufficient permission to view servers : Allowed:false, Message:\"}"
);
LOG.info("Checking sys.server_segments query as datasourceOnlyUser...");
verifySystemSchemaQueryFailure(
datasourceOnlyUserClient,
SYS_SCHEMA_SERVER_SEGMENTS_QUERY,
HttpResponseStatus.FORBIDDEN,
"{\"Access-Check-Result\":\"Insufficient permission to view servers : Allowed:false, Message:\"}"
);
LOG.info("Checking sys.tasks query as datasourceOnlyUser...");
verifySystemSchemaQuery(
datasourceOnlyUserClient,
SYS_SCHEMA_TASKS_QUERY,
adminTasks.stream()
.filter((taskEntry) -> {
return "auth_test".equals(taskEntry.get("datasource"));
})
.collect(Collectors.toList())
);
// as user that can read auth_test and STATE
LOG.info("Checking sys.segments query as datasourceWithStateUser...");
verifySystemSchemaQuery(
datasourceWithStateUserClient,
SYS_SCHEMA_SEGMENTS_QUERY,
adminSegments.stream()
.filter((segmentEntry) -> {
return "auth_test".equals(segmentEntry.get("datasource"));
})
.collect(Collectors.toList())
);
LOG.info("Checking sys.servers query as datasourceWithStateUser...");
verifySystemSchemaServerQuery(
datasourceWithStateUserClient,
SYS_SCHEMA_SERVERS_QUERY,
adminServers
);
LOG.info("Checking sys.server_segments query as datasourceWithStateUser...");
verifySystemSchemaQuery(
datasourceWithStateUserClient,
SYS_SCHEMA_SERVER_SEGMENTS_QUERY,
adminServerSegments.stream()
.filter((serverSegmentEntry) -> {
return ((String) serverSegmentEntry.get("segment_id")).contains("auth_test");
})
.collect(Collectors.toList())
);
LOG.info("Checking sys.tasks query as datasourceWithStateUser...");
verifySystemSchemaQuery(
datasourceWithStateUserClient,
SYS_SCHEMA_TASKS_QUERY,
adminTasks.stream()
.filter((taskEntry) -> {
return "auth_test".equals(taskEntry.get("datasource"));
})
.collect(Collectors.toList())
);
// as user that can only read STATE
LOG.info("Checking sys.segments query as stateOnlyUser...");
verifySystemSchemaQuery(
stateOnlyUserClient,
SYS_SCHEMA_SEGMENTS_QUERY,
Collections.emptyList()
);
LOG.info("Checking sys.servers query as stateOnlyUser...");
verifySystemSchemaServerQuery(
stateOnlyUserClient,
SYS_SCHEMA_SERVERS_QUERY,
adminServers
);
LOG.info("Checking sys.server_segments query as stateOnlyUser...");
verifySystemSchemaQuery(
stateOnlyUserClient,
SYS_SCHEMA_SERVER_SEGMENTS_QUERY,
Collections.emptyList()
);
LOG.info("Checking sys.tasks query as stateOnlyUser...");
verifySystemSchemaQuery(
stateOnlyUserClient,
SYS_SCHEMA_TASKS_QUERY,
Collections.emptyList()
);
}
@Test
public void testAuthConfiguration() throws Exception
{
@@ -110,54 +403,19 @@ public class ITBasicAuthConfigurationTest
// check that internal user works
checkNodeAccess(internalSystemClient);
// create a new user+role that can read /status
makeRequest(
adminClient,
HttpMethod.POST,
config.getCoordinatorUrl() + "/druid-ext/basic-security/authentication/db/basic/users/druid",
null
);
makeRequest(
adminClient,
HttpMethod.POST,
config.getCoordinatorUrl() + "/druid-ext/basic-security/authentication/db/basic/users/druid/credentials",
jsonMapper.writeValueAsBytes(new BasicAuthenticatorCredentialUpdate("helloworld", 5000))
);
makeRequest(
adminClient,
HttpMethod.POST,
config.getCoordinatorUrl() + "/druid-ext/basic-security/authorization/db/basic/users/druid",
null
);
makeRequest(
adminClient,
HttpMethod.POST,
config.getCoordinatorUrl() + "/druid-ext/basic-security/authorization/db/basic/roles/druidrole",
null
);
makeRequest(
adminClient,
HttpMethod.POST,
config.getCoordinatorUrl() + "/druid-ext/basic-security/authorization/db/basic/users/druid/roles/druidrole",
null
);
List<ResourceAction> permissions = Collections.singletonList(
new ResourceAction(
new Resource(".*", ResourceType.STATE),
Action.READ
)
);
byte[] permissionsBytes = jsonMapper.writeValueAsBytes(permissions);
makeRequest(
adminClient,
HttpMethod.POST,
config.getCoordinatorUrl() + "/druid-ext/basic-security/authorization/db/basic/roles/druidrole/permissions",
permissionsBytes
);
createUserAndRoleWithPermissions(
adminClient,
"druid",
"helloworld",
"druidrole",
permissions
);
// check that the new user works
@@ -166,7 +424,6 @@ public class ITBasicAuthConfigurationTest
// check loadStatus
checkLoadStatus(adminClient);
// create 100 users // create 100 users
for (int i = 0; i < 100; i++) {
makeRequest(
@@ -333,6 +590,23 @@ public class ITBasicAuthConfigurationTest
}
private StatusResponseHolder makeRequest(HttpClient httpClient, HttpMethod method, String url, byte[] content)
{
return makeRequestWithExpectedStatus(
httpClient,
method,
url,
content,
HttpResponseStatus.OK
);
}
private StatusResponseHolder makeRequestWithExpectedStatus(
HttpClient httpClient,
HttpMethod method,
String url,
byte[] content,
HttpResponseStatus expectedStatus
)
{
try {
Request request = new Request(method, new URL(url));
@@ -349,7 +623,7 @@ public class ITBasicAuthConfigurationTest
responseHandler
).get();
if (!response.getStatus().equals(expectedStatus)) {
String errMsg = StringUtils.format(
"Error while making request to url[%s] status[%s] content[%s]",
url,
@@ -357,7 +631,7 @@ public class ITBasicAuthConfigurationTest
response.getContent()
);
// it can take time for the auth config to propagate, so we retry
if (retryCount > 10) {
throw new ISE(errMsg);
} else {
LOG.error(errMsg);
@@ -375,4 +649,157 @@ public class ITBasicAuthConfigurationTest
throw new RuntimeException(e);
}
}
private void createUserAndRoleWithPermissions(
HttpClient adminClient,
String user,
String password,
String role,
List<ResourceAction> permissions
) throws Exception
{
makeRequest(
adminClient,
HttpMethod.POST,
StringUtils.format(
"%s/druid-ext/basic-security/authentication/db/basic/users/%s",
config.getCoordinatorUrl(),
user
),
null
);
makeRequest(
adminClient,
HttpMethod.POST,
StringUtils.format(
"%s/druid-ext/basic-security/authentication/db/basic/users/%s/credentials",
config.getCoordinatorUrl(),
user
),
jsonMapper.writeValueAsBytes(new BasicAuthenticatorCredentialUpdate(password, 5000))
);
makeRequest(
adminClient,
HttpMethod.POST,
StringUtils.format(
"%s/druid-ext/basic-security/authorization/db/basic/users/%s",
config.getCoordinatorUrl(),
user
),
null
);
makeRequest(
adminClient,
HttpMethod.POST,
StringUtils.format(
"%s/druid-ext/basic-security/authorization/db/basic/roles/%s",
config.getCoordinatorUrl(),
role
),
null
);
makeRequest(
adminClient,
HttpMethod.POST,
StringUtils.format(
"%s/druid-ext/basic-security/authorization/db/basic/users/%s/roles/%s",
config.getCoordinatorUrl(),
user,
role
),
null
);
byte[] permissionsBytes = jsonMapper.writeValueAsBytes(permissions);
makeRequest(
adminClient,
HttpMethod.POST,
StringUtils.format(
"%s/druid-ext/basic-security/authorization/db/basic/roles/%s/permissions",
config.getCoordinatorUrl(),
role
),
permissionsBytes
);
}
private StatusResponseHolder makeSQLQueryRequest(
HttpClient httpClient,
String query,
HttpResponseStatus expectedStatus
) throws Exception
{
Map<String, Object> queryMap = ImmutableMap.of(
"query", query
);
return makeRequestWithExpectedStatus(
httpClient,
HttpMethod.POST,
config.getBrokerUrl() + "/druid/v2/sql",
jsonMapper.writeValueAsBytes(queryMap),
expectedStatus
);
}
private void verifySystemSchemaQueryBase(
HttpClient client,
String query,
List<Map<String, Object>> expectedResults,
boolean isServerQuery
) throws Exception
{
StatusResponseHolder responseHolder = makeSQLQueryRequest(client, query, HttpResponseStatus.OK);
String content = responseHolder.getContent();
List<Map<String, Object>> responseMap = jsonMapper.readValue(content, SYS_SCHEMA_RESULTS_TYPE_REFERENCE);
if (isServerQuery) {
responseMap = getServersWithoutCurrentSize(responseMap);
}
Assert.assertEquals(responseMap, expectedResults);
}
private void verifySystemSchemaQuery(
HttpClient client,
String query,
List<Map<String, Object>> expectedResults
) throws Exception
{
verifySystemSchemaQueryBase(client, query, expectedResults, false);
}
private void verifySystemSchemaServerQuery(
HttpClient client,
String query,
List<Map<String, Object>> expectedResults
) throws Exception
{
verifySystemSchemaQueryBase(client, query, expectedResults, true);
}
private void verifySystemSchemaQueryFailure(
HttpClient client,
String query,
HttpResponseStatus expectedErrorStatus,
String expectedErrorMessage
) throws Exception
{
StatusResponseHolder responseHolder = makeSQLQueryRequest(client, query, expectedErrorStatus);
Assert.assertEquals(responseHolder.getStatus(), expectedErrorStatus);
Assert.assertEquals(responseHolder.getContent(), expectedErrorMessage);
}
/**
* curr_size on historicals changes because cluster state is not isolated across different
* integration tests; zero it out for consistent test results.
*/
private static List<Map<String, Object>> getServersWithoutCurrentSize(List<Map<String, Object>> servers)
{
return Lists.transform(
servers,
(server) -> {
Map<String, Object> newServer = new HashMap<>();
newServer.putAll(server);
newServer.put("curr_size", 0);
return newServer;
}
);
}
}


@@ -0,0 +1,18 @@
[
{
"segment_id": "auth_test_2012-12-29T00:00:00.000Z_2013-01-10T08:00:00.000Z_2013-01-10T08:13:47.830Z_v9",
"datasource": "auth_test",
"start": "2012-12-29T00:00:00.000Z",
"end": "2013-01-10T08:00:00.000Z",
"size": 446027801,
"version": "2013-01-10T08:13:47.830Z_v9",
"partition_num": 0,
"num_replicas": 1,
"num_rows": 4462111,
"is_published": 1,
"is_available": 1,
"is_realtime": 0,
"is_overshadowed": 0,
"payload": "{\"dataSegment\":{\"dataSource\":\"auth_test\",\"interval\":\"2012-12-29T00:00:00.000Z/2013-01-10T08:00:00.000Z\",\"version\":\"2013-01-10T08:13:47.830Z_v9\",\"loadSpec\":{\"load spec is pruned, because it's not needed on Brokers, but eats a lot of heap space\":\"\"},\"dimensions\":\"anonymous,area_code,city,continent_code,country_name,dma_code,geo,language,namespace,network,newpage,page,postal_code,region_lookup,robot,unpatrolled,user\",\"metrics\":\"added,count,deleted,delta,delta_hist,unique_users,variation\",\"shardSpec\":{\"type\":\"none\"},\"binaryVersion\":9,\"size\":446027801,\"identifier\":\"auth_test_2012-12-29T00:00:00.000Z_2013-01-10T08:00:00.000Z_2013-01-10T08:13:47.830Z_v9\"},\"overshadowed\":false}"
}
]


@@ -0,0 +1,6 @@
[
{
"server": "172.172.172.6:8283",
"segment_id": "auth_test_2012-12-29T00:00:00.000Z_2013-01-10T08:00:00.000Z_2013-01-10T08:13:47.830Z_v9"
}
]


@@ -0,0 +1,12 @@
[
{
"server": "172.172.172.6:8283",
"host": "172.172.172.6",
"plaintext_port": 8083,
"tls_port": 8283,
"server_type": "historical",
"tier": "_default_tier",
"curr_size": 2208932412,
"max_size": 5000000000
}
]


@@ -0,0 +1,17 @@
[
{
"task_id": "index_auth_test_2030-04-30T01:13:31.893Z",
"type": null,
"datasource": "auth_test",
"created_time": "2030-04-30T01:13:31.893Z",
"queue_insertion_time": "1970-01-01T00:00:00.000Z",
"status": "SUCCESS",
"runner_status": "NONE",
"duration": 1,
"location": null,
"host": null,
"plaintext_port": -1,
"tls_port": -1,
"error_msg": null
}
]


@@ -49,7 +49,6 @@ import org.apache.druid.server.security.AuthorizationUtils;
import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.Resource;
import org.apache.druid.server.security.ResourceAction;
import org.apache.druid.server.security.ResourceType;
import org.apache.druid.utils.Runnables;
import org.joda.time.DateTime;
@@ -349,7 +348,7 @@ public class EventReceiverFirehoseFactory implements FirehoseFactory<InputRowParser<Map<String, Object>>>
Access accessResult = AuthorizationUtils.authorizeResourceAction(
req,
new ResourceAction(
Resource.STATE_RESOURCE,
Action.WRITE
),
authorizerMapper
@@ -538,7 +537,7 @@ public class EventReceiverFirehoseFactory implements FirehoseFactory<InputRowParser<Map<String, Object>>>
Access accessResult = AuthorizationUtils.authorizeResourceAction(
req,
new ResourceAction(
Resource.STATE_RESOURCE,
Action.WRITE
),
authorizerMapper


@@ -27,7 +27,6 @@ import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
import org.apache.druid.server.security.Resource;
import org.apache.druid.server.security.ResourceAction;
import org.apache.druid.server.security.ResourceType;
/**
* Use this ResourceFilter at end points where Druid Cluster State is read or written
@@ -58,7 +57,7 @@ public class StateResourceFilter extends AbstractResourceFilter
public ContainerRequest filter(ContainerRequest request)
{
final ResourceAction resourceAction = new ResourceAction(
Resource.STATE_RESOURCE,
getAction(request)
);


@@ -24,6 +24,8 @@ import com.fasterxml.jackson.annotation.JsonProperty;
public class Resource
{
public static final Resource STATE_RESOURCE = new Resource("STATE", ResourceType.STATE);
private final String name;
private final ResourceType type;


@@ -64,7 +64,6 @@ import org.apache.druid.server.security.AuthorizerMapper;
import org.apache.druid.server.security.ForbiddenException;
import org.apache.druid.server.security.Resource;
import org.apache.druid.server.security.ResourceAction;
import org.apache.druid.server.security.ResourceType;
import org.apache.druid.sql.calcite.planner.PlannerContext;
import org.apache.druid.sql.calcite.table.RowSignature;
import org.apache.druid.timeline.DataSegment;
@@ -93,6 +92,17 @@ public class SystemSchema extends AbstractSchema
private static final String SERVER_SEGMENTS_TABLE = "server_segments";
private static final String TASKS_TABLE = "tasks";
private static final Function<SegmentWithOvershadowedStatus, Iterable<ResourceAction>>
SEGMENT_WITH_OVERSHADOWED_STATUS_RA_GENERATOR = segment ->
Collections.singletonList(AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR.apply(
segment.getDataSegment().getDataSource())
);
private static final Function<DataSegment, Iterable<ResourceAction>> SEGMENT_RA_GENERATOR =
segment -> Collections.singletonList(AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR.apply(
segment.getDataSource())
);
/**
* Boolean constants represented as long type,
* where 1 = true and 0 = false to make it easy to count number of segments
@@ -338,13 +348,10 @@ public class SystemSchema extends AbstractSchema
final AuthenticationResult authenticationResult =
(AuthenticationResult) root.get(PlannerContext.DATA_CTX_AUTHENTICATION_RESULT);
Function<SegmentWithOvershadowedStatus, Iterable<ResourceAction>> raGenerator = segment -> Collections.singletonList(
AuthorizationUtils.DATASOURCE_READ_RA_GENERATOR.apply(segment.getDataSegment().getDataSource()));
final Iterable<SegmentWithOvershadowedStatus> authorizedSegments = AuthorizationUtils.filterAuthorizedResources(
authenticationResult,
() -> it,
SEGMENT_WITH_OVERSHADOWED_STATUS_RA_GENERATOR,
authorizerMapper
);
return authorizedSegments.iterator();
@@ -448,14 +455,9 @@ public class SystemSchema extends AbstractSchema
final List<ImmutableDruidServer> druidServers = serverView.getDruidServers();
final AuthenticationResult authenticationResult =
(AuthenticationResult) root.get(PlannerContext.DATA_CTX_AUTHENTICATION_RESULT);
final Access access = AuthorizationUtils.authorizeAllResourceActions(
authenticationResult,
Collections.singletonList(new ResourceAction(new Resource("STATE", ResourceType.STATE), Action.READ)),
authorizerMapper
);
if (!access.isAllowed()) {
throw new ForbiddenException("Insufficient permission to view servers :" + access);
}
checkStateReadAccessForServers(authenticationResult, authorizerMapper);
final FluentIterable<Object[]> results = FluentIterable
.from(druidServers)
.transform(val -> new Object[]{
@@ -501,11 +503,23 @@ public class SystemSchema extends AbstractSchema
@Override
public Enumerable<Object[]> scan(DataContext root)
{
final AuthenticationResult authenticationResult =
(AuthenticationResult) root.get(PlannerContext.DATA_CTX_AUTHENTICATION_RESULT);
checkStateReadAccessForServers(authenticationResult, authorizerMapper);
final List<Object[]> rows = new ArrayList<>();
final List<ImmutableDruidServer> druidServers = serverView.getDruidServers();
final int serverSegmentsTableSize = SERVER_SEGMENTS_SIGNATURE.getRowOrder().size();
for (ImmutableDruidServer druidServer : druidServers) {
final Iterable<DataSegment> authorizedServerSegments = AuthorizationUtils.filterAuthorizedResources(
authenticationResult,
druidServer.getLazyAllSegments(),
SEGMENT_RA_GENERATOR,
authorizerMapper
);
for (DataSegment segment : authorizedServerSegments) {
Object[] row = new Object[serverSegmentsTableSize];
row[0] = druidServer.getHost();
row[1] = segment.getId();
@@ -759,4 +773,22 @@ public class SystemSchema extends AbstractSchema
return object.toString();
}
/**
* Checks if an authenticated user has the STATE READ permissions needed to view server information.
*/
private static void checkStateReadAccessForServers(
AuthenticationResult authenticationResult,
AuthorizerMapper authorizerMapper
)
{
final Access stateAccess = AuthorizationUtils.authorizeAllResourceActions(
authenticationResult,
Collections.singletonList(new ResourceAction(Resource.STATE_RESOURCE, Action.READ)),
authorizerMapper
);
if (!stateAccess.isAllowed()) {
throw new ForbiddenException("Insufficient permission to view servers : " + stateAccess);
}
}
}