Merge branch 'master' into enhancement/use_shard_bulk_for_single_ops
Original commit: elastic/x-pack-elasticsearch@089fa9977d
commit 93720505b8
@@ -51,7 +51,7 @@ gradle clean assemble
gradle clean install
-----

- If you don't work on the UI side of x-plugins, you can force gradle to skip building kibana by adding
`xpack.kibana.build=false` to your `~/.gradle/gradle.properties`. Alternatively you add `-Pxpack.kibana.build=false`
- If you don't work on the UI/Logstash side of x-plugins, you can force gradle to skip building kibana and/or Logstash by adding
`xpack.kibana.build=false`/`xpack.logstash.build=false` to your `~/.gradle/gradle.properties`. Alternatively you add `-Pxpack.kibana.build=false` or `-Pxpack.logstash.build=false`
on the command line if you only want to do this on individual builds (or `-Pxpack.kibana.build=true` if you need to
override having added this to your `gradle.properties`).
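For example (a minimal sketch of the properties described above; adjust as needed), `~/.gradle/gradle.properties` could contain:

-----
xpack.kibana.build=false
xpack.logstash.build=false
-----

or the same flags can be passed per invocation, e.g. `gradle -Pxpack.kibana.build=false -Pxpack.logstash.build=false clean install`.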
@@ -32,10 +32,13 @@ subprojects {

task bundlePack(type: Zip) {
onlyIf { project('kibana').bundlePlugin.enabled }
onlyIf { project('logstash').bundlePlugin.enabled }
dependsOn 'elasticsearch:bundlePlugin'
dependsOn 'kibana:bundlePlugin'
dependsOn 'logstash:bundlePlugin'
from { zipTree(project('elasticsearch').bundlePlugin.outputs.files.singleFile) }
from { zipTree(project('kibana').bundlePlugin.outputs.files.singleFile) }
from { zipTree(project('logstash').bundlePlugin.outputs.files.singleFile) }
destinationDir file('build/distributions')
baseName = 'x-pack'
version = VersionProperties.elasticsearch
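With the Logstash dependency and zipTree entries added above, the combined distribution is presumably still produced by running `gradle bundlePack`, which should leave an `x-pack-<elasticsearch version>.zip` under `build/distributions` (inferred from the `destinationDir`, `baseName` and `version` settings of this task, not stated elsewhere in this commit).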
@@ -52,6 +52,7 @@ dependencies {

// common test deps
testCompile 'org.elasticsearch:securemock:1.2'
testCompile "org.elasticsearch:mocksocket:${versions.mocksocket}"
testCompile 'org.slf4j:slf4j-log4j12:1.6.2'
testCompile 'org.slf4j:slf4j-api:1.6.2'
}
@@ -239,3 +240,9 @@ thirdPartyAudit.excludes = [
'javax.activation.UnsupportedDataTypeException'
]

run {
setting 'xpack.graph.enabled', 'true'
setting 'xpack.security.enabled', 'true'
setting 'xpack.monitoring.enabled', 'true'
setting 'xpack.watcher.enabled', 'true'
}
@@ -12,6 +12,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -29,7 +30,7 @@ import java.util.Locale;
 * Data structure for license. Use {@link Builder} to build a license.
 * Provides serialization/deserialization & validation methods for license object
 */
public class License implements ToXContent {
public class License implements ToXContentObject {
public static final int VERSION_START = 1;
public static final int VERSION_NO_FEATURE_TYPE = 2;
public static final int VERSION_START_DATE = 3;
@@ -6,6 +6,8 @@
package org.elasticsearch.license;

import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.cluster.AbstractNamedDiffable;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -20,7 +22,7 @@ import java.util.EnumSet;
/**
 * Contains metadata about registered licenses
 */
class LicensesMetaData extends AbstractDiffable<MetaData.Custom> implements MetaData.Custom,
class LicensesMetaData extends AbstractNamedDiffable<MetaData.Custom> implements MetaData.Custom,
TribeService.MergableCustomMetaData<LicensesMetaData> {

public static final String TYPE = "licenses";
@@ -45,8 +47,6 @@ class LicensesMetaData extends AbstractDiffable<MetaData.Custom> implements Meta
.expiryDate(0)
.build();

public static final LicensesMetaData PROTO = new LicensesMetaData(null);

private License license;

public LicensesMetaData(License license) {
@@ -79,7 +79,7 @@ class LicensesMetaData extends AbstractDiffable<MetaData.Custom> implements Meta
}

@Override
public String type() {
public String getWriteableName() {
return TYPE;
}
@@ -88,8 +88,7 @@ class LicensesMetaData extends AbstractDiffable<MetaData.Custom> implements Meta
return EnumSet.of(MetaData.XContentContext.GATEWAY);
}

@Override
public LicensesMetaData fromXContent(XContentParser parser) throws IOException {
public static LicensesMetaData fromXContent(XContentParser parser) throws IOException {
License license = LICENSE_TOMBSTONE;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
@@ -132,13 +131,16 @@ class LicensesMetaData extends AbstractDiffable<MetaData.Custom> implements Meta
}
}

@Override
public LicensesMetaData readFrom(StreamInput streamInput) throws IOException {
License license = LICENSE_TOMBSTONE;
public LicensesMetaData(StreamInput streamInput) throws IOException {
if (streamInput.readBoolean()) {
license = License.readLicense(streamInput);
} else {
license = LICENSE_TOMBSTONE;
}
return new LicensesMetaData(license);
}

public static NamedDiff<MetaData.Custom> readDiffFrom(StreamInput streamInput) throws IOException {
return readDiffFrom(MetaData.Custom.class, TYPE, streamInput);
}

@Override
@@ -7,12 +7,17 @@ package org.elasticsearch.license;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.rest.RestHandler;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@@ -28,16 +33,20 @@ public class Licensing implements ActionPlugin {
protected final boolean isTransportClient;
private final boolean isTribeNode;

static {
// we have to make sure we don't override the prototype, if we already
// registered. This causes class cast exceptions while casting license
// meta data on tribe node, as the registration happens for every tribe
// client nodes and the tribe node itself
if (MetaData.lookupPrototype(LicensesMetaData.TYPE) == null) {
MetaData.registerPrototype(LicensesMetaData.TYPE, LicensesMetaData.PROTO);
}
public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.add(new NamedWriteableRegistry.Entry(MetaData.Custom.class, LicensesMetaData.TYPE, LicensesMetaData::new));
entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, LicensesMetaData.TYPE, LicensesMetaData::readDiffFrom));
return entries;
}

public List<NamedXContentRegistry.Entry> getNamedXContent() {
List<NamedXContentRegistry.Entry> entries = new ArrayList<>();
// Metadata
entries.add(new NamedXContentRegistry.Entry(MetaData.Custom.class, new ParseField(LicensesMetaData.TYPE),
LicensesMetaData::fromXContent));
return entries;
}
public Licensing(Settings settings) {
this.settings = settings;
isTransportClient = transportClientMode(settings);
@@ -11,6 +11,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.XPackBuild;

@@ -140,7 +141,7 @@ public class XPackInfoResponse extends ActionResponse {
}
}

public static class BuildInfo implements ToXContent, Writeable {
public static class BuildInfo implements ToXContentObject, Writeable {

private final String hash;
private final String timestamp;
@@ -180,7 +181,7 @@ public class XPackInfoResponse extends ActionResponse {
}
}

public static class FeatureSetsInfo implements ToXContent, Writeable {
public static class FeatureSetsInfo implements ToXContentObject, Writeable {

private final Map<String, FeatureSet> featureSets;
@@ -7,7 +7,10 @@ package org.elasticsearch.license;

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.BiFunction;

import org.elasticsearch.common.Strings;
@@ -176,10 +179,17 @@ public class XPackLicenseState {
}
}
private volatile Status status = new Status(OperationMode.TRIAL, true);
private final List<Runnable> listeners = new CopyOnWriteArrayList<>();

/** Updates the current state of the license, which will change what features are available. */
void update(OperationMode mode, boolean active) {
status = new Status(mode, active);
listeners.forEach(Runnable::run);
}

/** Add a listener to be notified on license change */
public void addListener(Runnable runnable) {
listeners.add(Objects.requireNonNull(runnable));
}

/** Return the current license type. */
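The new listener hook takes a plain `Runnable` that is invoked from `update(mode, active)`. A consumer might register itself roughly like this (sketch only; `recomputeCachedFlags` is a hypothetical helper, not part of this change):

licenseState.addListener(() -> {
    // re-derive any cached, license-dependent flags whenever update(mode, active) fires
    recomputeCachedFlags();
});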
@@ -8,7 +8,7 @@ package org.elasticsearch.xpack;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
@@ -25,7 +25,7 @@ public interface XPackFeatureSet {

Usage usage();

abstract class Usage implements ToXContent, NamedWriteable {
abstract class Usage implements ToXContentObject, NamedWriteable {

private static final String AVAILABLE_XFIELD = "available";
private static final String ENABLED_XFIELD = "enabled";
@@ -408,12 +408,23 @@ public class XPackPlugin extends Plugin implements ScriptPlugin, ActionPlugin, I

@Override
public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
return Arrays.asList(
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, SECURITY, SecurityFeatureSet.Usage::new),
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, WATCHER, WatcherFeatureSet.Usage::new),
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, MONITORING, MonitoringFeatureSet.Usage::new),
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, GRAPH, GraphFeatureSet.Usage::new)
);
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.add(new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, SECURITY, SecurityFeatureSet.Usage::new));
entries.add(new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, WATCHER, WatcherFeatureSet.Usage::new));
entries.add(new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, MONITORING, MonitoringFeatureSet.Usage::new));
entries.add(new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, GRAPH, GraphFeatureSet.Usage::new));
entries.addAll(watcher.getNamedWriteables());
entries.addAll(licensing.getNamedWriteables());
return entries;
}

@Override
public List<NamedXContentRegistry.Entry> getNamedXContent() {
List<NamedXContentRegistry.Entry> entries = new ArrayList<>();
entries.addAll(watcher.getNamedXContent());
entries.addAll(licensing.getNamedXContent());
return entries;

}

public void onIndexModule(IndexModule module) {
@@ -5,8 +5,31 @@
 */
package org.elasticsearch.xpack.common.http;

import org.elasticsearch.ElasticsearchTimeoutException;
import org.elasticsearch.SpecialPermission;
import org.apache.http.Header;
import org.apache.http.HttpHeaders;
import org.apache.http.HttpHost;
import org.apache.http.NameValuePair;
import org.apache.http.auth.AuthScope;
import org.apache.http.client.AuthCache;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.client.utils.URIUtils;
import org.apache.http.client.utils.URLEncodedUtils;
import org.apache.http.conn.ssl.DefaultHostnameVerifier;
import org.apache.http.conn.ssl.NoopHostnameVerifier;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.auth.BasicScheme;
import org.apache.http.impl.client.BasicAuthCache;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.message.BasicNameValuePair;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.io.Streams;
@@ -17,34 +40,27 @@ import org.elasticsearch.xpack.common.http.auth.HttpAuthRegistry;
import org.elasticsearch.xpack.ssl.SSLService;

import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
import javax.net.ssl.SSLSession;
import javax.net.ssl.SSLSocketFactory;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.SocketTimeoutException;
import java.net.URL;
import java.net.URLEncoder;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Client class to wrap http connections
 */
public class HttpClient extends AbstractComponent {

private static final String SETTINGS_SSL_PREFIX = "xpack.http.ssl.";

private final HttpAuthRegistry httpAuthRegistry;
private final CloseableHttpClient client;
private final Integer proxyPort;
private final String proxyHost;
private final TimeValue defaultConnectionTimeout;
private final TimeValue defaultReadTimeout;
private final boolean isHostnameVerificationEnabled;
private final SSLSocketFactory sslSocketFactory;
private final HttpProxy proxy;

public HttpClient(Settings settings, HttpAuthRegistry httpAuthRegistry, SSLService sslService) {
super(settings);
@@ -52,148 +68,158 @@ public class HttpClient extends AbstractComponent {
this.defaultConnectionTimeout = HttpSettings.CONNECTION_TIMEOUT.get(settings);
this.defaultReadTimeout = HttpSettings.READ_TIMEOUT.get(settings);

final Integer proxyPort;
if (HttpSettings.PROXY_HOST.exists(settings)) {
proxyPort = HttpSettings.PROXY_PORT.get(settings);
} else {
proxyPort = null;
}
final String proxyHost = HttpSettings.PROXY_HOST.get(settings);
if (proxyPort != null && Strings.hasText(proxyHost)) {
this.proxy = new HttpProxy(proxyHost, proxyPort);
// proxy setup
this.proxyHost = HttpSettings.PROXY_HOST.get(settings);
this.proxyPort = HttpSettings.PROXY_PORT.get(settings);
if (proxyPort != 0 && Strings.hasText(proxyHost)) {
logger.info("Using default proxy for http input and slack/hipchat/pagerduty/webhook actions [{}:{}]", proxyHost, proxyPort);
} else if (proxyPort == null && Strings.hasText(proxyHost) == false) {
this.proxy = HttpProxy.NO_PROXY;
} else {
throw new IllegalArgumentException("HTTP Proxy requires both settings: [" + HttpSettings.PROXY_HOST_KEY + "] and [" +
HttpSettings.PROXY_PORT_KEY + "]");
} else if (proxyPort != 0 ^ Strings.hasText(proxyHost)) {
throw new IllegalArgumentException("HTTP proxy requires both settings: [" + HttpSettings.PROXY_HOST.getKey() + "] and [" +
HttpSettings.PROXY_PORT.getKey() + "]");
}
Settings sslSettings = settings.getByPrefix(HttpSettings.SSL_KEY_PREFIX);
this.sslSocketFactory = sslService.sslSocketFactory(settings.getByPrefix(HttpSettings.SSL_KEY_PREFIX));
this.isHostnameVerificationEnabled = sslService.getVerificationMode(sslSettings, Settings.EMPTY).isHostnameVerificationEnabled();

HttpClientBuilder clientBuilder = HttpClientBuilder.create();

// ssl setup
Settings sslSettings = settings.getByPrefix(SETTINGS_SSL_PREFIX);
boolean isHostnameVerificationEnabled = sslService.getVerificationMode(sslSettings, Settings.EMPTY).isHostnameVerificationEnabled();
HostnameVerifier verifier = isHostnameVerificationEnabled ? new DefaultHostnameVerifier() : NoopHostnameVerifier.INSTANCE;
SSLConnectionSocketFactory factory = new SSLConnectionSocketFactory(sslService.sslSocketFactory(sslSettings), verifier);
clientBuilder.setSSLSocketFactory(factory);

client = clientBuilder.build();
}

public HttpResponse execute(HttpRequest request) throws IOException {
try {
return doExecute(request);
} catch (SocketTimeoutException ste) {
throw new ElasticsearchTimeoutException("failed to execute http request. timeout expired", ste);
URI uri = createURI(request);

HttpRequestBase internalRequest;
if (request.method == HttpMethod.HEAD) {
internalRequest = new HttpHead(uri);
} else {
HttpMethodWithEntity methodWithEntity = new HttpMethodWithEntity(uri, request.method.name());
if (request.body != null) {
methodWithEntity.setEntity(new StringEntity(request.body));
}
internalRequest = methodWithEntity;
}
internalRequest.setHeader(HttpHeaders.ACCEPT_CHARSET, StandardCharsets.UTF_8.name());

RequestConfig.Builder config = RequestConfig.custom();

// headers
if (request.headers().isEmpty() == false) {
for (Map.Entry<String, String> entry : request.headers.entrySet()) {
internalRequest.setHeader(entry.getKey(), entry.getValue());
}
}

public HttpResponse doExecute(HttpRequest request) throws IOException {
String queryString = null;
if (request.params() != null && !request.params().isEmpty()) {
StringBuilder builder = new StringBuilder();
for (Map.Entry<String, String> entry : request.params().entrySet()) {
if (builder.length() != 0) {
builder.append('&');
}
builder.append(URLEncoder.encode(entry.getKey(), "UTF-8"))
.append('=')
.append(URLEncoder.encode(entry.getValue(), "UTF-8"));
}
queryString = builder.toString();
// proxy
if (request.proxy != null && request.proxy.equals(HttpProxy.NO_PROXY) == false) {
HttpHost proxy = new HttpHost(request.proxy.getHost(), request.proxy.getPort(), request.scheme.scheme());
config.setProxy(proxy);
} else if (proxyPort != null && Strings.hasText(proxyHost)) {
HttpHost proxy = new HttpHost(proxyHost, proxyPort, request.scheme.scheme());
config.setProxy(proxy);
}

String path = Strings.hasLength(request.path) ? request.path : "";
if (Strings.hasLength(queryString)) {
path += "?" + queryString;
}
URL url = new URL(request.scheme.scheme(), request.host, request.port, path);

logger.debug("making [{}] request to [{}]", request.method().method(), url);
logger.trace("sending [{}] as body of request", request.body());

// proxy configured in the request always wins!
HttpProxy proxyToUse = request.proxy != null ? request.proxy : proxy;

HttpURLConnection urlConnection = (HttpURLConnection) url.openConnection(proxyToUse.proxy());
if (urlConnection instanceof HttpsURLConnection) {
final HttpsURLConnection httpsConn = (HttpsURLConnection) urlConnection;
final SSLSocketFactory factory = sslSocketFactory;
SecurityManager sm = System.getSecurityManager();
if (sm != null) {
sm.checkPermission(new SpecialPermission());
}
AccessController.doPrivileged(new PrivilegedAction<Void>() {
@Override
public Void run() {
httpsConn.setSSLSocketFactory(factory);
if (isHostnameVerificationEnabled == false) {
httpsConn.setHostnameVerifier(NoopHostnameVerifier.INSTANCE);
}
return null;
}
});
}

urlConnection.setRequestMethod(request.method().method());
if (request.headers() != null) {
for (Map.Entry<String, String> entry : request.headers().entrySet()) {
urlConnection.setRequestProperty(entry.getKey(), entry.getValue());
}
}
HttpClientContext localContext = HttpClientContext.create();
// auth
if (request.auth() != null) {
logger.trace("applying auth headers");
ApplicableHttpAuth applicableAuth = httpAuthRegistry.createApplicable(request.auth);
applicableAuth.apply(urlConnection);
}
urlConnection.setUseCaches(false);
urlConnection.setRequestProperty("Accept-Charset", StandardCharsets.UTF_8.name());
if (request.body() != null) {
urlConnection.setDoOutput(true);
byte[] bytes = request.body().getBytes(StandardCharsets.UTF_8.name());
urlConnection.setRequestProperty("Content-Length", String.valueOf(bytes.length));
urlConnection.getOutputStream().write(bytes);
urlConnection.getOutputStream().close();
CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
applicableAuth.apply(credentialsProvider, new AuthScope(request.host, request.port));
localContext.setCredentialsProvider(credentialsProvider);

// preemptive auth, no need to wait for a 401 first
AuthCache authCache = new BasicAuthCache();
BasicScheme basicAuth = new BasicScheme();
authCache.put(new HttpHost(request.host, request.port, request.scheme.scheme()), basicAuth);
localContext.setAuthCache(authCache);
}

TimeValue connectionTimeout = request.connectionTimeout != null ? request.connectionTimeout : defaultConnectionTimeout;
urlConnection.setConnectTimeout((int) connectionTimeout.millis());
// timeouts
if (request.connectionTimeout() != null) {

TimeValue readTimeout = request.readTimeout != null ? request.readTimeout : defaultReadTimeout;
urlConnection.setReadTimeout((int) readTimeout.millis());

urlConnection.connect();

final int statusCode = urlConnection.getResponseCode();
// no status code, not considered a valid HTTP response then
if (statusCode == -1) {
throw new IOException("Not a valid HTTP response, no status code in response");
config.setConnectTimeout(Math.toIntExact(request.connectionTimeout.millis()));
} else {
config.setConnectTimeout(Math.toIntExact(defaultConnectionTimeout.millis()));
}
Map<String, String[]> responseHeaders = new HashMap<>(urlConnection.getHeaderFields().size());
for (Map.Entry<String, List<String>> header : urlConnection.getHeaderFields().entrySet()) {
// HttpURLConnection#getHeaderFields returns the first status line as a header
// with a `null` key (facepalm)... so we have to skip that one.
if (header.getKey() != null) {
responseHeaders.put(header.getKey(), header.getValue().toArray(new String[header.getValue().size()]));

if (request.readTimeout() != null) {
config.setSocketTimeout(Math.toIntExact(request.readTimeout.millis()));
config.setConnectionRequestTimeout(Math.toIntExact(request.readTimeout.millis()));
} else {
config.setSocketTimeout(Math.toIntExact(defaultReadTimeout.millis()));
config.setConnectionRequestTimeout(Math.toIntExact(defaultReadTimeout.millis()));
}

internalRequest.setConfig(config.build());

try (CloseableHttpResponse response = client.execute(internalRequest, localContext)) {
// headers
Header[] headers = response.getAllHeaders();
Map<String, String[]> responseHeaders = new HashMap<>(headers.length);
for (Header header : headers) {
if (responseHeaders.containsKey(header.getName())) {
String[] old = responseHeaders.get(header.getName());
String[] values = new String[old.length + 1];

System.arraycopy(old, 0, values, 0, old.length);
values[values.length-1] = header.getValue();

responseHeaders.put(header.getName(), values);
} else {
responseHeaders.put(header.getName(), new String[]{header.getValue()});
}
}
logger.debug("http status code [{}]", statusCode);

final byte[] body;
// not every response has a content, i.e. 204
if (response.getEntity() == null) {
body = new byte[0];
} else {
try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
try (InputStream is = urlConnection.getInputStream()) {
try (InputStream is = response.getEntity().getContent()) {
Streams.copy(is, outputStream);
} catch (Exception e) {
if (urlConnection.getErrorStream() != null) {
try (InputStream is = urlConnection.getErrorStream()) {
Streams.copy(is, outputStream);
}
}
}
body = outputStream.toByteArray();
}
return new HttpResponse(statusCode, body, responseHeaders);
}
return new HttpResponse(response.getStatusLine().getStatusCode(), body, responseHeaders);
}
}

private static final class NoopHostnameVerifier implements HostnameVerifier {
private URI createURI(HttpRequest request) {
// this could be really simple, as the apache http client has a UriBuilder class, however this class is always doing
// url path escaping, and we have done this already, so this would result in double escaping
try {
List<NameValuePair> qparams = new ArrayList<>(request.params.size());
request.params.forEach((k, v)-> qparams.add(new BasicNameValuePair(k, v)));
URI uri = URIUtils.createURI(request.scheme.scheme(), request.host, request.port, request.path,
URLEncodedUtils.format(qparams, "UTF-8"), null);

private static final HostnameVerifier INSTANCE = new NoopHostnameVerifier();
return uri;
} catch (URISyntaxException e) {
throw new IllegalArgumentException(e);
}
}

/**
 * Helper class to have all HTTP methods except HEAD allow for an body, including GET
 */
final class HttpMethodWithEntity extends HttpEntityEnclosingRequestBase {

private final String methodName;

HttpMethodWithEntity(final URI uri, String methodName) {
this.methodName = methodName;
setURI(uri);
}

@Override
public boolean verify(String s, SSLSession sslSession) {
return true;
public String getMethod() {
return methodName;
}
}
}
@@ -96,9 +96,9 @@ public class HttpProxy implements ToXContent, Streamable {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.HOST)) {
} else if (Field.HOST.match(currentFieldName)) {
host = parser.text();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.PORT)) {
} else if (Field.PORT.match(currentFieldName)) {
port = parser.intValue();
if (port <= 0 || port >= 65535) {
throw new ElasticsearchParseException("Proxy port must be between 1 and 65534, but was " + port);
@@ -8,10 +8,10 @@ package org.elasticsearch.xpack.common.http;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -35,7 +35,7 @@ import java.util.Objects;
import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;

public class HttpRequest implements ToXContent {
public class HttpRequest implements ToXContentObject {

final String host;
final int port;
@@ -256,17 +256,17 @@ public class HttpRequest implements ToXContent {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.PROXY)) {
} else if (Field.PROXY.match(currentFieldName)) {
try {
builder.proxy(HttpProxy.parse(parser));
} catch (Exception e) {
throw new ElasticsearchParseException("could not parse http request. could not parse [{}] field", currentFieldName);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.AUTH)) {
} else if (Field.AUTH.match(currentFieldName)) {
builder.auth(httpAuthRegistry.parse(parser));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.CONNECTION_TIMEOUT)) {
} else if (HttpRequest.Field.CONNECTION_TIMEOUT.match(currentFieldName)) {
builder.connectionTimeout(TimeValue.timeValueMillis(parser.longValue()));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.CONNECTION_TIMEOUT_HUMAN)) {
} else if (HttpRequest.Field.CONNECTION_TIMEOUT_HUMAN.match(currentFieldName)) {
// Users and 2.x specify the timeout this way
try {
builder.connectionTimeout(WatcherDateTimeUtils.parseTimeValue(parser,
@@ -275,9 +275,9 @@ public class HttpRequest implements ToXContent {
throw new ElasticsearchParseException("could not parse http request template. invalid time value for [{}] field",
pe, currentFieldName);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.READ_TIMEOUT)) {
} else if (HttpRequest.Field.READ_TIMEOUT.match(currentFieldName)) {
builder.readTimeout(TimeValue.timeValueMillis(parser.longValue()));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.READ_TIMEOUT_HUMAN)) {
} else if (HttpRequest.Field.READ_TIMEOUT_HUMAN.match(currentFieldName)) {
// Users and 2.x specify the timeout this way
try {
builder.readTimeout(WatcherDateTimeUtils.parseTimeValue(parser, HttpRequest.Field.READ_TIMEOUT.toString()));
@@ -286,35 +286,35 @@ public class HttpRequest implements ToXContent {
pe, currentFieldName);
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.HEADERS)) {
if (Field.HEADERS.match(currentFieldName)) {
builder.setHeaders((Map) WatcherUtils.flattenModel(parser.map()));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.PARAMS)) {
} else if (Field.PARAMS.match(currentFieldName)) {
builder.setParams((Map) WatcherUtils.flattenModel(parser.map()));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.BODY)) {
} else if (Field.BODY.match(currentFieldName)) {
builder.body(parser.text());
} else {
throw new ElasticsearchParseException("could not parse http request. unexpected object field [{}]",
currentFieldName);
}
} else if (token == XContentParser.Token.VALUE_STRING) {
if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.SCHEME)) {
if (Field.SCHEME.match(currentFieldName)) {
builder.scheme(Scheme.parse(parser.text()));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.METHOD)) {
} else if (Field.METHOD.match(currentFieldName)) {
builder.method(HttpMethod.parse(parser.text()));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.HOST)) {
} else if (Field.HOST.match(currentFieldName)) {
builder.host = parser.text();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.PATH)) {
} else if (Field.PATH.match(currentFieldName)) {
builder.path(parser.text());
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.BODY)) {
} else if (Field.BODY.match(currentFieldName)) {
builder.body(parser.text());
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.URL)) {
} else if (Field.URL.match(currentFieldName)) {
builder.fromUrl(parser.text());
} else {
throw new ElasticsearchParseException("could not parse http request. unexpected string field [{}]",
currentFieldName);
}
} else if (token == XContentParser.Token.VALUE_NUMBER) {
if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.PORT)) {
if (Field.PORT.match(currentFieldName)) {
builder.port = parser.intValue();
} else {
throw new ElasticsearchParseException("could not parse http request. unexpected numeric field [{}]",
@@ -460,8 +460,8 @@ public class HttpRequest implements ToXContent {
scheme = Scheme.parse(uri.getScheme());
port = uri.getPort() > 0 ? uri.getPort() : scheme.defaultPort();
host = uri.getHost();
if (Strings.hasLength(uri.getPath())) {
path = uri.getPath();
if (Strings.hasLength(uri.getRawPath())) {
path = uri.getRawPath();
}
String rawQuery = uri.getRawQuery();
if (Strings.hasLength(rawQuery)) {
@@ -8,11 +8,11 @@ package org.elasticsearch.xpack.common.http;
import io.netty.handler.codec.http.HttpHeaders;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.RestUtils;
@@ -33,7 +33,7 @@ import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;
import static java.util.Collections.unmodifiableMap;

public class HttpRequestTemplate implements ToXContent {
public class HttpRequestTemplate implements ToXContentObject {

private final Scheme scheme;
private final String host;
@@ -277,21 +277,21 @@ public class HttpRequestTemplate implements ToXContent {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.PROXY)) {
} else if (HttpRequest.Field.PROXY.match(currentFieldName)) {
builder.proxy(HttpProxy.parse(parser));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.PATH)) {
} else if (HttpRequest.Field.PATH.match(currentFieldName)) {
builder.path(parseFieldTemplate(currentFieldName, parser));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.HEADERS)) {
} else if (HttpRequest.Field.HEADERS.match(currentFieldName)) {
builder.putHeaders(parseFieldTemplates(currentFieldName, parser));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.PARAMS)) {
} else if (HttpRequest.Field.PARAMS.match(currentFieldName)) {
builder.putParams(parseFieldTemplates(currentFieldName, parser));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.BODY)) {
} else if (HttpRequest.Field.BODY.match(currentFieldName)) {
builder.body(parseFieldTemplate(currentFieldName, parser));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.URL)) {
} else if (HttpRequest.Field.URL.match(currentFieldName)) {
builder.fromUrl(parser.text());
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.CONNECTION_TIMEOUT)) {
} else if (HttpRequest.Field.CONNECTION_TIMEOUT.match(currentFieldName)) {
builder.connectionTimeout(TimeValue.timeValueMillis(parser.longValue()));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.CONNECTION_TIMEOUT_HUMAN)) {
} else if (HttpRequest.Field.CONNECTION_TIMEOUT_HUMAN.match(currentFieldName)) {
// Users and 2.x specify the timeout this way
try {
builder.connectionTimeout(WatcherDateTimeUtils.parseTimeValue(parser,
@@ -300,9 +300,9 @@ public class HttpRequestTemplate implements ToXContent {
throw new ElasticsearchParseException("could not parse http request template. invalid time value for [{}] field",
pe, currentFieldName);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.READ_TIMEOUT)) {
} else if (HttpRequest.Field.READ_TIMEOUT.match(currentFieldName)) {
builder.readTimeout(TimeValue.timeValueMillis(parser.longValue()));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.READ_TIMEOUT_HUMAN)) {
} else if (HttpRequest.Field.READ_TIMEOUT_HUMAN.match(currentFieldName)) {
// Users and 2.x specify the timeout this way
try {
builder.readTimeout(WatcherDateTimeUtils.parseTimeValue(parser, HttpRequest.Field.READ_TIMEOUT.toString()));
@@ -311,25 +311,25 @@ public class HttpRequestTemplate implements ToXContent {
pe, currentFieldName);
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.AUTH)) {
if (HttpRequest.Field.AUTH.match(currentFieldName)) {
builder.auth(httpAuthRegistry.parse(parser));
} else {
throw new ElasticsearchParseException("could not parse http request template. unexpected object field [{}]",
currentFieldName);
}
} else if (token == XContentParser.Token.VALUE_STRING) {
if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.SCHEME)) {
if (HttpRequest.Field.SCHEME.match(currentFieldName)) {
builder.scheme(Scheme.parse(parser.text()));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.METHOD)) {
} else if (HttpRequest.Field.METHOD.match(currentFieldName)) {
builder.method(HttpMethod.parse(parser.text()));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.HOST)) {
} else if (HttpRequest.Field.HOST.match(currentFieldName)) {
builder.host = parser.text();
} else {
throw new ElasticsearchParseException("could not parse http request template. unexpected string field [{}]",
currentFieldName);
}
} else if (token == XContentParser.Token.VALUE_NUMBER) {
if (ParseFieldMatcher.STRICT.match(currentFieldName, HttpRequest.Field.PORT)) {
if (HttpRequest.Field.PORT.match(currentFieldName)) {
builder.port = parser.intValue();
} else {
throw new ElasticsearchParseException("could not parse http request template. unexpected numeric field [{}]",
@@ -9,11 +9,10 @@ import io.netty.handler.codec.http.HttpHeaders;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
@@ -29,7 +28,7 @@ import java.util.Map;
import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;

public class HttpResponse implements ToXContent {
public class HttpResponse implements ToXContentObject {

private final int status;
private final Map<String, String[]> headers;
@@ -189,13 +188,13 @@ public class HttpResponse implements ToXContent {
} else if (currentFieldName == null) {
throw new ElasticsearchParseException("could not parse http response. expected a field name but found [{}] instead", token);
} else if (token == XContentParser.Token.VALUE_NUMBER) {
if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.STATUS)) {
if (Field.STATUS.match(currentFieldName)) {
status = parser.intValue();
} else {
throw new ElasticsearchParseException("could not parse http response. unknown numeric field [{}]", currentFieldName);
}
} else if (token == XContentParser.Token.VALUE_STRING) {
if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.BODY)) {
if (Field.BODY.match(currentFieldName)) {
body = parser.text();
} else {
throw new ElasticsearchParseException("could not parse http response. unknown string field [{}]", currentFieldName);
@@ -5,15 +5,17 @@
 */
package org.elasticsearch.xpack.common.http.auth;

import org.elasticsearch.common.xcontent.ToXContent;
import org.apache.http.auth.AuthScope;
import org.apache.http.client.CredentialsProvider;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.net.HttpURLConnection;

public abstract class ApplicableHttpAuth<Auth extends HttpAuth> implements ToXContent {
public abstract class ApplicableHttpAuth<Auth extends HttpAuth> implements ToXContentObject {

private final Auth auth;
protected final Auth auth;

public ApplicableHttpAuth(Auth auth) {
this.auth = auth;
@@ -25,6 +27,8 @@ public abstract class ApplicableHttpAuth<Auth extends HttpAuth> implements ToXCo

public abstract void apply(HttpURLConnection connection);

public abstract void apply(CredentialsProvider credsProvider, AuthScope authScope);

@Override
public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return auth.toXContent(builder, params);
@@ -5,9 +5,9 @@
 */
package org.elasticsearch.xpack.common.http.auth;

import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;

public interface HttpAuth extends ToXContent {
public interface HttpAuth extends ToXContentObject {

String type();
@@ -5,20 +5,25 @@
 */
package org.elasticsearch.xpack.common.http.auth.basic;

import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.elasticsearch.xpack.common.http.auth.ApplicableHttpAuth;
import org.elasticsearch.xpack.security.crypto.CryptoService;

import java.net.HttpURLConnection;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

import org.elasticsearch.xpack.common.http.auth.ApplicableHttpAuth;
import org.elasticsearch.xpack.security.crypto.CryptoService;

public class ApplicableBasicAuth extends ApplicableHttpAuth<BasicAuth> {

private final String basicAuth;
private final CryptoService cryptoService;

public ApplicableBasicAuth(BasicAuth auth, CryptoService service) {
super(auth);
basicAuth = headerValue(auth.username, auth.password.text(service));
this.cryptoService = service;
}

public static String headerValue(String username, char[] password) {
@@ -29,4 +34,10 @@ public class ApplicableBasicAuth extends ApplicableHttpAuth<BasicAuth> {
connection.setRequestProperty("Authorization", basicAuth);
}

@Override
public void apply(CredentialsProvider credsProvider, AuthScope authScope) {
credsProvider.setCredentials(authScope,
new UsernamePasswordCredentials(auth.username, new String(auth.password.text(cryptoService))));
}

}
@@ -6,7 +6,6 @@
package org.elasticsearch.xpack.common.text;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -113,7 +112,7 @@ public class TextTemplate implements ToXContent {
if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
return new TextTemplate(parser.text());
} else {
return new TextTemplate(Script.parse(parser, ParseFieldMatcher.STRICT, Script.DEFAULT_TEMPLATE_LANG));
return new TextTemplate(Script.parse(parser, Script.DEFAULT_TEMPLATE_LANG));
}
}
}
@@ -6,14 +6,13 @@
package org.elasticsearch.xpack.graph.action;

import com.carrotsearch.hppc.ObjectIntHashMap;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.graph.action.Connection.ConnectionId;
import org.elasticsearch.xpack.graph.action.Vertex.VertexId;
@@ -31,7 +30,7 @@ import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearch
 *
 * @see GraphExploreRequest
 */
public class GraphExploreResponse extends ActionResponse implements ToXContent {
public class GraphExploreResponse extends ActionResponse implements ToXContentObject {

private long tookInMillis;
private boolean timedOut = false;
@@ -159,6 +158,7 @@ public class GraphExploreResponse extends ActionResponse implements ToXContent {

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Fields.TOOK, tookInMillis);
builder.field(Fields.TIMED_OUT, timedOut);

@@ -194,7 +194,7 @@ public class GraphExploreResponse extends ActionResponse implements ToXContent {
builder.endObject();
}
builder.endArray();

builder.endObject();
return builder;
}
@ -114,15 +114,15 @@ public class RestGraphAction extends XPackRestHandler {
|
|||
}
|
||||
|
||||
if (token == XContentParser.Token.START_ARRAY) {
|
||||
if (context.getParseFieldMatcher().match(fieldName, VERTICES_FIELD)) {
|
||||
if (VERTICES_FIELD.match(fieldName)) {
|
||||
parseVertices(parser, context, currentHop, graphRequest);
|
||||
}
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
if (context.getParseFieldMatcher().match(fieldName, QUERY_FIELD)) {
|
||||
if (QUERY_FIELD.match(fieldName)) {
|
||||
currentHop.guidingQuery(context.parseInnerQueryBuilder());
|
||||
} else if (context.getParseFieldMatcher().match(fieldName, CONNECTIONS_FIELD)) {
|
||||
} else if (CONNECTIONS_FIELD.match(fieldName)) {
|
||||
parseHop(parser, context, graphRequest.createNextHop(null), graphRequest);
|
||||
} else if (context.getParseFieldMatcher().match(fieldName, CONTROLS_FIELD)) {
|
||||
} else if (CONTROLS_FIELD.match(fieldName)) {
|
||||
if (currentHop.getParentHop() != null) {
|
||||
throw new ElasticsearchParseException(
|
||||
"Controls are a global setting that can only be set in the root " + fieldName, token.name());
|
||||
|
@ -160,7 +160,7 @@ public class RestGraphAction extends XPackRestHandler {
|
|||
token = parser.nextToken();
|
||||
}
|
||||
if (token == XContentParser.Token.START_ARRAY) {
|
||||
if (context.getParseFieldMatcher().match(fieldName, INCLUDE_FIELD)) {
|
||||
if (INCLUDE_FIELD.match(fieldName)) {
|
||||
if (excludes != null) {
|
||||
throw new ElasticsearchParseException(
|
||||
"Graph vertices definition cannot contain both "+INCLUDE_FIELD.getPreferredName()+" and "
|
||||
|
@ -176,7 +176,7 @@ public class RestGraphAction extends XPackRestHandler {
|
|||
fieldName = parser.currentName();
|
||||
} else {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
if (context.getParseFieldMatcher().match(fieldName, TERM_FIELD)) {
|
||||
if (TERM_FIELD.match(fieldName)) {
|
||||
includeTerm = parser.text();
|
||||
} else {
|
||||
throw new ElasticsearchParseException(
|
||||
|
@ -184,7 +184,7 @@ public class RestGraphAction extends XPackRestHandler {
" clause has invalid property:" + fieldName);
}
} else if (token == XContentParser.Token.VALUE_NUMBER) {
if (context.getParseFieldMatcher().match(fieldName, BOOST_FIELD)) {
if (BOOST_FIELD.match(fieldName)) {
boost = parser.floatValue();
} else {
throw new ElasticsearchParseException(

@ -215,7 +215,7 @@ public class RestGraphAction extends XPackRestHandler {
+ token.name());
}
}
} else if (context.getParseFieldMatcher().match(fieldName, EXCLUDE_FIELD)) {
} else if (EXCLUDE_FIELD.match(fieldName)) {
if (includes != null) {
throw new ElasticsearchParseException(
"Graph vertices definition cannot contain both "+ INCLUDE_FIELD.getPreferredName()+

@ -231,18 +231,18 @@ public class RestGraphAction extends XPackRestHandler {
}
}
if (token == XContentParser.Token.VALUE_STRING) {
if (context.getParseFieldMatcher().match(fieldName, FIELD_NAME_FIELD)) {
if (FIELD_NAME_FIELD.match(fieldName)) {
field = parser.text();
} else {
throw new ElasticsearchParseException("Unknown string property: [" + fieldName + "]");
}
}
if (token == XContentParser.Token.VALUE_NUMBER) {
if (context.getParseFieldMatcher().match(fieldName, SIZE_FIELD)) {
if (SIZE_FIELD.match(fieldName)) {
size = parser.intValue();
} else if (context.getParseFieldMatcher().match(fieldName, MIN_DOC_COUNT_FIELD)) {
} else if (MIN_DOC_COUNT_FIELD.match(fieldName)) {
minDocCount = parser.intValue();
} else if (context.getParseFieldMatcher().match(fieldName, SHARD_MIN_DOC_COUNT_FIELD)) {
} else if (SHARD_MIN_DOC_COUNT_FIELD.match(fieldName)) {
shardMinDocCount = parser.intValue();
} else {
throw new ElasticsearchParseException("Unknown numeric property: [" + fieldName + "]");

@ -282,37 +282,37 @@ public class RestGraphAction extends XPackRestHandler {
if (token == XContentParser.Token.FIELD_NAME) {
fieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_NUMBER) {
if (context.getParseFieldMatcher().match(fieldName, SAMPLE_SIZE_FIELD)) {
if (SAMPLE_SIZE_FIELD.match(fieldName)) {
graphRequest.sampleSize(parser.intValue());
} else if (context.getParseFieldMatcher().match(fieldName, TIMEOUT_FIELD)) {
} else if (TIMEOUT_FIELD.match(fieldName)) {
graphRequest.timeout(TimeValue.timeValueMillis(parser.longValue()));
} else {
throw new ElasticsearchParseException("Unknown numeric property: [" + fieldName + "]");
}
} else if (token == XContentParser.Token.VALUE_BOOLEAN) {
if (context.getParseFieldMatcher().match(fieldName, SIGNIFICANCE_FIELD)) {
if (SIGNIFICANCE_FIELD.match(fieldName)) {
graphRequest.useSignificance(parser.booleanValue());
} else if (context.getParseFieldMatcher().match(fieldName, RETURN_DETAILED_INFO)) {
} else if (RETURN_DETAILED_INFO.match(fieldName)) {
graphRequest.returnDetailedInfo(parser.booleanValue());
} else{
throw new ElasticsearchParseException("Unknown boolean property: [" + fieldName + "]");
}
} else if (token == XContentParser.Token.VALUE_STRING) {
if (context.getParseFieldMatcher().match(fieldName, TIMEOUT_FIELD)) {
if (TIMEOUT_FIELD.match(fieldName)) {
graphRequest.timeout(TimeValue.parseTimeValue(parser.text(), null, "timeout"));
} else {
throw new ElasticsearchParseException("Unknown numeric property: [" + fieldName + "]");
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (context.getParseFieldMatcher().match(fieldName, SAMPLE_DIVERSITY_FIELD)) {
if (SAMPLE_DIVERSITY_FIELD.match(fieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
fieldName = parser.currentName();
token = parser.nextToken();
}
if (context.getParseFieldMatcher().match(fieldName, FIELD_NAME_FIELD)) {
if (FIELD_NAME_FIELD.match(fieldName)) {
graphRequest.sampleDiversityField(parser.text());
} else if (context.getParseFieldMatcher().match(fieldName, MAX_DOCS_PER_VALUE_FIELD)) {
} else if (MAX_DOCS_PER_VALUE_FIELD.match(fieldName)) {
graphRequest.maxDocsPerDiversityValue(parser.intValue());
} else {
throw new ElasticsearchParseException("Unknown property: [" + fieldName + "]");
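Note (not part of the patch): the recurring change above, and in the notification/email parsers further down, swaps the request-scoped ParseFieldMatcher lookups for direct ParseField#match calls. A minimal sketch of the new pattern; the class and method below are hypothetical and only illustrate the call shape.

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

// Illustrative only: the field name is matched directly against the ParseField constant
// instead of going through context.getParseFieldMatcher().
class VertexBoostParsingSketch {
    private static final ParseField BOOST_FIELD = new ParseField("boost");

    static float parseBoost(XContentParser parser, String fieldName, float currentBoost) throws IOException {
        if (BOOST_FIELD.match(fieldName)) { // previously: context.getParseFieldMatcher().match(fieldName, BOOST_FIELD)
            return parser.floatValue();
        }
        return currentBoost;
    }
}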
@ -1,230 +0,0 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.monitoring;
|
||||
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.lease.Releasable;
|
||||
import org.elasticsearch.common.regex.Regex;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.CollectionUtils;
|
||||
import org.elasticsearch.common.util.concurrent.EsExecutors;
|
||||
import org.elasticsearch.common.util.concurrent.ReleasableLock;
|
||||
import org.elasticsearch.xpack.monitoring.collector.Collector;
|
||||
import org.elasticsearch.xpack.monitoring.collector.cluster.ClusterStatsCollector;
|
||||
import org.elasticsearch.xpack.monitoring.exporter.ExportException;
|
||||
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
|
||||
import org.elasticsearch.xpack.monitoring.exporter.Exporters;
|
||||
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Locale;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.locks.ReentrantLock;
|
||||
|
||||
/**
|
||||
* The {@code AgentService} is a service that does the work of publishing the details to the monitoring cluster.
|
||||
* <p>
|
||||
* If this service is stopped, then the attached, monitored node is not going to publish its details to the monitoring cluster. Given
|
||||
* service life cycles, the intended way to temporarily stop the publishing is using the start and stop collection methods.
|
||||
*
|
||||
* @see #stopCollection()
|
||||
* @see #startCollection()
|
||||
*/
|
||||
public class AgentService extends AbstractLifecycleComponent {
|
||||
|
||||
private volatile ExportingWorker exportingWorker;
|
||||
|
||||
private volatile Thread workerThread;
|
||||
private volatile long samplingIntervalMillis;
|
||||
private final Collection<Collector> collectors;
|
||||
private final String[] settingsCollectors;
|
||||
private final Exporters exporters;
|
||||
|
||||
public AgentService(Settings settings, ClusterSettings clusterSettings, Set<Collector> collectors, Exporters exporters) {
|
||||
super(settings);
|
||||
this.samplingIntervalMillis = MonitoringSettings.INTERVAL.get(settings).millis();
|
||||
this.settingsCollectors = MonitoringSettings.COLLECTORS.get(settings).toArray(new String[0]);
|
||||
this.collectors = Collections.unmodifiableSet(filterCollectors(collectors, settingsCollectors));
|
||||
this.exporters = exporters;
|
||||
|
||||
clusterSettings.addSettingsUpdateConsumer(MonitoringSettings.INTERVAL, this::setInterval);
|
||||
}
|
||||
|
||||
private void setInterval(TimeValue interval) {
|
||||
this.samplingIntervalMillis = interval.millis();
|
||||
applyIntervalSettings();
|
||||
}
|
||||
|
||||
protected Set<Collector> filterCollectors(Set<Collector> collectors, String[] filters) {
|
||||
if (CollectionUtils.isEmpty(filters)) {
|
||||
return collectors;
|
||||
}
|
||||
|
||||
Set<Collector> list = new HashSet<>();
|
||||
for (Collector collector : collectors) {
|
||||
if (Regex.simpleMatch(filters, collector.name().toLowerCase(Locale.ROOT))) {
|
||||
list.add(collector);
|
||||
} else if (collector instanceof ClusterStatsCollector) {
|
||||
list.add(collector);
|
||||
}
|
||||
}
|
||||
return list;
|
||||
}
|
||||
|
||||
protected void applyIntervalSettings() {
|
||||
if (samplingIntervalMillis <= 0) {
|
||||
logger.info("data sampling is disabled due to interval settings [{}]", samplingIntervalMillis);
|
||||
if (workerThread != null) {
|
||||
|
||||
// notify worker to stop on its leisure, not to disturb an exporting operation
|
||||
exportingWorker.closed = true;
|
||||
|
||||
exportingWorker = null;
|
||||
workerThread = null;
|
||||
}
|
||||
} else if (workerThread == null || !workerThread.isAlive()) {
|
||||
|
||||
exportingWorker = new ExportingWorker();
|
||||
workerThread = new Thread(exportingWorker, EsExecutors.threadName(settings, "monitoring.exporters"));
|
||||
workerThread.setDaemon(true);
|
||||
workerThread.start();
|
||||
}
|
||||
}
|
||||
|
||||
/** stop collection and exporting. this method blocks until all background activity is guaranteed to be stopped */
|
||||
public void stopCollection() {
|
||||
final ExportingWorker worker = this.exportingWorker;
|
||||
if (worker != null) {
|
||||
worker.stopCollecting();
|
||||
}
|
||||
}
|
||||
|
||||
public void startCollection() {
|
||||
final ExportingWorker worker = this.exportingWorker;
|
||||
if (worker != null) {
|
||||
worker.collecting = true;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStart() {
|
||||
logger.debug("monitoring service started");
|
||||
exporters.start();
|
||||
applyIntervalSettings();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStop() {
|
||||
if (workerThread != null && workerThread.isAlive()) {
|
||||
exportingWorker.closed = true;
|
||||
workerThread.interrupt();
|
||||
try {
|
||||
workerThread.join(60000);
|
||||
} catch (InterruptedException e) {
|
||||
// we don't care...
|
||||
}
|
||||
}
|
||||
|
||||
exporters.stop();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doClose() {
|
||||
for (Exporter exporter : exporters) {
|
||||
try {
|
||||
exporter.close();
|
||||
} catch (Exception e) {
|
||||
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to close exporter [{}]", exporter.name()), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public TimeValue getSamplingInterval() {
|
||||
return TimeValue.timeValueMillis(samplingIntervalMillis);
|
||||
}
|
||||
|
||||
public String[] collectors() {
|
||||
return settingsCollectors;
|
||||
}
|
||||
|
||||
class ExportingWorker implements Runnable {
|
||||
|
||||
volatile boolean closed = false;
|
||||
volatile boolean collecting = true;
|
||||
|
||||
final ReleasableLock collectionLock = new ReleasableLock(new ReentrantLock(false));
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
while (!closed) {
|
||||
// sleep first to allow node to complete initialization before collecting the first start
|
||||
try {
|
||||
Thread.sleep(samplingIntervalMillis);
|
||||
|
||||
if (closed) {
|
||||
continue;
|
||||
}
|
||||
|
||||
try (Releasable ignore = collectionLock.acquire()) {
|
||||
|
||||
Collection<MonitoringDoc> docs = collect();
|
||||
|
||||
if ((docs.isEmpty() == false) && (closed == false)) {
|
||||
exporters.export(docs);
|
||||
}
|
||||
}
|
||||
|
||||
} catch (ExportException e) {
|
||||
logger.error("exception when exporting documents", e);
|
||||
} catch (InterruptedException e) {
|
||||
logger.trace("interrupted");
|
||||
Thread.currentThread().interrupt();
|
||||
} catch (Exception e) {
|
||||
logger.error("background thread had an uncaught exception", e);
|
||||
}
|
||||
}
|
||||
logger.debug("worker shutdown");
|
||||
}
|
||||
|
||||
/** stop collection and exporting. this method will be block until background collection is actually stopped */
|
||||
public void stopCollecting() {
|
||||
collecting = false;
|
||||
collectionLock.acquire().close();
|
||||
}
|
||||
|
||||
private Collection<MonitoringDoc> collect() {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("collecting data - collectors [{}]", Strings.collectionToCommaDelimitedString(collectors));
|
||||
}
|
||||
|
||||
Collection<MonitoringDoc> docs = new ArrayList<>();
|
||||
for (Collector collector : collectors) {
|
||||
if (collecting) {
|
||||
Collection<MonitoringDoc> result = collector.collect();
|
||||
if (result != null) {
|
||||
logger.trace("adding [{}] collected docs from [{}] collector", result.size(), collector.name());
|
||||
docs.addAll(result);
|
||||
} else {
|
||||
logger.trace("skipping collected docs from [{}] collector", collector.name());
|
||||
}
|
||||
}
|
||||
if (closed) {
|
||||
// Stop collecting if the worker is marked as closed
|
||||
break;
|
||||
}
|
||||
}
|
||||
return docs;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -10,7 +10,8 @@ import java.util.Locale;
public enum MonitoredSystem {

ES("es"),
KIBANA("kibana");
KIBANA("kibana"),
LOGSTASH("logstash");

private final String system;

@ -28,6 +29,8 @@ public class MonitoredSystem {
return ES;
case "kibana":
return KIBANA;
case "logstash":
return LOGSTASH;
default:
throw new IllegalArgumentException("Unknown monitoring system [" + system + "]");
}
@ -38,6 +38,7 @@ import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.xpack.security.InternalClient;
import org.elasticsearch.xpack.ssl.SSLService;

import java.time.Clock;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

@ -119,9 +120,10 @@ public class Monitoring implements ActionPlugin {
collectors.add(new ShardsCollector(settings, clusterService, monitoringSettings, licenseState));
collectors.add(new NodeStatsCollector(settings, clusterService, monitoringSettings, licenseState, client));
collectors.add(new IndexRecoveryCollector(settings, clusterService, monitoringSettings, licenseState, client));
final AgentService agentService = new AgentService(settings, clusterSettings, collectors, exporters);
final MonitoringService monitoringService =
new MonitoringService(settings, clusterSettings, threadPool, collectors, exporters);

return Arrays.asList(agentService, monitoringSettings, exporters, cleanerService);
return Arrays.asList(monitoringService, monitoringSettings, exporters, cleanerService);
}

@Override
@ -0,0 +1,236 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.monitoring;
|
||||
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.xpack.monitoring.collector.Collector;
|
||||
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
|
||||
import org.elasticsearch.xpack.monitoring.exporter.Exporters;
|
||||
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Semaphore;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
/**
|
||||
* The {@code MonitoringService} is a service that does the work of publishing the details to the monitoring cluster.
|
||||
* <p>
|
||||
* If this service is stopped, then the attached, monitored node is not going to publish its details to the monitoring cluster. Given
|
||||
* service life cycles, the intended way to temporarily stop the publishing is using the start and stop methods.
|
||||
*/
|
||||
public class MonitoringService extends AbstractLifecycleComponent {
|
||||
|
||||
/** State of the monitoring service, either started or stopped **/
|
||||
private final AtomicBoolean started = new AtomicBoolean(false);
|
||||
|
||||
/** Task in charge of collecting and exporting monitoring data **/
|
||||
private final MonitoringExecution monitor = new MonitoringExecution();
|
||||
|
||||
private final ThreadPool threadPool;
|
||||
private final Set<Collector> collectors;
|
||||
private final Exporters exporters;
|
||||
|
||||
private volatile TimeValue interval;
|
||||
private volatile ThreadPool.Cancellable scheduler;
|
||||
|
||||
MonitoringService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool,
|
||||
Set<Collector> collectors, Exporters exporters) {
|
||||
super(settings);
|
||||
this.threadPool = Objects.requireNonNull(threadPool);
|
||||
this.collectors = Objects.requireNonNull(collectors);
|
||||
this.exporters = Objects.requireNonNull(exporters);
|
||||
this.interval = MonitoringSettings.INTERVAL.get(settings);
|
||||
clusterSettings.addSettingsUpdateConsumer(MonitoringSettings.INTERVAL, this::setInterval);
|
||||
}
|
||||
|
||||
void setInterval(TimeValue interval) {
|
||||
this.interval = interval;
|
||||
scheduleExecution();
|
||||
}
|
||||
|
||||
public TimeValue getInterval() {
|
||||
return interval;
|
||||
}
|
||||
|
||||
boolean isMonitoringActive() {
|
||||
return isStarted()
|
||||
&& interval != null
|
||||
&& interval.millis() >= MonitoringSettings.MIN_INTERVAL.millis();
|
||||
}
|
||||
|
||||
private String threadPoolName() {
|
||||
return ThreadPool.Names.GENERIC;
|
||||
}
|
||||
|
||||
boolean isStarted() {
|
||||
return started.get();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStart() {
|
||||
if (started.compareAndSet(false, true)) {
|
||||
try {
|
||||
logger.debug("monitoring service is starting");
|
||||
scheduleExecution();
|
||||
logger.debug("monitoring service started");
|
||||
} catch (Exception e) {
|
||||
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to start monitoring service"), e);
|
||||
started.set(false);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStop() {
|
||||
if (started.getAndSet(false)) {
|
||||
logger.debug("monitoring service is stopping");
|
||||
cancelExecution();
|
||||
logger.debug("monitoring service stopped");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doClose() {
|
||||
logger.debug("monitoring service is closing");
|
||||
closeExecution();
|
||||
|
||||
for (Exporter exporter : exporters) {
|
||||
try {
|
||||
exporter.close();
|
||||
} catch (Exception e) {
|
||||
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to close exporter [{}]", exporter.name()), e);
|
||||
}
|
||||
}
|
||||
logger.debug("monitoring service closed");
|
||||
}
|
||||
|
||||
void scheduleExecution() {
|
||||
if (scheduler != null) {
|
||||
cancelExecution();
|
||||
}
|
||||
if (isMonitoringActive()) {
|
||||
scheduler = threadPool.scheduleWithFixedDelay(monitor, interval, threadPoolName());
|
||||
}
|
||||
}
|
||||
|
||||
void cancelExecution() {
|
||||
if (scheduler != null) {
|
||||
try {
|
||||
scheduler.cancel();
|
||||
} finally {
|
||||
scheduler = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void closeExecution() {
|
||||
try {
|
||||
monitor.close();
|
||||
} catch (IOException e) {
|
||||
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to close monitoring execution"), e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* {@link MonitoringExecution} is a scheduled {@link Runnable} that periodically checks if monitoring
|
||||
* data can be collected and exported. It runs at a given interval corresponding to the monitoring
|
||||
* sampling interval. It first checks if monitoring is still enabled (because it might have changed
|
||||
* since the last time the task was scheduled: interval set to -1 or the monitoring service is stopped).
|
||||
* Since collecting and exporting data can take time, it uses a semaphore to track the current execution.
|
||||
*/
|
||||
class MonitoringExecution extends AbstractRunnable implements Closeable {
|
||||
|
||||
/**
|
||||
* Binary semaphore used to wait for monitoring execution to terminate before closing or stopping
|
||||
* the monitoring service. A semaphore is preferred over a ReentrantLock because the lock is
|
||||
* obtained by a thread and released by another thread.
|
||||
**/
|
||||
private final Semaphore semaphore = new Semaphore(1);
|
||||
|
||||
@Override
|
||||
public void doRun() {
|
||||
if (isMonitoringActive() == false) {
|
||||
logger.debug("monitoring execution is skipped");
|
||||
return;
|
||||
}
|
||||
|
||||
if (semaphore.tryAcquire() == false) {
|
||||
logger.debug("monitoring execution is skipped until previous execution terminated");
|
||||
return;
|
||||
}
|
||||
|
||||
threadPool.executor(threadPoolName()).submit(new AbstractRunnable() {
|
||||
@Override
|
||||
protected void doRun() throws Exception {
|
||||
Collection<MonitoringDoc> results = new ArrayList<>();
|
||||
for (Collector collector : collectors) {
|
||||
if (isStarted() == false) {
|
||||
// Do not collect more data if the the monitoring service is stopping
|
||||
// otherwise some collectors might just fail.
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
Collection<MonitoringDoc> result = collector.collect();
|
||||
if (result != null) {
|
||||
results.addAll(result);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.warn((Supplier<?>) () ->
|
||||
new ParameterizedMessage("monitoring collector [{}] failed to collect data", collector.name()), e);
|
||||
}
|
||||
}
|
||||
if (isMonitoringActive()) {
|
||||
exporters.export(results);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
logger.warn("monitoring execution failed", e);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRejection(Exception e) {
|
||||
logger.warn("monitoring execution has been rejected", e);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onAfter() {
|
||||
semaphore.release();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
logger.warn("monitoring execution failed", e);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
try {
|
||||
// Block until the lock can be acquired
|
||||
semaphore.acquire();
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -5,7 +5,6 @@
*/
package org.elasticsearch.xpack.monitoring;

import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;

@ -34,11 +33,23 @@ public class MonitoringSettings extends AbstractComponent {
*/
public static final TimeValue HISTORY_DURATION_MINIMUM = TimeValue.timeValueHours(24);

/**
* Minimum value for sampling interval (1 second)
*/
static final TimeValue MIN_INTERVAL = TimeValue.timeValueSeconds(1L);

/**
* Sampling interval between two collections (default to 10s)
*/
public static final Setting<TimeValue> INTERVAL =
timeSetting(collectionKey("interval"), TimeValue.timeValueSeconds(10), Property.Dynamic, Property.NodeScope);
public static final Setting<TimeValue> INTERVAL = new Setting<>(collectionKey("interval"), "10s",
(s) -> {
TimeValue value = TimeValue.parseTimeValue(s, null, collectionKey("interval"));
if (TimeValue.MINUS_ONE.equals(value) || value.millis() >= MIN_INTERVAL.millis()) {
return value;
}
throw new IllegalArgumentException("Failed to parse monitoring interval [" + s + "], value must be >= " + MIN_INTERVAL);
},
Property.Dynamic, Property.NodeScope);

/**
* Timeout value when collecting index statistics (default to 10m)
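Note (not part of the patch): the rewritten interval setting accepts either -1 (collection disabled) or any value of at least one second, and rejects everything else at parse time. A hypothetical standalone check mirroring the validator above; the fully spelled-out setting key and the surrounding class are assumptions for illustration only.

import org.elasticsearch.common.unit.TimeValue;

// Illustrative only: same accept/reject logic as the validator in the hunk above.
class IntervalValidationSketch {
    private static final TimeValue MIN_INTERVAL = TimeValue.timeValueSeconds(1L);

    static TimeValue validateInterval(String s) {
        TimeValue value = TimeValue.parseTimeValue(s, null, "xpack.monitoring.collection.interval");
        if (TimeValue.MINUS_ONE.equals(value) || value.millis() >= MIN_INTERVAL.millis()) {
            return value;
        }
        throw new IllegalArgumentException("Failed to parse monitoring interval [" + s + "], value must be >= " + MIN_INTERVAL);
    }

    public static void main(String[] args) {
        validateInterval("10s");    // accepted: the default sampling interval
        validateInterval("-1");     // accepted: collection disabled
        validateInterval("100ms");  // rejected: below the one second minimum
    }
}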
@ -44,10 +44,6 @@ public abstract class Exporter implements AutoCloseable {
return config;
}

public boolean masterOnly() {
return false;
}

/** Returns true if only one instance of this exporter should be allowed. */
public boolean isSingleton() {
return false;
@ -31,19 +31,15 @@ import static java.util.Collections.emptyMap;
public class Exporters extends AbstractLifecycleComponent implements Iterable<Exporter> {

private final Map<String, Exporter.Factory> factories;
private final ClusterService clusterService;

private final AtomicReference<Map<String, Exporter>> exporters;

public Exporters(Settings settings, Map<String, Exporter.Factory> factories,
ClusterService clusterService) {

public Exporters(Settings settings, Map<String, Exporter.Factory> factories, ClusterService clusterService) {
super(settings);

this.factories = factories;
this.clusterService = clusterService;
this.exporters = new AtomicReference<>(emptyMap());
clusterService.getClusterSettings().addSettingsUpdateConsumer(MonitoringSettings.EXPORTERS_SETTINGS,
this::setExportersSetting);

clusterService.getClusterSettings().addSettingsUpdateConsumer(MonitoringSettings.EXPORTERS_SETTINGS, this::setExportersSetting);
}

private void setExportersSetting(Settings exportersSetting) {

@ -92,15 +88,10 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
ExportBulk openBulk() {
List<ExportBulk> bulks = new ArrayList<>();
for (Exporter exporter : this) {
if (exporter.masterOnly() && clusterService.state().nodes().isLocalNodeElectedMaster() == false) {
// the exporter is supposed to only run on the master node, but we're not
// the master node... so skipping
continue;
}
try {
ExportBulk bulk = exporter.openBulk();
if (bulk == null) {
logger.info("skipping exporter [{}] as it isn't ready yet", exporter.name());
logger.info("skipping exporter [{}] as it is not ready yet", exporter.name());
} else {
bulks.add(bulk);
}

@ -168,11 +159,9 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
throw new ExportException("Export service is not started");
}
if (docs != null && docs.size() > 0) {
ExportBulk bulk = openBulk();
if (bulk == null) {
throw new ExportException("exporters are either not ready or faulty");
}
final ExportBulk bulk = openBulk();

if (bulk != null) {
try {
bulk.add(docs);
} finally {

@ -181,3 +170,4 @@ public class Exporters extends AbstractLifecycleComponent implements Iterable<Ex
}
}
}
}
@ -17,6 +17,14 @@ public final class MonitoringTemplateUtils {

/** Current version of es and data templates **/
public static final String TEMPLATE_VERSION = "2";
/**
* The name of the non-timestamped data index.
*/
public static final String DATA_INDEX = ".monitoring-data-" + TEMPLATE_VERSION;
/**
* Data types that should be supported by the {@linkplain #DATA_INDEX data index} that were not by the initial release.
*/
public static final String[] NEW_DATA_TYPES = { "kibana", "logstash" };

private MonitoringTemplateUtils() {
}
@ -0,0 +1,98 @@
|
|||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.monitoring.exporter.http;
|
||||
|
||||
import org.apache.http.HttpEntity;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.client.Response;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils.DATA_INDEX;
|
||||
|
||||
/**
|
||||
* {@linkplain DataTypeMappingHttpResource}s allow the checking and adding of index mapping's for new types that did not exist in previous
|
||||
* versions.
|
||||
* <p>
|
||||
* This allows the use of Monitoring's REST endpoint to publish Kibana data to the data index even if the "kibana" type did not
|
||||
* exist in their existing index mapping (e.g., they started with an early alpha release). Additionally, this also enables future types to
|
||||
* be added without issue.
|
||||
* <p>
|
||||
* The root need for this is because the index mapping started with an index setting: "index.mapper.dynamic" set to false. This prevents
|
||||
* new types from being dynamically added, which is obviously needed as new components (e.g., Kibana and Logstash) are monitored.
|
||||
* Unfortunately, this setting cannot be flipped without also closing and reopening the index, so the fix is to manually add any new types.
|
||||
*/
|
||||
public class DataTypeMappingHttpResource extends PublishableHttpResource {
|
||||
|
||||
private static final Logger logger = Loggers.getLogger(DataTypeMappingHttpResource.class);
|
||||
|
||||
/**
|
||||
* The name of the type that is created in the mappings on the remote cluster.
|
||||
*/
|
||||
private final String typeName;
|
||||
|
||||
/**
|
||||
* Create a new {@link DataTypeMappingHttpResource}.
|
||||
*
|
||||
* @param resourceOwnerName The user-recognizable name
|
||||
* @param masterTimeout Master timeout to use with any request.
|
||||
* @param typeName The name of the mapping type (e.g., "kibana").
|
||||
*/
|
||||
public DataTypeMappingHttpResource(final String resourceOwnerName, @Nullable final TimeValue masterTimeout,
|
||||
final String typeName) {
|
||||
// we need to inspect the mappings, so we don't use filter_path to get rid of them
|
||||
super(resourceOwnerName, masterTimeout, Collections.emptyMap());
|
||||
|
||||
this.typeName = Objects.requireNonNull(typeName);
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine if the current {@linkplain #typeName type} exists.
|
||||
*/
|
||||
@Override
|
||||
protected CheckResponse doCheck(final RestClient client) {
|
||||
final Tuple<CheckResponse, Response> resource =
|
||||
checkForResource(client, logger,
|
||||
"/" + DATA_INDEX + "/_mapping", typeName, "monitoring mapping type",
|
||||
resourceOwnerName, "monitoring cluster");
|
||||
|
||||
// depending on the content, we need to flip the actual response
|
||||
CheckResponse checkResponse = resource.v1();
|
||||
|
||||
if (checkResponse == CheckResponse.EXISTS && resource.v2().getEntity().getContentLength() <= 2) {
|
||||
// it "exists" if the index exists at all; it doesn't guarantee that the mapping exists
|
||||
// the content will be "{}" if no mapping exists
|
||||
checkResponse = CheckResponse.DOES_NOT_EXIST;
|
||||
} else if (checkResponse == CheckResponse.DOES_NOT_EXIST) {
|
||||
// DNE indicates that the entire index is missing, which means the template will create it; we only add types!
|
||||
checkResponse = CheckResponse.EXISTS;
|
||||
}
|
||||
|
||||
return checkResponse;
|
||||
}
|
||||
|
||||
/**
|
||||
* Add the current {@linkplain #typeName type} to the index's mappings.
|
||||
*/
|
||||
@Override
|
||||
protected boolean doPublish(final RestClient client) {
|
||||
// this could be a class-level constant, but it does not need to live the entire duration of ES; only the few times it is used
|
||||
final HttpEntity disabledEntity = new StringEntity("{\"enabled\":false}", ContentType.APPLICATION_JSON);
|
||||
|
||||
return putResource(client, logger,
|
||||
"/" + DATA_INDEX + "/_mapping", typeName, () -> disabledEntity, "monitoring mapping type",
|
||||
resourceOwnerName, "monitoring cluster");
|
||||
}
|
||||
|
||||
}
|
|
@ -31,6 +31,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
import org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils;
import org.elasticsearch.xpack.monitoring.resolver.MonitoringIndexNameResolver;
import org.elasticsearch.xpack.monitoring.resolver.ResolversRegistry;
import org.elasticsearch.xpack.ssl.SSLService;

@ -520,6 +521,13 @@ public class HttpExporter extends Exporter {
final TimeValue templateTimeout = config.settings().getAsTime(TEMPLATE_CHECK_TIMEOUT_SETTING, null);
final Set<String> templateNames = new HashSet<>();

// add a resource to check the index mappings of the .monitoring-data-# index
// We ensure (and add if it's not) that the kibana type is there for the index for those few customers that upgraded from alphas;
// this step makes it very easy to add logstash in 5.2+ (and eventually beats)
for (final String type : MonitoringTemplateUtils.NEW_DATA_TYPES) {
resources.add(new DataTypeMappingHttpResource(resourceOwnerName, templateTimeout, type));
}

for (final MonitoringIndexNameResolver resolver : resolvers) {
final String templateName = resolver.templateName();
@ -57,7 +57,7 @@ public class PipelineHttpResource extends PublishableHttpResource {
*/
@Override
protected CheckResponse doCheck(final RestClient client) {
return checkForResource(client, logger,
return simpleCheckForResource(client, logger,
"/_ingest/pipeline", pipelineName, "monitoring pipeline",
resourceOwnerName, "monitoring cluster");
}
@ -13,6 +13,7 @@ import org.elasticsearch.client.Response;
|
|||
import org.elasticsearch.client.ResponseException;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.collect.Tuple;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
||||
|
@ -144,7 +145,8 @@ public abstract class PublishableHttpResource extends HttpResource {
|
|||
/**
|
||||
* Determine if the current {@code resourceName} exists at the {@code resourceBasePath} endpoint.
|
||||
* <p>
|
||||
* This provides the base-level check for any resource that does not need to inspect its actual contents.
|
||||
* This provides the base-level check for any resource that does not need to care about its response beyond existence (and likely does
|
||||
* not need to inspect its contents).
|
||||
*
|
||||
* @param client The REST client to make the request(s).
|
||||
* @param logger The logger to use for status messages.
|
||||
|
@ -155,7 +157,30 @@ public abstract class PublishableHttpResource extends HttpResource {
|
|||
* @param resourceOwnerType The type of resource owner being dealt with (e.g., "monitoring cluster").
|
||||
* @return Never {@code null}.
|
||||
*/
|
||||
protected CheckResponse checkForResource(final RestClient client, final Logger logger,
|
||||
protected CheckResponse simpleCheckForResource(final RestClient client, final Logger logger,
|
||||
final String resourceBasePath,
|
||||
final String resourceName, final String resourceType,
|
||||
final String resourceOwnerName, final String resourceOwnerType) {
|
||||
return checkForResource(client, logger, resourceBasePath, resourceName, resourceType, resourceOwnerName, resourceOwnerType).v1();
|
||||
}
|
||||
|
||||
/**
|
||||
* Determine if the current {@code resourceName} exists at the {@code resourceBasePath} endpoint.
|
||||
* <p>
|
||||
* This provides the base-level check for any resource that cares about existence and also its contents.
|
||||
*
|
||||
* @param client The REST client to make the request(s).
|
||||
* @param logger The logger to use for status messages.
|
||||
* @param resourceBasePath The base path/endpoint to check for the resource (e.g., "/_template").
|
||||
* @param resourceName The name of the resource (e.g., "template123").
|
||||
* @param resourceType The type of resource (e.g., "monitoring template").
|
||||
* @param resourceOwnerName The user-recognizeable resource owner.
|
||||
* @param resourceOwnerType The type of resource owner being dealt with (e.g., "monitoring cluster").
|
||||
* @return Never {@code null} pair containing the checked response and the returned response.
|
||||
* The response will only ever be {@code null} if none was returned.
|
||||
* @see #simpleCheckForResource(RestClient, Logger, String, String, String, String, String)
|
||||
*/
|
||||
protected Tuple<CheckResponse, Response> checkForResource(final RestClient client, final Logger logger,
|
||||
final String resourceBasePath,
|
||||
final String resourceName, final String resourceType,
|
||||
final String resourceOwnerName, final String resourceOwnerType) {
|
||||
|
@ -169,18 +194,19 @@ public abstract class PublishableHttpResource extends HttpResource {
|
|||
if (response.getStatusLine().getStatusCode() == RestStatus.OK.getStatus()) {
|
||||
logger.debug("{} [{}] found on the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
|
||||
|
||||
return CheckResponse.EXISTS;
|
||||
return new Tuple<>(CheckResponse.EXISTS, response);
|
||||
} else {
|
||||
throw new ResponseException(response);
|
||||
}
|
||||
} catch (final ResponseException e) {
|
||||
final int statusCode = e.getResponse().getStatusLine().getStatusCode();
|
||||
final Response response = e.getResponse();
|
||||
final int statusCode = response.getStatusLine().getStatusCode();
|
||||
|
||||
// 404
|
||||
if (statusCode == RestStatus.NOT_FOUND.getStatus()) {
|
||||
logger.debug("{} [{}] does not exist on the [{}] {}", resourceType, resourceName, resourceOwnerName, resourceOwnerType);
|
||||
|
||||
return CheckResponse.DOES_NOT_EXIST;
|
||||
return new Tuple<>(CheckResponse.DOES_NOT_EXIST, response);
|
||||
} else {
|
||||
logger.error((Supplier<?>) () ->
|
||||
new ParameterizedMessage("failed to verify {} [{}] on the [{}] {} with status code [{}]",
|
||||
|
@ -188,7 +214,7 @@ public abstract class PublishableHttpResource extends HttpResource {
|
|||
e);
|
||||
|
||||
// weirder failure than below; block responses just like other unexpected failures
|
||||
return CheckResponse.ERROR;
|
||||
return new Tuple<>(CheckResponse.ERROR, response);
|
||||
}
|
||||
} catch (IOException | RuntimeException e) {
|
||||
logger.error((Supplier<?>) () ->
|
||||
|
@ -197,7 +223,7 @@ public abstract class PublishableHttpResource extends HttpResource {
|
|||
e);
|
||||
|
||||
// do not attempt to publish the resource because we're in a broken state
|
||||
return CheckResponse.ERROR;
|
||||
return new Tuple<>(CheckResponse.ERROR, null);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -58,7 +58,7 @@ public class TemplateHttpResource extends PublishableHttpResource {
*/
@Override
protected CheckResponse doCheck(final RestClient client) {
return checkForResource(client, logger,
return simpleCheckForResource(client, logger,
"/_template", templateName, "monitoring template",
resourceOwnerName, "monitoring cluster");
}
@ -17,6 +17,8 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasA
|
|||
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse;
|
||||
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
|
||||
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
|
||||
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
|
||||
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest;
|
||||
import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateResponse;
|
||||
import org.elasticsearch.action.ingest.PutPipelineRequest;
|
||||
|
@ -41,6 +43,7 @@ import org.elasticsearch.xpack.monitoring.cleaner.CleanerService;
|
|||
import org.elasticsearch.xpack.monitoring.exporter.ExportBulk;
|
||||
import org.elasticsearch.xpack.monitoring.exporter.Exporter;
|
||||
import org.elasticsearch.xpack.monitoring.exporter.MonitoringDoc;
|
||||
import org.elasticsearch.xpack.monitoring.exporter.MonitoringTemplateUtils;
|
||||
import org.elasticsearch.xpack.monitoring.resolver.MonitoringIndexNameResolver;
|
||||
import org.elasticsearch.xpack.monitoring.resolver.ResolversRegistry;
|
||||
import org.elasticsearch.xpack.security.InternalClient;
|
||||
|
@ -137,57 +140,113 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
|
|||
}
|
||||
|
||||
// List of distinct templates
|
||||
Map<String, String> templates = StreamSupport.stream(new ResolversRegistry(Settings.EMPTY).spliterator(), false)
|
||||
final Map<String, String> templates = StreamSupport.stream(new ResolversRegistry(Settings.EMPTY).spliterator(), false)
|
||||
.collect(Collectors.toMap(MonitoringIndexNameResolver::templateName, MonitoringIndexNameResolver::template, (a, b) -> a));
|
||||
|
||||
// if this is not the master, we just need to make sure the master has set things up
|
||||
if (clusterService.state().nodes().isLocalNodeElectedMaster()) {
|
||||
if (setupIfElectedMaster(clusterState, templates) == false) {
|
||||
return null;
|
||||
}
|
||||
} else if (setupIfNotElectedMaster(clusterState, templates.keySet()) == false) {
|
||||
return null;
|
||||
}
|
||||
|
||||
// if this is not the master, we'll just look to see if the monitoring templates are installed.
|
||||
// If they all are, we'll be able to start this exporter. Otherwise, we'll just wait for a new cluster state.
|
||||
if (clusterService.state().nodes().isLocalNodeElectedMaster() == false) {
|
||||
for (String template : templates.keySet()) {
|
||||
if (state.compareAndSet(State.INITIALIZED, State.RUNNING)) {
|
||||
logger.debug("started");
|
||||
}
|
||||
|
||||
return new LocalBulk(name(), logger, client, resolvers, config.settings().getAsBoolean(USE_INGEST_PIPELINE_SETTING, true));
|
||||
}
|
||||
|
||||
/**
|
||||
* When not on the elected master, we require all resources (mapping types, templates, and pipelines) to be available before we
|
||||
* attempt to run the exporter. If those resources do not exist, then it means the elected master's exporter has not yet run, so the
|
||||
* monitoring cluster (this one, as the local exporter) is not setup yet.
|
||||
*
|
||||
* @param clusterState The current cluster state.
|
||||
* @param templates All template names that should exist.
|
||||
* @return {@code true} indicates that all resources are available and the exporter can be used. {@code false} to stop and wait.
|
||||
*/
|
||||
private boolean setupIfNotElectedMaster(final ClusterState clusterState, final Set<String> templates) {
|
||||
for (final String type : MonitoringTemplateUtils.NEW_DATA_TYPES) {
|
||||
if (hasMappingType(type, clusterState) == false) {
|
||||
// the required type is not yet there in the given cluster state, we'll wait.
|
||||
logger.debug("monitoring index mapping [{}] does not exist in [{}], so service cannot start",
|
||||
type, MonitoringTemplateUtils.DATA_INDEX);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
for (final String template : templates) {
|
||||
if (hasTemplate(template, clusterState) == false) {
|
||||
// the required template is not yet installed in the given cluster state, we'll wait.
|
||||
logger.debug("monitoring index template [{}] does not exist, so service cannot start", template);
|
||||
return null;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// if we don't have the ingest pipeline, then it's going to fail anyway
|
||||
if (hasIngestPipelines(clusterState) == false) {
|
||||
logger.debug("monitoring ingest pipeline [{}] does not exist, so service cannot start", EXPORT_PIPELINE_NAME);
|
||||
return null;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (null != prepareAddAliasesTo2xIndices(clusterState)) {
|
||||
logger.debug("old monitoring indexes exist without aliases, waiting for them to get new aliases");
|
||||
return null;
|
||||
return false;
|
||||
}
|
||||
|
||||
logger.trace("monitoring index templates and pipelines are installed, service can start");
|
||||
|
||||
} else {
|
||||
// everything is setup
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* When on the elected master, we setup all resources (mapping types, templates, and pipelines) before we attempt to run the exporter.
|
||||
* If those resources do not exist, then we will create them.
|
||||
*
|
||||
* @param clusterState The current cluster state.
|
||||
* @param templates All template names that should exist.
|
||||
* @return {@code true} indicates that all resources are "ready" and the exporter can be used. {@code false} to stop and wait.
|
||||
*/
|
||||
private boolean setupIfElectedMaster(final ClusterState clusterState, final Map<String, String> templates) {
|
||||
// we are on the elected master
|
||||
// Check that there is nothing that could block metadata updates
|
||||
if (clusterState.blocks().hasGlobalBlock(ClusterBlockLevel.METADATA_WRITE)) {
|
||||
logger.debug("waiting until metadata writes are unblocked");
|
||||
return null;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (installingSomething.get() == true) {
|
||||
logger.trace("already installing something, waiting for install to complete");
|
||||
return null;
|
||||
return false;
|
||||
}
|
||||
|
||||
// build a list of runnables for everything that is missing, but do not start execution
|
||||
final List<Runnable> asyncActions = new ArrayList<>();
|
||||
final AtomicInteger pendingResponses = new AtomicInteger(0);
|
||||
|
||||
// Check that all necessary types exist for _xpack/monitoring/_bulk usage
|
||||
final List<String> missingMappingTypes = Arrays.stream(MonitoringTemplateUtils.NEW_DATA_TYPES)
|
||||
.filter((type) -> hasMappingType(type, clusterState) == false)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
// Check that each required template exist, installing it if needed
|
||||
final List<Entry<String, String>> missingTemplates = templates.entrySet()
|
||||
.stream()
|
||||
.filter((e) -> hasTemplate(e.getKey(), clusterState) == false)
|
||||
.collect(Collectors.toList());
|
||||
|
||||
if (missingMappingTypes.isEmpty() == false) {
|
||||
logger.debug((Supplier<?>) () -> new ParameterizedMessage("type {} not found",
|
||||
missingMappingTypes.stream().collect(Collectors.toList())));
|
||||
for (final String type : missingMappingTypes) {
|
||||
asyncActions.add(() -> putMappingType(type, new ResponseActionListener<>("type", type, pendingResponses)));
|
||||
}
|
||||
}
|
||||
|
||||
if (missingTemplates.isEmpty() == false) {
|
||||
logger.debug((Supplier<?>) () -> new ParameterizedMessage("template {} not found",
|
||||
missingTemplates.stream().map(Map.Entry::getKey).collect(Collectors.toList())));
|
||||
|
@ -252,18 +311,46 @@ public class LocalExporter extends Exporter implements ClusterStateListener, Cle
|
|||
asyncActions.forEach(Runnable::run);
|
||||
} else {
|
||||
// let the cluster catch up since requested installations may be ongoing
|
||||
return null;
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
logger.debug("monitoring index templates and pipelines are installed on master node, service can start");
|
||||
}
|
||||
|
||||
// everything is setup (or running)
|
||||
return true;
|
||||
}
|
||||
|
||||
if (state.compareAndSet(State.INITIALIZED, State.RUNNING)) {
|
||||
logger.debug("started");
|
||||
/**
|
||||
* Determine if the mapping {@code type} exists in the {@linkplain MonitoringTemplateUtils#DATA_INDEX data index}.
|
||||
*
|
||||
* @param type The data type to check (e.g., "kibana")
|
||||
* @param clusterState The current cluster state
|
||||
* @return {@code false} if the type mapping needs to be added.
|
||||
*/
|
||||
private boolean hasMappingType(final String type, final ClusterState clusterState) {
|
||||
final IndexMetaData dataIndex = clusterState.getMetaData().getIndices().get(MonitoringTemplateUtils.DATA_INDEX);
|
||||
|
||||
// if the index does not exist, then the template will add it and the type; if the index does exist, then we need the type
|
||||
return dataIndex == null || dataIndex.getMappings().containsKey(type);
|
||||
}
|
||||
|
||||
return new LocalBulk(name(), logger, client, resolvers, config.settings().getAsBoolean(USE_INGEST_PIPELINE_SETTING, true));
|
||||
/**
|
||||
* Add the mapping {@code type} to the {@linkplain MonitoringTemplateUtils#DATA_INDEX data index}.
|
||||
*
|
||||
* @param type The data type to check (e.g., "kibana")
|
||||
* @param listener The listener to use for handling the response
|
||||
*/
|
||||
private void putMappingType(final String type, final ActionListener<PutMappingResponse> listener) {
|
||||
logger.debug("adding mapping type [{}] to [{}]", type, MonitoringTemplateUtils.DATA_INDEX);
|
||||
|
||||
final PutMappingRequest putMapping = new PutMappingRequest(MonitoringTemplateUtils.DATA_INDEX);
|
||||
|
||||
putMapping.type(type);
|
||||
// avoid mapping at all; we use this index as a data cache rather than for search
|
||||
putMapping.source("{\"enabled\":false}");
|
||||
|
||||
client.admin().indices().putMapping(putMapping, listener);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -50,6 +50,7 @@ public class ResolversRegistry implements Iterable<MonitoringIndexNameResolver>

// register resolvers for monitored systems
registerMonitoredSystem(MonitoredSystem.KIBANA, settings);
registerMonitoredSystem(MonitoredSystem.LOGSTASH, settings);
}

/**
@ -66,6 +66,8 @@ public class NodeStatsResolver extends MonitoringIndexNameResolver.Timestamped<N
"node_stats.fs.total.free_in_bytes",
"node_stats.fs.total.available_in_bytes",
"node_stats.os.cpu.load_average.1m",
"node_stats.os.cpu.load_average.5m",
"node_stats.os.cpu.load_average.15m",
"node_stats.process.cpu.percent",
"node_stats.process.max_file_descriptors",
"node_stats.process.open_file_descriptors",

@ -99,6 +101,15 @@ public class NodeStatsResolver extends MonitoringIndexNameResolver.Timestamped<N
"node_stats.thread_pool.watcher.threads",
"node_stats.thread_pool.watcher.queue",
"node_stats.thread_pool.watcher.rejected",
// Cgroup data (generally Linux only and only sometimes on those systems)
"node_stats.os.cgroup.cpuacct.control_group",
"node_stats.os.cgroup.cpuacct.usage_nanos",
"node_stats.os.cgroup.cpu.control_group",
"node_stats.os.cgroup.cpu.cfs_period_micros",
"node_stats.os.cgroup.cpu.cfs_quota_micros",
"node_stats.os.cgroup.cpu.stat.number_of_elapsed_periods",
"node_stats.os.cgroup.cpu.stat.number_of_times_throttled",
"node_stats.os.cgroup.cpu.stat.time_throttled_nanos",
// Linux Only (at least for now)
// Disk Info
"node_stats.fs.data.spins",
@ -57,7 +57,7 @@ public abstract class Attachment extends BodyPartSource {
* intentionally not emitting path as it may come as an information leak
*/
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.startObject()
.field("type", type())
.field("id", id)
@ -7,8 +7,8 @@ package org.elasticsearch.xpack.notification.email;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

@ -20,7 +20,7 @@ import java.util.Map;

import static org.elasticsearch.xpack.watcher.support.Exceptions.illegalArgument;

public enum DataAttachment implements ToXContent {
public enum DataAttachment implements ToXContentObject {

YAML() {
@Override

@ -93,7 +93,7 @@ public enum DataAttachment implements ToXContent {
} else if (currentFieldName == null) {
throw new ElasticsearchParseException("could not parse data attachment. expected [{}] field but found [{}] instead",
Field.FORMAT.getPreferredName(), token);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.FORMAT)) {
} else if (Field.FORMAT.match(currentFieldName)) {
if (token == XContentParser.Token.VALUE_STRING) {
dataAttachment = resolve(parser.text());
} else {
@ -7,9 +7,9 @@ package org.elasticsearch.xpack.notification.email;
|
|||
|
||||
import org.elasticsearch.ElasticsearchParseException;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.joda.time.DateTime;
|
||||
|
@ -32,7 +32,7 @@ import java.util.Map;
|
|||
|
||||
import static java.util.Collections.unmodifiableMap;
|
||||
|
||||
public class Email implements ToXContent {
|
||||
public class Email implements ToXContentObject {
|
||||
|
||||
final String id;
|
||||
final Address from;
|
||||
|
@ -180,25 +180,25 @@ public class Email implements ToXContent {
|
|||
currentFieldName = parser.currentName();
|
||||
} else if ((token.isValue() || token == XContentParser.Token.START_OBJECT || token == XContentParser.Token.START_ARRAY) &&
|
||||
currentFieldName != null) {
|
||||
if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.ID)) {
|
||||
if (Field.ID.match(currentFieldName)) {
|
||||
email.id(parser.text());
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.FROM)) {
|
||||
} else if (Field.FROM.match(currentFieldName)) {
|
||||
email.from(Address.parse(currentFieldName, token, parser));
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.REPLY_TO)) {
|
||||
} else if (Field.REPLY_TO.match(currentFieldName)) {
|
||||
email.replyTo(AddressList.parse(currentFieldName, token, parser));
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.TO)) {
|
||||
} else if (Field.TO.match(currentFieldName)) {
|
||||
email.to(AddressList.parse(currentFieldName, token, parser));
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.CC)) {
|
||||
} else if (Field.CC.match(currentFieldName)) {
|
||||
email.cc(AddressList.parse(currentFieldName, token, parser));
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.BCC)) {
|
||||
} else if (Field.BCC.match(currentFieldName)) {
|
||||
email.bcc(AddressList.parse(currentFieldName, token, parser));
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.PRIORITY)) {
|
||||
} else if (Field.PRIORITY.match(currentFieldName)) {
|
||||
email.priority(Email.Priority.resolve(parser.text()));
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.SENT_DATE)) {
|
||||
} else if (Field.SENT_DATE.match(currentFieldName)) {
|
||||
email.sentDate(new DateTime(parser.text(), DateTimeZone.UTC));
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.SUBJECT)) {
|
||||
} else if (Field.SUBJECT.match(currentFieldName)) {
|
||||
email.subject(parser.text());
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.BODY)) {
|
||||
} else if (Field.BODY.match(currentFieldName)) {
|
||||
String bodyField = currentFieldName;
|
||||
if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
|
||||
email.textBody(parser.text());
|
||||
|
@ -208,9 +208,9 @@ public class Email implements ToXContent {
|
|||
currentFieldName = parser.currentName();
|
||||
} else if (currentFieldName == null) {
|
||||
throw new ElasticsearchParseException("could not parse email. empty [{}] field", bodyField);
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Email.Field.BODY_TEXT)) {
|
||||
} else if (Email.Field.BODY_TEXT.match(currentFieldName)) {
|
||||
email.textBody(parser.text());
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Email.Field.BODY_HTML)) {
|
||||
} else if (Email.Field.BODY_HTML.match(currentFieldName)) {
|
||||
email.htmlBody(parser.text());
|
||||
} else {
|
||||
throw new ElasticsearchParseException("could not parse email. unexpected field [{}.{}] field", bodyField,
|
||||
|
@ -456,9 +456,9 @@ public class Email implements ToXContent {
|
|||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token == XContentParser.Token.VALUE_STRING) {
|
||||
if (ParseFieldMatcher.STRICT.match(currentFieldName, ADDRESS_EMAIL_FIELD)) {
|
||||
if (ADDRESS_EMAIL_FIELD.match(currentFieldName)) {
|
||||
email = parser.text();
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, ADDRESS_NAME_FIELD)) {
|
||||
} else if (ADDRESS_NAME_FIELD.match(currentFieldName)) {
|
||||
name = parser.text();
|
||||
} else {
|
||||
throw new ElasticsearchParseException("could not parse [" + field + "] object as address. unknown address " +
|
||||
|
|
|
@@ -6,8 +6,7 @@
package org.elasticsearch.xpack.notification.email;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.common.text.TextTemplate;
@@ -21,7 +20,7 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;

public class EmailTemplate implements ToXContent {
public class EmailTemplate implements ToXContentObject {

final TextTemplate from;
final TextTemplate[] replyTo;
@@ -341,9 +340,9 @@ public class EmailTemplate implements ToXContent {
private final EmailTemplate.Builder builder = builder();

public boolean handle(String fieldName, XContentParser parser) throws IOException {
if (ParseFieldMatcher.STRICT.match(fieldName, Email.Field.FROM)) {
if (Email.Field.FROM.match(fieldName)) {
builder.from(TextTemplate.parse(parser));
} else if (ParseFieldMatcher.STRICT.match(fieldName, Email.Field.REPLY_TO)) {
} else if (Email.Field.REPLY_TO.match(fieldName)) {
if (parser.currentToken() == XContentParser.Token.START_ARRAY) {
List<TextTemplate> templates = new ArrayList<>();
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
@@ -353,7 +352,7 @@ public class EmailTemplate implements ToXContent {
} else {
builder.replyTo(TextTemplate.parse(parser));
}
} else if (ParseFieldMatcher.STRICT.match(fieldName, Email.Field.TO)) {
} else if (Email.Field.TO.match(fieldName)) {
if (parser.currentToken() == XContentParser.Token.START_ARRAY) {
List<TextTemplate> templates = new ArrayList<>();
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
@@ -363,7 +362,7 @@ public class EmailTemplate implements ToXContent {
} else {
builder.to(TextTemplate.parse(parser));
}
} else if (ParseFieldMatcher.STRICT.match(fieldName, Email.Field.CC)) {
} else if (Email.Field.CC.match(fieldName)) {
if (parser.currentToken() == XContentParser.Token.START_ARRAY) {
List<TextTemplate> templates = new ArrayList<>();
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
@@ -373,7 +372,7 @@ public class EmailTemplate implements ToXContent {
} else {
builder.cc(TextTemplate.parse(parser));
}
} else if (ParseFieldMatcher.STRICT.match(fieldName, Email.Field.BCC)) {
} else if (Email.Field.BCC.match(fieldName)) {
if (parser.currentToken() == XContentParser.Token.START_ARRAY) {
List<TextTemplate> templates = new ArrayList<>();
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
@@ -383,11 +382,11 @@ public class EmailTemplate implements ToXContent {
} else {
builder.bcc(TextTemplate.parse(parser));
}
} else if (ParseFieldMatcher.STRICT.match(fieldName, Email.Field.PRIORITY)) {
} else if (Email.Field.PRIORITY.match(fieldName)) {
builder.priority(TextTemplate.parse(parser));
} else if (ParseFieldMatcher.STRICT.match(fieldName, Email.Field.SUBJECT)) {
} else if (Email.Field.SUBJECT.match(fieldName)) {
builder.subject(TextTemplate.parse(parser));
} else if (ParseFieldMatcher.STRICT.match(fieldName, Email.Field.BODY)) {
} else if (Email.Field.BODY.match(fieldName)) {
if (parser.currentToken() == XContentParser.Token.VALUE_STRING) {
builder.textBody(TextTemplate.parse(parser));
} else if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
@@ -398,9 +397,9 @@ public class EmailTemplate implements ToXContent {
currentFieldName = parser.currentName();
} else if (currentFieldName == null) {
throw new ElasticsearchParseException("could not parse email template. empty [{}] field", fieldName);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Email.Field.BODY_TEXT)) {
} else if (Email.Field.BODY_TEXT.match(currentFieldName)) {
builder.textBody(TextTemplate.parse(parser));
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Email.Field.BODY_HTML)) {
} else if (Email.Field.BODY_HTML.match(currentFieldName)) {
builder.htmlBody(TextTemplate.parse(parser));
} else {
throw new ElasticsearchParseException("could not parse email template. unknown field [{}.{}] field",

@@ -7,7 +7,6 @@ package org.elasticsearch.xpack.notification.email.attachment;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.notification.email.Attachment;
@@ -43,7 +42,7 @@ public class DataAttachmentParser implements EmailAttachmentParser<DataAttachmen
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (Strings.hasLength(currentFieldName) && ParseFieldMatcher.STRICT.match(currentFieldName, Fields.FORMAT)) {
} else if (Strings.hasLength(currentFieldName) && Fields.FORMAT.match(currentFieldName)) {
if (token == XContentParser.Token.VALUE_STRING) {
dataAttachment = resolve(parser.text());
} else {

@@ -8,7 +8,6 @@ package org.elasticsearch.xpack.notification.email.attachment;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -61,11 +60,11 @@ public class HttpEmailAttachementParser implements EmailAttachmentParser<HttpReq
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.CONTENT_TYPE)) {
} else if (Fields.CONTENT_TYPE.match(currentFieldName)) {
contentType = parser.text();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.INLINE)) {
} else if (Fields.INLINE.match(currentFieldName)) {
inline = parser.booleanValue();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.REQUEST)) {
} else if (Fields.REQUEST.match(currentFieldName)) {
requestTemplate = requestTemplateParser.parse(parser);
} else {
String msg = "Unknown field name [" + currentFieldName + "] in http request attachment configuration";

@@ -6,16 +6,15 @@
package org.elasticsearch.xpack.notification.email.support;

import org.elasticsearch.SpecialPermission;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;

import javax.activation.CommandMap;
import javax.activation.FileTypeMap;
import javax.mail.MessagingException;
import javax.mail.internet.MimeBodyPart;
import java.security.AccessController;
import java.security.PrivilegedAction;

public abstract class BodyPartSource implements ToXContent {
public abstract class BodyPartSource implements ToXContentObject {

protected static FileTypeMap fileTypeMap;
static {

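Several classes in this commit switch from implementing `ToXContent` to `ToXContentObject`, which marks the serialized form as a complete object rather than a fragment that the caller must wrap. A rough, self-contained sketch of that distinction follows; the interfaces here are simplified stand-ins for illustration, not the real x-content API.

```java
public class ToXContentObjectSketch {

    /** Stand-in for ToXContent: output may be a fragment that the caller still has to wrap. */
    interface SimpleToXContent {
        StringBuilder toXContent(StringBuilder builder);

        default boolean isFragment() {
            return true;
        }
    }

    /** Stand-in for ToXContentObject: output is always a complete, self-contained object. */
    interface SimpleToXContentObject extends SimpleToXContent {
        @Override
        default boolean isFragment() {
            return false;
        }
    }

    /** A class like BodyPartSource after the change: it emits a full object on its own. */
    static final class Attachment implements SimpleToXContentObject {
        private final String id;

        Attachment(String id) {
            this.id = id;
        }

        @Override
        public StringBuilder toXContent(StringBuilder builder) {
            return builder.append("{\"id\":\"").append(id).append("\"}");
        }
    }

    public static void main(String[] args) {
        Attachment attachment = new Attachment("logo");
        // Callers no longer need to open and close the surrounding object themselves.
        System.out.println(attachment.toXContent(new StringBuilder()));
        System.out.println("is fragment: " + attachment.isFragment());
    }
}
```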
@@ -8,9 +8,9 @@ package org.elasticsearch.xpack.notification.hipchat;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.common.text.TextTemplate;
@@ -24,7 +24,7 @@ import java.util.Locale;
import java.util.Map;
import java.util.Objects;

public class HipChatMessage implements ToXContent {
public class HipChatMessage implements ToXContentObject {

final String body;
@Nullable final String[] rooms;
@@ -249,9 +249,9 @@ public class HipChatMessage implements ToXContent {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.FROM)) {
} else if (Field.FROM.match(currentFieldName)) {
from = parser.text();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.ROOM)) {
} else if (Field.ROOM.match(currentFieldName)) {
List<TextTemplate> templates = new ArrayList<>();
if (token == XContentParser.Token.START_ARRAY) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
@@ -271,7 +271,7 @@ public class HipChatMessage implements ToXContent {
}
}
rooms = templates.toArray(new TextTemplate[templates.size()]);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.USER)) {
} else if (Field.USER.match(currentFieldName)) {
List<TextTemplate> templates = new ArrayList<>();
if (token == XContentParser.Token.START_ARRAY) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
@@ -291,28 +291,28 @@ public class HipChatMessage implements ToXContent {
}
}
users = templates.toArray(new TextTemplate[templates.size()]);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.COLOR)) {
} else if (Field.COLOR.match(currentFieldName)) {
try {
color = TextTemplate.parse(parser);
} catch (ElasticsearchParseException | IllegalArgumentException e) {
throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", e,
Field.COLOR.getPreferredName());
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.NOTIFY)) {
} else if (Field.NOTIFY.match(currentFieldName)) {
if (token == XContentParser.Token.VALUE_BOOLEAN) {
notify = parser.booleanValue();
} else {
throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field, expected a " +
"boolean value but found [{}]", Field.NOTIFY.getPreferredName(), token);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.BODY)) {
} else if (Field.BODY.match(currentFieldName)) {
try {
body = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("failed to parse hipchat message. failed to parse [{}] field", pe,
Field.BODY.getPreferredName());
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.FORMAT)) {
} else if (Field.FORMAT.match(currentFieldName)) {
try {
messageFormat = HipChatMessage.Format.parse(parser);
} catch (IllegalArgumentException ilae) {

@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.notification.hipchat;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.common.http.HttpRequest;
import org.elasticsearch.xpack.common.http.HttpResponse;
@@ -17,7 +18,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Locale;

public class SentMessages implements ToXContent, Iterable<SentMessages.SentMessage> {
public class SentMessages implements ToXContentObject, Iterable<SentMessages.SentMessage> {

private String accountName;
private List<SentMessage> messages;

@@ -9,9 +9,8 @@ import org.apache.http.HttpStatus;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
@@ -25,7 +24,7 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;

public class JiraIssue implements ToXContent {
public class JiraIssue implements ToXContentObject {

@Nullable final String account;
private final Map<String, Object> fields;
@@ -161,12 +160,12 @@ public class JiraIssue implements ToXContent {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.ERRORS)) {
} else if (Field.ERRORS.match(currentFieldName)) {
Map<String, Object> fieldErrors = parser.mapOrdered();
for (Map.Entry<String, Object> entry : fieldErrors.entrySet()) {
errors.add("Field [" + entry.getKey() + "] has error [" + String.valueOf(entry.getValue()) + "]");
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Field.ERROR_MESSAGES)) {
} else if (Field.ERROR_MESSAGES.match(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
errors.add(parser.text());
}

@@ -8,9 +8,9 @@ package org.elasticsearch.xpack.notification.pagerduty;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.common.http.HttpMethod;
@@ -36,7 +36,7 @@ import java.util.Objects;
* https://developer.pagerduty.com/documentation/integration/events/acknowledge
* https://developer.pagerduty.com/documentation/integration/events/resolve
*/
public class IncidentEvent implements ToXContent {
public class IncidentEvent implements ToXContentObject {

static final String HOST = "events.pagerduty.com";
static final String PATH = "/generic/2010-04-15/create_event.json";
@@ -290,58 +290,58 @@ public class IncidentEvent implements ToXContent {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.INCIDENT_KEY)) {
} else if (Fields.INCIDENT_KEY.match(currentFieldName)) {
try {
incidentKey = TextTemplate.parse(parser);
} catch (ElasticsearchParseException e) {
throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}]",
Fields.INCIDENT_KEY.getPreferredName());
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.DESCRIPTION)) {
} else if (Fields.DESCRIPTION.match(currentFieldName)) {
try {
description = TextTemplate.parse(parser);
} catch (ElasticsearchParseException e) {
throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}]",
Fields.DESCRIPTION.getPreferredName());
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.CLIENT)) {
} else if (Fields.CLIENT.match(currentFieldName)) {
try {
client = TextTemplate.parse(parser);
} catch (ElasticsearchParseException e) {
throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}]",
Fields.CLIENT.getPreferredName());
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.CLIENT_URL)) {
} else if (Fields.CLIENT_URL.match(currentFieldName)) {
try {
clientUrl = TextTemplate.parse(parser);
} catch (ElasticsearchParseException e) {
throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}]",
Fields.CLIENT_URL.getPreferredName());
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.ACCOUNT)) {
} else if (Fields.ACCOUNT.match(currentFieldName)) {
try {
account = parser.text();
} catch (ElasticsearchParseException e) {
throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}]",
Fields.CLIENT_URL.getPreferredName());
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.PROXY)) {
} else if (Fields.PROXY.match(currentFieldName)) {
proxy = HttpProxy.parse(parser);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.EVENT_TYPE)) {
} else if (Fields.EVENT_TYPE.match(currentFieldName)) {
try {
eventType = TextTemplate.parse(parser);
} catch (ElasticsearchParseException e) {
throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}]",
Fields.EVENT_TYPE.getPreferredName());
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.ATTACH_PAYLOAD)) {
} else if (Fields.ATTACH_PAYLOAD.match(currentFieldName)) {
if (token == XContentParser.Token.VALUE_BOOLEAN) {
attachPayload = parser.booleanValue();
} else {
throw new ElasticsearchParseException("could not parse pager duty event template. failed to parse field [{}], " +
"expected a boolean value but found [{}] instead", Fields.ATTACH_PAYLOAD.getPreferredName(), token);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.CONTEXT)) {
} else if (Fields.CONTEXT.match(currentFieldName)) {
if (token == XContentParser.Token.START_ARRAY) {
List<IncidentEventContext.Template> list = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {

@@ -8,7 +8,6 @@ package org.elasticsearch.xpack.notification.pagerduty;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -195,7 +194,7 @@ public class IncidentEventContext implements ToXContent {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (Strings.hasLength(currentFieldName)) {
if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.TYPE)) {
if (XField.TYPE.match(currentFieldName)) {
try {
type = Type.valueOf(parser.text().toUpperCase(Locale.ROOT));
} catch (IllegalArgumentException e) {
@@ -211,13 +210,13 @@ public class IncidentEventContext implements ToXContent {
throw new ElasticsearchParseException(msg, e, currentFieldName);
}

if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.HREF)) {
if (XField.HREF.match(currentFieldName)) {
href = parsedTemplate;
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.TEXT)) {
} else if (XField.TEXT.match(currentFieldName)) {
text = parsedTemplate;
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.SRC)) {
} else if (XField.SRC.match(currentFieldName)) {
src = parsedTemplate;
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.ALT)) {
} else if (XField.ALT.match(currentFieldName)) {
alt = parsedTemplate;
} else {
String msg = "could not parse trigger incident event context. unknown field [{}]";

@@ -8,22 +8,21 @@ package org.elasticsearch.xpack.notification.pagerduty;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.xpack.watcher.actions.pagerduty.PagerDutyAction;
import org.elasticsearch.xpack.common.http.HttpRequest;
import org.elasticsearch.xpack.common.http.HttpResponse;
import org.elasticsearch.xpack.watcher.actions.pagerduty.PagerDutyAction;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

public class SentEvent implements ToXContent {
public class SentEvent implements ToXContentObject {

final IncidentEvent event;
@Nullable final HttpRequest request;
@@ -115,11 +114,11 @@ public class SentEvent implements ToXContent {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.MESSAGE)) {
} else if (XField.MESSAGE.match(currentFieldName)) {
message = parser.text();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.CODE)) {
} else if (XField.CODE.match(currentFieldName)) {
// we don't use this code.. so just consume the token
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.ERRORS)) {
} else if (XField.ERRORS.match(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
errors.add(parser.text());
}

@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.notification.slack;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.common.http.HttpRequest;
import org.elasticsearch.xpack.common.http.HttpResponse;
@@ -17,7 +18,7 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.List;

public class SentMessages implements ToXContent, Iterable<SentMessages.SentMessage> {
public class SentMessages implements ToXContentObject, Iterable<SentMessages.SentMessage> {

private String accountName;
private List<SentMessage> messages;

@@ -12,6 +12,7 @@ import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.xpack.common.http.HttpClient;
import org.elasticsearch.xpack.common.http.HttpMethod;
import org.elasticsearch.xpack.common.http.HttpProxy;
import org.elasticsearch.xpack.common.http.HttpRequest;
import org.elasticsearch.xpack.common.http.HttpResponse;
@@ -68,6 +69,7 @@ public class SlackAccount {
public SentMessages.SentMessage send(final String to, final SlackMessage message, final HttpProxy proxy) {
HttpRequest request = HttpRequest.builder(url.getHost(), url.getPort())
.path(url.getPath())
.method(HttpMethod.POST)
.proxy(proxy)
.scheme(Scheme.parse(url.getScheme()))
.jsonBody(new ToXContent() {

@@ -7,8 +7,7 @@ package org.elasticsearch.xpack.notification.slack.message;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.common.text.TextTemplate;
@@ -143,7 +142,7 @@ public class Attachment implements MessageElement {
return builder.endObject();
}

static class Template implements ToXContent {
static class Template implements ToXContentObject {

final TextTemplate fallback;
final TextTemplate color;
@@ -307,70 +306,70 @@ public class Attachment implements MessageElement {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.FALLBACK)) {
} else if (XField.FALLBACK.match(currentFieldName)) {
try {
fallback = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe,
XField.FALLBACK);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.COLOR)) {
} else if (XField.COLOR.match(currentFieldName)) {
try {
color = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe,
XField.COLOR);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.PRETEXT)) {
} else if (XField.PRETEXT.match(currentFieldName)) {
try {
pretext = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe,
XField.PRETEXT);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.AUTHOR_NAME)) {
} else if (XField.AUTHOR_NAME.match(currentFieldName)) {
try {
authorName = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe,
XField.AUTHOR_NAME);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.AUTHOR_LINK)) {
} else if (XField.AUTHOR_LINK.match(currentFieldName)) {
try {
authorLink = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe,
XField.AUTHOR_LINK);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.AUTHOR_ICON)) {
} else if (XField.AUTHOR_ICON.match(currentFieldName)) {
try {
authorIcon = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe,
XField.AUTHOR_ICON);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.TITLE)) {
} else if (XField.TITLE.match(currentFieldName)) {
try {
title = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe,
XField.TITLE);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.TITLE_LINK)) {
} else if (XField.TITLE_LINK.match(currentFieldName)) {
try {
titleLink = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe,
XField.TITLE_LINK);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.TEXT)) {
} else if (XField.TEXT.match(currentFieldName)) {
try {
text = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe,
XField.TEXT);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.FIELDS)) {
} else if (XField.FIELDS.match(currentFieldName)) {
if (token == XContentParser.Token.START_ARRAY) {
List<Field.Template> list = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
@@ -390,14 +389,14 @@ public class Attachment implements MessageElement {
XField.FIELDS);
}
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.IMAGE_URL)) {
} else if (XField.IMAGE_URL.match(currentFieldName)) {
try {
imageUrl = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse message attachment. failed to parse [{}] field", pe,
XField.IMAGE_URL);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.THUMB_URL)) {
} else if (XField.THUMB_URL.match(currentFieldName)) {
try {
thumbUrl = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {

@@ -7,7 +7,6 @@ package org.elasticsearch.xpack.notification.slack.message;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.common.text.TextTemplateEngine;
@@ -63,14 +62,14 @@ public class DynamicAttachments implements MessageElement {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.LIST_PATH)) {
} else if (XField.LIST_PATH.match(currentFieldName)) {
if (token == XContentParser.Token.VALUE_STRING) {
listPath = parser.text();
} else {
throw new ElasticsearchParseException("could not parse dynamic attachments. expected a string value for [{}] field, " +
"but found [{}]", XField.LIST_PATH.getPreferredName(), token);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.TEMPLATE)) {
} else if (XField.TEMPLATE.match(currentFieldName)) {
try {
template = Attachment.Template.parse(parser);
} catch (ElasticsearchParseException pe) {

@@ -7,7 +7,6 @@ package org.elasticsearch.xpack.notification.slack.message;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -118,21 +117,21 @@ class Field implements MessageElement {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.TITLE)) {
} else if (XField.TITLE.match(currentFieldName)) {
try {
title = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse message attachment field. failed to parse [{}] field", pe,
XField.TITLE);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.VALUE)) {
} else if (XField.VALUE.match(currentFieldName)) {
try {
value = TextTemplate.parse(parser);
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse message attachment field. failed to parse [{}] field", pe,
XField.VALUE);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.SHORT)) {
} else if (XField.SHORT.match(currentFieldName)) {
if (token == XContentParser.Token.VALUE_BOOLEAN) {
isShort = parser.booleanValue();
} else {

@@ -6,9 +6,9 @@
package org.elasticsearch.xpack.notification.slack.message;

import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;

public interface MessageElement extends ToXContent {
public interface MessageElement extends ToXContentObject {

interface XField {
ParseField TITLE = new ParseField("title");

@@ -7,7 +7,6 @@ package org.elasticsearch.xpack.notification.slack.message;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@@ -254,14 +253,14 @@ public class SlackMessage implements MessageElement {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.FROM)) {
} else if (XField.FROM.match(currentFieldName)) {
try {
builder.setFrom(TextTemplate.parse(parser));
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse slack message. failed to parse [{}] field", pe,
XField.FROM.getPreferredName());
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.TO)) {
} else if (XField.TO.match(currentFieldName)) {
if (token == XContentParser.Token.START_ARRAY) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
try {
@@ -279,21 +278,21 @@ public class SlackMessage implements MessageElement {
XField.TO.getPreferredName());
}
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.TEXT)) {
} else if (XField.TEXT.match(currentFieldName)) {
try {
builder.setText(TextTemplate.parse(parser));
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse slack message. failed to parse [{}] field", pe,
XField.TEXT.getPreferredName());
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.ICON)) {
} else if (XField.ICON.match(currentFieldName)) {
try {
builder.setIcon(TextTemplate.parse(parser));
} catch (ElasticsearchParseException pe) {
throw new ElasticsearchParseException("could not parse slack message. failed to parse [{}] field.", pe,
XField.ICON.getPreferredName());
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.ATTACHMENTS)) {
} else if (XField.ATTACHMENTS.match(currentFieldName)) {
if (token == XContentParser.Token.START_ARRAY) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
try {
@@ -311,7 +310,7 @@ public class SlackMessage implements MessageElement {
XField.ATTACHMENTS.getPreferredName());
}
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, XField.DYNAMIC_ATTACHMENTS)) {
} else if (XField.DYNAMIC_ATTACHMENTS.match(currentFieldName)) {
try {
builder.setDynamicAttachments(DynamicAttachments.parse(parser));
} catch (ElasticsearchParseException pe) {

@@ -96,6 +96,7 @@ import org.elasticsearch.xpack.security.authz.AuthorizationService;
import org.elasticsearch.xpack.security.authz.accesscontrol.OptOutQueryCache;
import org.elasticsearch.xpack.security.authz.accesscontrol.SecurityIndexSearcherWrapper;
import org.elasticsearch.xpack.security.authz.accesscontrol.SetSecurityUserProcessor;
import org.elasticsearch.xpack.security.authz.permission.FieldPermissionsCache;
import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore;
import org.elasticsearch.xpack.security.authz.store.FileRolesStore;
import org.elasticsearch.xpack.security.authz.store.NativeRolesStore;
@@ -312,16 +313,19 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {
cryptoService, failureHandler, threadPool, anonymousUser));
components.add(authcService.get());

final FileRolesStore fileRolesStore = new FileRolesStore(settings, env, resourceWatcherService);
final NativeRolesStore nativeRolesStore = new NativeRolesStore(settings, client);
final ReservedRolesStore reservedRolesStore = new ReservedRolesStore(securityContext);
final CompositeRolesStore allRolesStore = new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore, reservedRolesStore);
final FileRolesStore fileRolesStore = new FileRolesStore(settings, env, resourceWatcherService, licenseState);
final NativeRolesStore nativeRolesStore = new NativeRolesStore(settings, client, licenseState);
final ReservedRolesStore reservedRolesStore = new ReservedRolesStore();
final CompositeRolesStore allRolesStore =
new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore, reservedRolesStore, licenseState);
// to keep things simple, just invalidate all cached entries on license change. this happens so rarely that the impact should be
// minimal
licenseState.addListener(allRolesStore::invalidateAll);
final AuthorizationService authzService = new AuthorizationService(settings, allRolesStore, clusterService,
auditTrailService, failureHandler, threadPool, anonymousUser);
components.add(fileRolesStore); // has lifecycle
components.add(nativeRolesStore); // used by roles actions
components.add(reservedRolesStore); // used by roles actions
components.add(allRolesStore); // for SecurityFeatureSet
components.add(allRolesStore); // for SecurityFeatureSet and clear roles cache
components.add(authzService);

components.add(new SecurityLifecycleService(settings, clusterService, threadPool, indexAuditTrail,
@@ -404,6 +408,8 @@ public class Security implements ActionPlugin, IngestPlugin, NetworkPlugin {
NativeRolesStore.addSettings(settingsList);
AuthenticationService.addSettings(settingsList);
AuthorizationService.addSettings(settingsList);
settingsList.add(CompositeRolesStore.CACHE_SIZE_SETTING);
settingsList.add(FieldPermissionsCache.CACHE_SIZE_SETTING);

// encryption settings
CryptoService.addSettings(settingsList);

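The plugin wiring above registers the composite roles store as a license-state listener so that every cached role is dropped when the license level changes. Below is a small, self-contained sketch of that listener pattern; the `LicenseState` and `RolesStore` classes here are simplified stand-ins, not the actual x-pack types.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class LicenseListenerSketch {

    /** Stand-in for the license state: it only knows how to notify listeners of a change. */
    static final class LicenseState {
        private final List<Runnable> listeners = new ArrayList<>();

        void addListener(Runnable listener) {
            listeners.add(listener);
        }

        void licenseChanged() {
            // invoked whenever the license level changes
            listeners.forEach(Runnable::run);
        }
    }

    /** Stand-in for a roles store with a cache that can be invalidated wholesale. */
    static final class RolesStore {
        private final Map<String, String> cache = new ConcurrentHashMap<>();

        void put(String role, String descriptor) {
            cache.put(role, descriptor);
        }

        int size() {
            return cache.size();
        }

        // keep it simple: drop every cached entry when the license changes
        void invalidateAll() {
            cache.clear();
        }
    }

    public static void main(String[] args) {
        LicenseState licenseState = new LicenseState();
        RolesStore rolesStore = new RolesStore();
        licenseState.addListener(rolesStore::invalidateAll);

        rolesStore.put("admin", "all privileges");
        System.out.println("cached roles before license change: " + rolesStore.size());
        licenseState.licenseChanged();
        System.out.println("cached roles after license change: " + rolesStore.size());
    }
}
```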
@@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail;
import org.elasticsearch.xpack.security.authc.esnative.NativeRealmMigrator;
import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore;
import org.elasticsearch.xpack.security.authz.store.NativeRolesStore;

@@ -52,7 +53,7 @@ public class SecurityLifecycleService extends AbstractComponent implements Clust
clusterService.addListener(this);
clusterService.addListener(nativeUserStore);
clusterService.addListener(nativeRolesStore);
clusterService.addListener(new SecurityTemplateService(settings, client));
clusterService.addListener(new SecurityTemplateService(settings, client, new NativeRealmMigrator(settings, nativeUserStore)));
clusterService.addLifecycleListener(new LifecycleListener() {

@Override

@@ -22,6 +22,7 @@ import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.component.AbstractComponent;
@@ -32,11 +33,15 @@ import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.xpack.security.authc.esnative.NativeRealmMigrator;
import org.elasticsearch.xpack.template.TemplateUtils;

import java.io.IOException;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
import java.util.regex.Pattern;

@@ -52,13 +57,20 @@ public class SecurityTemplateService extends AbstractComponent implements Cluste
static final String SECURITY_INDEX_TEMPLATE_VERSION_PATTERN = Pattern.quote("${security.template.version}");
static final Version MIN_READ_VERSION = Version.V_5_0_0;

enum UpgradeState {
NOT_STARTED, IN_PROGRESS, COMPLETE, FAILED
}

private final InternalClient client;
final AtomicBoolean templateCreationPending = new AtomicBoolean(false);
final AtomicBoolean updateMappingPending = new AtomicBoolean(false);
final AtomicReference upgradeDataState = new AtomicReference<>(UpgradeState.NOT_STARTED);
private final NativeRealmMigrator nativeRealmMigrator;

public SecurityTemplateService(Settings settings, InternalClient client) {
public SecurityTemplateService(Settings settings, InternalClient client, NativeRealmMigrator nativeRealmMigrator) {
super(settings);
this.client = client;
this.nativeRealmMigrator = nativeRealmMigrator;
}

@Override
@@ -79,10 +91,24 @@ public class SecurityTemplateService extends AbstractComponent implements Cluste
// make sure mapping is up to date
if (state.metaData().getIndices() != null) {
if (securityIndexMappingUpToDate(state, logger) == false) {
updateSecurityMapping();
if (securityIndexAvailable(state, logger)) {
upgradeSecurityData(state, this::updateSecurityMapping);
}
}
}
}

private boolean securityIndexAvailable(ClusterState state, Logger logger) {
final IndexRoutingTable routingTable = getSecurityIndexRoutingTable(state);
if (routingTable == null) {
throw new IllegalStateException("Security index does not exist");
}
if (routingTable.allPrimaryShardsActive() == false) {
logger.debug("Security index is not yet active");
return false;
}
return true;
}

private void updateSecurityTemplate() {
// only put the template if this is not already in progress
@@ -91,6 +117,33 @@ public class SecurityTemplateService extends AbstractComponent implements Cluste
}
}

private boolean upgradeSecurityData(ClusterState state, Runnable andThen) {
// only update the data if this is not already in progress
if (upgradeDataState.compareAndSet(UpgradeState.NOT_STARTED, UpgradeState.IN_PROGRESS) ) {
final Version previousVersion = oldestSecurityIndexMappingVersion(state, logger);
nativeRealmMigrator.performUpgrade(previousVersion, new ActionListener<Boolean>() {

@Override
public void onResponse(Boolean upgraded) {
upgradeDataState.set(UpgradeState.COMPLETE);
andThen.run();
}

@Override
public void onFailure(Exception e) {
upgradeDataState.set(UpgradeState.FAILED);
logger.error((Supplier<?>) () -> new ParameterizedMessage("failed to upgrade security data from version [{}] ",
previousVersion), e);
}
});
return true;
} else {
andThen.run();
return false;
}
}


private void updateSecurityMapping() {
// only update the mapping if this is not already in progress
if (updateMappingPending.compareAndSet(false, true) ) {
@@ -182,6 +235,16 @@ public class SecurityTemplateService extends AbstractComponent implements Cluste
}

static boolean securityIndexMappingVersionMatches(ClusterState clusterState, Logger logger, Predicate<Version> predicate) {
return securityIndexMappingVersions(clusterState, logger).stream().allMatch(predicate);
}

public static Version oldestSecurityIndexMappingVersion(ClusterState clusterState, Logger logger) {
final Set<Version> versions = securityIndexMappingVersions(clusterState, logger);
return versions.stream().min(Version::compareTo).orElse(null);
}

private static Set<Version> securityIndexMappingVersions(ClusterState clusterState, Logger logger) {
Set<Version> versions = new HashSet<>();
IndexMetaData indexMetaData = clusterState.metaData().getIndices().get(SECURITY_INDEX_NAME);
if (indexMetaData != null) {
for (Object object : indexMetaData.getMappings().values().toArray()) {
@@ -189,21 +252,25 @@ public class SecurityTemplateService extends AbstractComponent implements Cluste
if (mappingMetaData.type().equals(MapperService.DEFAULT_MAPPING)) {
continue;
}
try {
if (containsCorrectVersion(mappingMetaData.sourceAsMap(), predicate) == false) {
return false;
versions.add(readMappingVersion(mappingMetaData, logger));
}
}
return versions;
}

private static Version readMappingVersion(MappingMetaData mappingMetaData, Logger logger) {
try {
Map<String, Object> meta = (Map<String, Object>) mappingMetaData.sourceAsMap().get("_meta");
if (meta == null) {
// something pre-5.0, but we don't know what. Use 2.3.0 as a placeholder for "old"
return Version.V_2_3_0;
}
return Version.fromString((String) meta.get(SECURITY_VERSION_STRING));
} catch (IOException e) {
logger.error("Cannot parse the mapping for security index.", e);
throw new ElasticsearchException("Cannot parse the mapping for security index.", e);
}
}
return true;
} else {
// index does not exist so when we create it it will be up to date
return true;
}
}

static boolean securityTemplateExistsAndIsUpToDate(ClusterState state, Logger logger) {
return securityTemplateExistsAndVersionMatches(state, logger, Version.CURRENT::equals);
@@ -249,6 +316,18 @@ public class SecurityTemplateService extends AbstractComponent implements Cluste
return predicate.test(Version.fromString((String) meta.get(SECURITY_VERSION_STRING)));
}

/**
* Returns the routing-table for the security index, or <code>null</code> if the security index does not exist.
*/
public static IndexRoutingTable getSecurityIndexRoutingTable(ClusterState clusterState) {
IndexMetaData metaData = clusterState.metaData().index(SECURITY_INDEX_NAME);
if (metaData == null) {
return null;
} else {
return clusterState.routingTable().index(SECURITY_INDEX_NAME);
}
}

public static boolean securityIndexMappingAndTemplateUpToDate(ClusterState clusterState, Logger logger) {
if (securityTemplateExistsAndIsUpToDate(clusterState, logger) == false) {
logger.debug("security template [{}] does not exist or is not up to date, so service cannot start",

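The new `upgradeSecurityData` method above gates the native-realm migration behind an atomic state transition: only the first cluster-state update that wins the compare-and-set actually runs the upgrade, and every later caller skips straight to the follow-up action. A condensed, self-contained sketch of that pattern follows; it is synchronous for brevity and uses plain `Runnable`s instead of the real migrator and listener types.

```java
import java.util.concurrent.atomic.AtomicReference;

public class OneShotUpgradeSketch {

    enum UpgradeState {
        NOT_STARTED, IN_PROGRESS, COMPLETE, FAILED
    }

    private final AtomicReference<UpgradeState> upgradeDataState =
            new AtomicReference<>(UpgradeState.NOT_STARTED);

    /**
     * Runs the upgrade at most once. The first caller wins the CAS and performs the work;
     * every other caller just runs the follow-up action.
     */
    boolean upgradeThen(Runnable upgrade, Runnable andThen) {
        if (upgradeDataState.compareAndSet(UpgradeState.NOT_STARTED, UpgradeState.IN_PROGRESS)) {
            try {
                upgrade.run();
                upgradeDataState.set(UpgradeState.COMPLETE);
                andThen.run();
            } catch (RuntimeException e) {
                upgradeDataState.set(UpgradeState.FAILED);
                System.err.println("failed to upgrade security data: " + e.getMessage());
            }
            return true;
        } else {
            andThen.run();
            return false;
        }
    }

    public static void main(String[] args) {
        OneShotUpgradeSketch service = new OneShotUpgradeSketch();
        Runnable upgrade = () -> System.out.println("migrating native realm data");
        Runnable updateMapping = () -> System.out.println("updating security index mapping");

        System.out.println("first call ran upgrade: " + service.upgradeThen(upgrade, updateMapping));
        System.out.println("second call ran upgrade: " + service.upgradeThen(upgrade, updateMapping));
    }
}
```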
@@ -37,9 +37,9 @@ import org.elasticsearch.xpack.security.authc.Authentication;
import org.elasticsearch.xpack.security.authc.AuthenticationService;
import org.elasticsearch.xpack.security.authz.AuthorizationService;
import org.elasticsearch.xpack.security.authz.AuthorizationUtils;
import org.elasticsearch.xpack.security.authz.privilege.GeneralPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.HealthAndStatsPrivilege;
import org.elasticsearch.xpack.security.crypto.CryptoService;
import org.elasticsearch.xpack.security.support.Automatons;
import org.elasticsearch.xpack.security.user.SystemUser;
import org.elasticsearch.xpack.security.user.User;

@@ -55,8 +55,7 @@ import static org.elasticsearch.xpack.security.support.Exceptions.authorizationE
public class SecurityActionFilter extends AbstractComponent implements ActionFilter {

private static final Predicate<String> LICENSE_EXPIRATION_ACTION_MATCHER = HealthAndStatsPrivilege.INSTANCE.predicate();
private static final Predicate<String> SECURITY_ACTION_MATCHER =
new GeneralPrivilege("_security_matcher", "cluster:admin/xpack/security*").predicate();
private static final Predicate<String> SECURITY_ACTION_MATCHER = Automatons.predicate("cluster:admin/xpack/security*");

private final AuthenticationService authcService;
private final AuthorizationService authzService;

@@ -8,7 +8,7 @@ package org.elasticsearch.xpack.security.action.role;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
@@ -16,7 +16,7 @@ import java.io.IOException;
/**
* Response for a role being deleted from the security index
*/
public class DeleteRoleResponse extends ActionResponse implements ToXContent {
public class DeleteRoleResponse extends ActionResponse implements ToXContentObject {

private boolean found = false;


@@ -15,7 +15,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.support.MetadataUtils;
import org.elasticsearch.xpack.security.authz.permission.FieldPermissions;

import java.io.IOException;
import java.util.ArrayList;
@@ -65,12 +64,13 @@ public class PutRoleRequest extends ActionRequest implements WriteRequest<PutRol
this.indicesPrivileges.addAll(Arrays.asList(privileges));
}

public void addIndex(String[] indices, String[] privileges, FieldPermissions fieldPermissions,
public void addIndex(String[] indices, String[] privileges, String[] grantedFields, String[] deniedFields,
@Nullable BytesReference query) {
this.indicesPrivileges.add(RoleDescriptor.IndicesPrivileges.builder()
.indices(indices)
.privileges(privileges)
.fieldPermissions(fieldPermissions)
.grantedFields(grantedFields)
.deniedFields(deniedFields)
.query(query)
.build());
}

@@ -11,7 +11,6 @@ import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.authz.permission.FieldPermissions;

import java.io.IOException;
import java.util.Map;
@@ -58,9 +57,9 @@ public class PutRoleRequestBuilder extends ActionRequestBuilder<PutRoleRequest,
return this;
}

public PutRoleRequestBuilder addIndices(String[] indices, String[] privileges,
FieldPermissions fieldPermissions, @Nullable BytesReference query) {
request.addIndex(indices, privileges, fieldPermissions, query);
public PutRoleRequestBuilder addIndices(String[] indices, String[] privileges, String[] grantedFields, String[] deniedFields,
@Nullable BytesReference query) {
request.addIndex(indices, privileges, grantedFields, deniedFields, query);
return this;
}


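The role-request change above replaces the `FieldPermissions` parameter with explicit granted and denied field arrays, leaving it to the descriptor to build the permission object internally. Below is a hedged sketch of what the calling side of such a builder can look like; the `IndicesPrivileges` class here is a hypothetical stand-in, not the actual `RoleDescriptor` API.

```java
import java.util.Arrays;

public class FieldPermissionSketch {

    /** Hypothetical stand-in for an indices-privileges builder that accepts raw field arrays. */
    static final class IndicesPrivileges {
        private String[] indices;
        private String[] privileges;
        private String[] grantedFields;
        private String[] deniedFields;

        IndicesPrivileges indices(String... indices) {
            this.indices = indices;
            return this;
        }

        IndicesPrivileges privileges(String... privileges) {
            this.privileges = privileges;
            return this;
        }

        // the builder takes granted/denied field names directly instead of a FieldPermissions object
        IndicesPrivileges grantedFields(String... grantedFields) {
            this.grantedFields = grantedFields;
            return this;
        }

        IndicesPrivileges deniedFields(String... deniedFields) {
            this.deniedFields = deniedFields;
            return this;
        }

        @Override
        public String toString() {
            return "indices=" + Arrays.toString(indices)
                    + " privileges=" + Arrays.toString(privileges)
                    + " granted=" + Arrays.toString(grantedFields)
                    + " denied=" + Arrays.toString(deniedFields);
        }
    }

    public static void main(String[] args) {
        IndicesPrivileges privileges = new IndicesPrivileges()
                .indices("logs-*")
                .privileges("read")
                .grantedFields("message", "@timestamp")
                .deniedFields("secret");
        System.out.println(privileges);
    }
}
```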
@@ -12,7 +12,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.security.authz.store.NativeRolesStore;
import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@@ -21,12 +21,12 @@ import java.util.List;
public class TransportClearRolesCacheAction extends TransportNodesAction<ClearRolesCacheRequest, ClearRolesCacheResponse,
ClearRolesCacheRequest.Node, ClearRolesCacheResponse.Node> {

private final NativeRolesStore rolesStore;
private final CompositeRolesStore rolesStore;

@Inject
public TransportClearRolesCacheAction(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService, ActionFilters actionFilters,
NativeRolesStore rolesStore, IndexNameExpressionResolver indexNameExpressionResolver) {
CompositeRolesStore rolesStore, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ClearRolesCacheAction.NAME, threadPool, clusterService, transportService,
actionFilters, indexNameExpressionResolver, ClearRolesCacheRequest::new, ClearRolesCacheRequest.Node::new,
ThreadPool.Names.MANAGEMENT, ClearRolesCacheResponse.Node.class);

@@ -5,8 +5,6 @@
*/
package org.elasticsearch.xpack.security.action.role;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
@@ -16,15 +14,12 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.authz.permission.KibanaRole;
import org.elasticsearch.xpack.security.authz.store.NativeRolesStore;
import org.elasticsearch.xpack.security.authz.store.ReservedRolesStore;

import java.util.ArrayList;
import java.util.List;

import static org.elasticsearch.common.Strings.arrayToDelimitedString;

public class TransportGetRolesAction extends HandledTransportAction<GetRolesRequest, GetRolesResponse> {

private final NativeRolesStore nativeRolesStore;
@@ -55,9 +50,8 @@ public class TransportGetRolesAction extends HandledTransportAction<GetRolesRequ
if (rd != null) {
roles.add(rd);
} else {
// the kibana role name is reseved but is only visible to the Kibana user, so this should be the only null
// descriptor. More details in the ReservedRolesStore
assert KibanaRole.NAME.equals(role);
listener.onFailure(new IllegalStateException("unable to obtain reserved role [" + role + "]"));
return;
}
} else {
rolesToSearchFor.add(role);

@ -9,7 +9,6 @@ import org.elasticsearch.ElasticsearchParseException;
|
|||
import org.elasticsearch.action.ActionRequestBuilder;
|
||||
import org.elasticsearch.action.support.WriteRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.ValidationException;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
|
@ -67,7 +66,7 @@ public class ChangePasswordRequestBuilder
|
|||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, User.Fields.PASSWORD)) {
|
||||
} else if (User.Fields.PASSWORD.match(currentFieldName)) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
String password = parser.text();
|
||||
char[] passwordChars = password.toCharArray();
|
||||
|
|
|
@ -10,7 +10,6 @@ import org.elasticsearch.action.ActionRequestBuilder;
|
|||
import org.elasticsearch.action.support.WriteRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.ValidationException;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
@ -101,7 +100,7 @@ public class PutUserRequestBuilder extends ActionRequestBuilder<PutUserRequest,
|
|||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, User.Fields.PASSWORD)) {
|
||||
} else if (User.Fields.PASSWORD.match(currentFieldName)) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
String password = parser.text();
|
||||
char[] passwordChars = password.toCharArray();
|
||||
|
@ -111,7 +110,7 @@ public class PutUserRequestBuilder extends ActionRequestBuilder<PutUserRequest,
|
|||
throw new ElasticsearchParseException(
|
||||
"expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, User.Fields.PASSWORD_HASH)) {
|
||||
} else if (User.Fields.PASSWORD_HASH.match(currentFieldName)) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
char[] passwordChars = parser.text().toCharArray();
|
||||
passwordHash(passwordChars);
|
||||
|
@ -119,41 +118,41 @@ public class PutUserRequestBuilder extends ActionRequestBuilder<PutUserRequest,
|
|||
throw new ElasticsearchParseException(
|
||||
"expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, User.Fields.ROLES)) {
|
||||
} else if (User.Fields.ROLES.match(currentFieldName)) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
roles(Strings.commaDelimitedListToStringArray(parser.text()));
|
||||
} else {
|
||||
roles(XContentUtils.readStringArray(parser, false));
|
||||
}
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, User.Fields.FULL_NAME)) {
|
||||
} else if (User.Fields.FULL_NAME.match(currentFieldName)) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
fullName(parser.text());
|
||||
} else if (token != XContentParser.Token.VALUE_NULL) {
|
||||
throw new ElasticsearchParseException(
|
||||
"expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, User.Fields.EMAIL)) {
|
||||
} else if (User.Fields.EMAIL.match(currentFieldName)) {
|
||||
if (token == XContentParser.Token.VALUE_STRING) {
|
||||
email(parser.text());
|
||||
} else if (token != XContentParser.Token.VALUE_NULL) {
|
||||
throw new ElasticsearchParseException(
|
||||
"expected field [{}] to be of type string, but found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, User.Fields.METADATA)) {
|
||||
} else if (User.Fields.METADATA.match(currentFieldName)) {
|
||||
if (token == XContentParser.Token.START_OBJECT) {
|
||||
metadata(parser.map());
|
||||
} else {
|
||||
throw new ElasticsearchParseException(
|
||||
"expected field [{}] to be of type object, but found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, User.Fields.ENABLED)) {
|
||||
} else if (User.Fields.ENABLED.match(currentFieldName)) {
|
||||
if (token == XContentParser.Token.VALUE_BOOLEAN) {
|
||||
enabled(parser.booleanValue());
|
||||
} else {
|
||||
throw new ElasticsearchParseException(
|
||||
"expected field [{}] to be of type boolean, but found [{}] instead", currentFieldName, token);
|
||||
}
|
||||
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, User.Fields.USERNAME)) {
|
||||
} else if (User.Fields.USERNAME.match(currentFieldName)) {
|
||||
if (token == Token.VALUE_STRING) {
|
||||
if (username.equals(parser.text()) == false) {
|
||||
throw new IllegalArgumentException("[username] in source does not match the username provided [" +
|
||||
|
|
|
@@ -8,7 +8,7 @@ package org.elasticsearch.xpack.security.action.user;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

@@ -17,7 +17,7 @@ import java.io.IOException;
 * Response when adding a user to the security index. Returns a
 * single boolean field for whether the user was created or updated.
 */
public class PutUserResponse extends ActionResponse implements ToXContent {
public class PutUserResponse extends ActionResponse implements ToXContentObject {

    private boolean created;

@ -8,9 +8,6 @@ package org.elasticsearch.xpack.security.authc.esnative;
|
|||
import joptsimple.OptionParser;
|
||||
import joptsimple.OptionSet;
|
||||
import joptsimple.OptionSpec;
|
||||
|
||||
import com.google.common.base.Charsets;
|
||||
|
||||
import org.apache.logging.log4j.Level;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
|
@ -53,6 +50,7 @@ import java.io.OutputStream;
|
|||
import java.net.HttpURLConnection;
|
||||
import java.net.URI;
|
||||
import java.net.URL;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.security.AccessController;
|
||||
|
@ -164,7 +162,7 @@ public class ESNativeRealmMigrateTool extends MultiCommand {
|
|||
conn.connect();
|
||||
if (bodyString != null) {
|
||||
try (OutputStream out = conn.getOutputStream()) {
|
||||
out.write(bodyString.getBytes(Charsets.UTF_8));
|
||||
out.write(bodyString.getBytes(StandardCharsets.UTF_8));
|
||||
} catch (Exception e) {
|
||||
try {
|
||||
conn.disconnect();
|
||||
|
@ -174,7 +172,7 @@ public class ESNativeRealmMigrateTool extends MultiCommand {
|
|||
throw e;
|
||||
}
|
||||
}
|
||||
try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream(), Charsets.UTF_8))) {
|
||||
try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
String line = null;
|
||||
while ((line = reader.readLine()) != null) {
|
||||
|
@ -182,7 +180,7 @@ public class ESNativeRealmMigrateTool extends MultiCommand {
|
|||
}
|
||||
return sb.toString();
|
||||
} catch (IOException e) {
|
||||
try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getErrorStream(), Charsets.UTF_8))) {
|
||||
try (BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getErrorStream(), StandardCharsets.UTF_8))) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
String line = null;
|
||||
while ((line = reader.readLine()) != null) {
|
||||
|
@ -301,7 +299,7 @@ public class ESNativeRealmMigrateTool extends MultiCommand {
|
|||
|
||||
static String createRoleJson(RoleDescriptor rd) throws IOException {
|
||||
XContentBuilder builder = jsonBuilder();
|
||||
rd.toXContent(builder, ToXContent.EMPTY_PARAMS);
|
||||
rd.toXContent(builder, ToXContent.EMPTY_PARAMS, false);
|
||||
return builder.string();
|
||||
}
|
||||
|
||||
|
@ -314,7 +312,7 @@ public class ESNativeRealmMigrateTool extends MultiCommand {
|
|||
}
|
||||
terminal.println("importing roles from [" + rolesFile + "]...");
|
||||
Logger logger = getTerminalLogger(terminal);
|
||||
Map<String, RoleDescriptor> roles = FileRolesStore.parseRoleDescriptors(rolesFile, logger, true, env.settings());
|
||||
Map<String, RoleDescriptor> roles = FileRolesStore.parseRoleDescriptors(rolesFile, logger, true, Settings.EMPTY);
|
||||
Set<String> existingRoles;
|
||||
try {
|
||||
existingRoles = getRolesThatExist(terminal, env.settings(), env, options);
|
||||
|
|
|
@@ -0,0 +1,79 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authc.esnative;

import org.apache.logging.log4j.Logger;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.xpack.security.SecurityTemplateService;
import org.elasticsearch.xpack.security.user.LogstashSystemUser;

/**
 * Performs migration steps for the {@link NativeRealm} and {@link ReservedRealm}.
 * When upgrading an Elasticsearch/X-Pack installation from a previous version, this class is responsible for ensuring that user/role
 * data stored in the security index is converted to a format that is appropriate for the newly installed version.
 * @see SecurityTemplateService
 */
public class NativeRealmMigrator {

    private final NativeUsersStore nativeUsersStore;
    private final Logger logger;

    public NativeRealmMigrator(Settings settings, NativeUsersStore nativeUsersStore) {
        this.nativeUsersStore = nativeUsersStore;
        this.logger = Loggers.getLogger(getClass(), settings);
    }

    /**
     * Special care must be taken because this upgrade happens <strong>before</strong> the security-mapping is updated.
     * We do it in that order because the version of the security-mapping controls the behaviour of the
     * reserved and native realm
     *
     * @param listener A listener for the results of the upgrade. Calls {@link ActionListener#onFailure(Exception)} if a problem occurs,
     *                 {@link ActionListener#onResponse(Object) onResponse(true)} if an upgrade is performed, or
     *                 {@link ActionListener#onResponse(Object) onResponse(false)} if no upgrade was required.
     * @see SecurityTemplateService#securityIndexMappingAndTemplateSufficientToRead(ClusterState, Logger)
     * @see NativeUsersStore#canWrite
     * @see NativeUsersStore#mappingVersion
     */
    public void performUpgrade(@Nullable Version previousVersion, ActionListener<Boolean> listener) {
        try {
            if (shouldDisableLogstashUser(previousVersion)) {
                logger.info("Upgrading security from version [{}] - new reserved user [{}] will default to disabled",
                        previousVersion, LogstashSystemUser.NAME);
                nativeUsersStore.ensureReservedUserIsDisabled(LogstashSystemUser.NAME, new ActionListener<Void>() {
                    @Override
                    public void onResponse(Void aVoid) {
                        listener.onResponse(true);
                    }

                    @Override
                    public void onFailure(Exception e) {
                        listener.onFailure(e);
                    }
                });
            } else {
                listener.onResponse(false);
            }
        } catch (Exception e) {
            listener.onFailure(e);
        }
    }

    /**
     * If we're upgrading from a security version where the {@link LogstashSystemUser} did not exist, then we mark the user as disabled.
     * Otherwise the user will exist with a default password, which is desirable for an <em>out-of-the-box</em> experience in fresh installs
     * but problematic for already-locked-down upgrades.
     */
    private boolean shouldDisableLogstashUser(@Nullable Version previousVersion) {
        return previousVersion != null && previousVersion.before(LogstashSystemUser.DEFINED_SINCE);
    }

}

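For illustration only (not part of the diff): a minimal sketch of how a caller might drive this migrator, assuming a NativeUsersStore named nativeUsersStore, the node settings, a logger, and the previousVersion recovered from the existing security index are already in hand.

    NativeRealmMigrator migrator = new NativeRealmMigrator(settings, nativeUsersStore);
    migrator.performUpgrade(previousVersion, ActionListener.wrap(
            upgraded -> logger.info("native realm migration {}", upgraded ? "applied" : "not required"),
            e -> logger.error("native realm migration failed", e)));
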
@ -9,6 +9,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
|
|||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.DocWriteResponse;
|
||||
import org.elasticsearch.action.DocWriteResponse.Result;
|
||||
|
@ -26,7 +27,7 @@ import org.elasticsearch.action.update.UpdateResponse;
|
|||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.routing.IndexRoutingTable;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.ValidationException;
|
||||
|
@ -62,7 +63,9 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
import static org.elasticsearch.xpack.security.SecurityTemplateService.oldestSecurityIndexMappingVersion;
|
||||
import static org.elasticsearch.xpack.security.SecurityTemplateService.securityIndexMappingAndTemplateSufficientToRead;
|
||||
import static org.elasticsearch.xpack.security.SecurityTemplateService.securityIndexMappingAndTemplateUpToDate;
|
||||
|
||||
|
@ -94,6 +97,7 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
|
|||
|
||||
private volatile boolean securityIndexExists = false;
|
||||
private volatile boolean canWrite = false;
|
||||
private volatile Version mappingVersion = null;
|
||||
|
||||
public NativeUsersStore(Settings settings, InternalClient client) {
|
||||
super(settings);
|
||||
|
@ -404,7 +408,6 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
|
|||
.execute(new ActionListener<UpdateResponse>() {
|
||||
@Override
|
||||
public void onResponse(UpdateResponse updateResponse) {
|
||||
assert updateResponse.getResult() == Result.UPDATED;
|
||||
clearRealmCache(username, listener, null);
|
||||
}
|
||||
|
||||
|
@ -428,6 +431,16 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
|
|||
}
|
||||
}
|
||||
|
||||
void ensureReservedUserIsDisabled(final String username, final ActionListener<Void> listener) {
|
||||
getReservedUserInfo(username, ActionListener.wrap(userInfo -> {
|
||||
if (userInfo == null || userInfo.enabled) {
|
||||
setReservedUserEnabled(username, false, RefreshPolicy.IMMEDIATE, listener);
|
||||
} else {
|
||||
listener.onResponse(null);
|
||||
}
|
||||
}, listener::onFailure));
|
||||
}
|
||||
|
||||
private void setReservedUserEnabled(final String username, final boolean enabled, final RefreshPolicy refreshPolicy,
|
||||
final ActionListener<Void> listener) {
|
||||
try {
|
||||
|
@ -439,7 +452,6 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
|
|||
.execute(new ActionListener<UpdateResponse>() {
|
||||
@Override
|
||||
public void onResponse(UpdateResponse updateResponse) {
|
||||
assert updateResponse.getResult() == Result.UPDATED || updateResponse.getResult() == Result.CREATED;
|
||||
clearRealmCache(username, listener, null);
|
||||
}
|
||||
|
||||
|
@ -509,19 +521,19 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
|
|||
if (securityIndexMappingAndTemplateUpToDate(clusterState, logger)) {
|
||||
canWrite = true;
|
||||
} else if (securityIndexMappingAndTemplateSufficientToRead(clusterState, logger)) {
|
||||
mappingVersion = oldestSecurityIndexMappingVersion(clusterState, logger);
|
||||
canWrite = false;
|
||||
} else {
|
||||
canWrite = false;
|
||||
return false;
|
||||
}
|
||||
|
||||
IndexMetaData metaData = clusterState.metaData().index(SecurityTemplateService.SECURITY_INDEX_NAME);
|
||||
if (metaData == null) {
|
||||
final IndexRoutingTable routingTable = SecurityTemplateService.getSecurityIndexRoutingTable(clusterState);
|
||||
if (routingTable == null) {
|
||||
logger.debug("security index [{}] does not exist, so service can start", SecurityTemplateService.SECURITY_INDEX_NAME);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (clusterState.routingTable().index(SecurityTemplateService.SECURITY_INDEX_NAME).allPrimaryShardsActive()) {
|
||||
if (routingTable.allPrimaryShardsActive()) {
|
||||
logger.debug("security index [{}] all primary shards started, so service can start",
|
||||
SecurityTemplateService.SECURITY_INDEX_NAME);
|
||||
securityIndexExists = true;
|
||||
|
@@ -578,9 +590,21 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
        return securityIndexExists;
    }

    void getReservedUserInfo(String username, ActionListener<ReservedUserInfo> listener) {
        assert started();
    /**
     * Test whether the effective (active) version of the security mapping meets the <code>requiredVersion</code>.
     *
     * @return <code>true</code> if the effective version passes the predicate, or the security mapping does not exist (<code>null</code>
     * version). Otherwise, <code>false</code>.
     */
    public boolean checkMappingVersion(Predicate<Version> requiredVersion) {
        return this.mappingVersion == null || requiredVersion.test(this.mappingVersion);
    }

    void getReservedUserInfo(String username, ActionListener<ReservedUserInfo> listener) {
        if (!started() && !securityIndexExists()) {
            listener.onFailure(new IllegalStateException("Attempt to get reserved user info - started=" + started() +
                " index-exists=" + securityIndexExists()));
        }
        client.prepareGet(SecurityTemplateService.SECURITY_INDEX_NAME, RESERVED_USER_DOC_TYPE, username)
            .execute(new ActionListener<GetResponse>() {
                @Override
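Aside, not part of the diff: the new checkMappingVersion hook lets callers gate behaviour on the age of the on-disk security mapping. A minimal sketch, assuming a NativeUsersStore instance named nativeUsersStore:

    // true when the mapping is new enough for the logstash_system user to be defined (or no mapping exists yet)
    boolean logstashUserDefined = nativeUsersStore.checkMappingVersion(LogstashSystemUser.DEFINED_SINCE::onOrBefore);
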
@ -699,6 +723,7 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
|
|||
public void clusterChanged(ClusterChangedEvent event) {
|
||||
securityIndexExists = event.state().metaData().indices().get(SecurityTemplateService.SECURITY_INDEX_NAME) != null;
|
||||
canWrite = securityIndexMappingAndTemplateUpToDate(event.state(), logger);
|
||||
mappingVersion = oldestSecurityIndexMappingVersion(event.state(), logger);
|
||||
}
|
||||
|
||||
public State state() {
|
||||
|
@ -713,6 +738,7 @@ public class NativeUsersStore extends AbstractComponent implements ClusterStateL
|
|||
}
|
||||
this.securityIndexExists = false;
|
||||
this.canWrite = false;
|
||||
this.mappingVersion = null;
|
||||
this.state.set(State.INITIALIZED);
|
||||
}
|
||||
|
||||
|
|
|
@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.authc.esnative;
|
|||
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
|
@ -21,6 +22,7 @@ import org.elasticsearch.xpack.security.support.Exceptions;
|
|||
import org.elasticsearch.xpack.security.user.AnonymousUser;
|
||||
import org.elasticsearch.xpack.security.user.ElasticUser;
|
||||
import org.elasticsearch.xpack.security.user.KibanaUser;
|
||||
import org.elasticsearch.xpack.security.user.LogstashSystemUser;
|
||||
import org.elasticsearch.xpack.security.user.User;
|
||||
|
||||
import java.util.ArrayList;
|
||||
|
@ -28,6 +30,7 @@ import java.util.Arrays;
|
|||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
/**
|
||||
* A realm for predefined users. These users can only be modified in terms of changing their passwords; no other modifications are allowed.
|
||||
|
@ -38,6 +41,7 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
|
|||
public static final String TYPE = "reserved";
|
||||
static final char[] DEFAULT_PASSWORD_HASH = Hasher.BCRYPT.hash(new SecuredString("changeme".toCharArray()));
|
||||
private static final ReservedUserInfo DEFAULT_USER_INFO = new ReservedUserInfo(DEFAULT_PASSWORD_HASH, true);
|
||||
private static final ReservedUserInfo DISABLED_USER_INFO = new ReservedUserInfo(DEFAULT_PASSWORD_HASH, false);
|
||||
|
||||
private final NativeUsersStore nativeUsersStore;
|
||||
private final AnonymousUser anonymousUser;
|
||||
|
@ -113,6 +117,7 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
|
|||
switch (username) {
|
||||
case ElasticUser.NAME:
|
||||
case KibanaUser.NAME:
|
||||
case LogstashSystemUser.NAME:
|
||||
return XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings);
|
||||
default:
|
||||
return AnonymousUser.isAnonymousUsername(username, settings);
|
||||
|
@ -126,6 +131,8 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
|
|||
return new ElasticUser(userInfo.enabled);
|
||||
case KibanaUser.NAME:
|
||||
return new KibanaUser(userInfo.enabled);
|
||||
case LogstashSystemUser.NAME:
|
||||
return new LogstashSystemUser(userInfo.enabled);
|
||||
default:
|
||||
if (anonymousEnabled && anonymousUser.principal().equals(username)) {
|
||||
return anonymousUser;
|
||||
|
@ -140,14 +147,21 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
|
|||
listener.onResponse(anonymousEnabled ? Collections.singletonList(anonymousUser) : Collections.emptyList());
|
||||
} else {
|
||||
nativeUsersStore.getAllReservedUserInfo(ActionListener.wrap((reservedUserInfos) -> {
|
||||
List<User> users = new ArrayList<>(3);
|
||||
List<User> users = new ArrayList<>(4);
|
||||
|
||||
ReservedUserInfo userInfo = reservedUserInfos.get(ElasticUser.NAME);
|
||||
users.add(new ElasticUser(userInfo == null || userInfo.enabled));
|
||||
|
||||
userInfo = reservedUserInfos.get(KibanaUser.NAME);
|
||||
users.add(new KibanaUser(userInfo == null || userInfo.enabled));
|
||||
|
||||
userInfo = reservedUserInfos.get(LogstashSystemUser.NAME);
|
||||
users.add(new LogstashSystemUser(userInfo == null || userInfo.enabled));
|
||||
|
||||
if (anonymousEnabled) {
|
||||
users.add(anonymousUser);
|
||||
}
|
||||
|
||||
listener.onResponse(users);
|
||||
}, (e) -> {
|
||||
logger.error("failed to retrieve reserved users", e);
|
||||
|
@ -160,6 +174,9 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
|
|||
if (nativeUsersStore.started() == false) {
|
||||
// we need to be able to check for the user store being started...
|
||||
listener.onResponse(null);
|
||||
} else if (userIsDefinedForCurrentSecurityMapping(username) == false) {
|
||||
logger.debug("Marking user [{}] as disabled because the security mapping is not at the required version", username);
|
||||
listener.onResponse(DISABLED_USER_INFO);
|
||||
} else if (nativeUsersStore.securityIndexExists() == false) {
|
||||
listener.onResponse(DEFAULT_USER_INFO);
|
||||
} else {
|
||||
|
@ -176,4 +193,18 @@ public class ReservedRealm extends CachingUsernamePasswordRealm {
|
|||
}));
|
||||
}
|
||||
}
|
||||
|
||||
private boolean userIsDefinedForCurrentSecurityMapping(String username) {
|
||||
final Version requiredVersion = getDefinedVersion(username);
|
||||
return nativeUsersStore.checkMappingVersion(requiredVersion::onOrBefore);
|
||||
}
|
||||
|
||||
private Version getDefinedVersion(String username) {
|
||||
switch (username) {
|
||||
case LogstashSystemUser.NAME:
|
||||
return LogstashSystemUser.DEFINED_SINCE;
|
||||
default:
|
||||
return Version.V_5_0_0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -69,7 +69,7 @@ class ActiveDirectorySessionFactory extends SessionFactory {
        String domainDN = buildDnFromDomain(domainName);
        GroupsResolver groupResolver = new ActiveDirectoryGroupsResolver(settings.getAsSettings("group_search"), domainDN);
        defaultADAuthenticator = new DefaultADAuthenticator(settings, timeout, logger, groupResolver, domainDN);
        downLevelADAuthenticator = new DownLevelADAuthenticator(settings, timeout, logger, groupResolver, domainDN);
        downLevelADAuthenticator = new DownLevelADAuthenticator(config, timeout, logger, groupResolver, domainDN, sslService);
        upnADAuthenticator = new UpnADAuthenticator(settings, timeout, logger, groupResolver, domainDN);
    }

@@ -227,11 +227,16 @@ class ActiveDirectorySessionFactory extends SessionFactory {
        final String domainDN;
        final Settings settings;
        final SSLService sslService;
        final RealmConfig config;

        DownLevelADAuthenticator(Settings settings, TimeValue timeout, Logger logger, GroupsResolver groupsResolver, String domainDN) {
            super(settings, timeout, logger, groupsResolver, domainDN);
        DownLevelADAuthenticator(RealmConfig config, TimeValue timeout, Logger logger, GroupsResolver groupsResolver, String domainDN,
                                 SSLService sslService) {
            super(config.settings(), timeout, logger, groupsResolver, domainDN);
            this.domainDN = domainDN;
            this.settings = settings;
            this.settings = config.settings();
            this.sslService = sslService;
            this.config = config;
        }

        @Override

@@ -271,7 +276,7 @@ class ActiveDirectorySessionFactory extends SessionFactory {
        // the global catalog does not replicate the necessary information to map a netbios dns name to a DN so we need to instead
        // connect to the normal ports. This code uses the standard ports to avoid adding even more settings and is probably ok as
        // most AD users do not use non-standard ports
        final LDAPConnectionOptions options = connectionOptions(settings);
        final LDAPConnectionOptions options = connectionOptions(config, sslService, logger);
        boolean startedSearching = false;
        LDAPConnection searchConnection = null;
        try {

@ -263,6 +263,7 @@ public final class LdapUtils {
|
|||
// either no referrals to follow or we have explicitly disabled referral following on the connection so we just create
|
||||
// a new search result that has the values we've collected. The search result passed to this method will not have of the
|
||||
// entries as we are using a result listener and the results are not being collected by the LDAP library
|
||||
LOGGER.trace("LDAP Search {} => {} ({})", searchRequest, searchResult, entryList);
|
||||
SearchResult resultWithValues = new SearchResult(searchResult.getMessageID(), searchResult.getResultCode(), searchResult
|
||||
.getDiagnosticMessage(), searchResult.getMatchedDN(), referralUrls, entryList, referenceList, entryList.size(),
|
||||
referenceList.size(), searchResult.getResponseControls());
|
||||
|
@ -270,11 +271,15 @@ public final class LdapUtils {
|
|||
} else if (depth >= ldapConnection.getConnectionOptions().getReferralHopLimit()) {
|
||||
// we've gone through too many levels of referrals so we terminate with the values collected so far and the proper result
|
||||
// code to indicate the search was terminated early
|
||||
LOGGER.trace("Referral limit exceeded {} => {} ({})", searchRequest, searchResult, entryList);
|
||||
SearchResult resultWithValues = new SearchResult(searchResult.getMessageID(), ResultCode.REFERRAL_LIMIT_EXCEEDED,
|
||||
searchResult.getDiagnosticMessage(), searchResult.getMatchedDN(), referralUrls, entryList, referenceList,
|
||||
entryList.size(), referenceList.size(), searchResult.getResponseControls());
|
||||
consumer.accept(requestID, resultWithValues);
|
||||
} else {
|
||||
if (LOGGER.isTraceEnabled()) {
|
||||
LOGGER.trace("LDAP referred elsewhere {} => {}", searchRequest, Arrays.toString(referralUrls));
|
||||
}
|
||||
// there are referrals to follow, so we start the process to follow the referrals
|
||||
final CountDown countDown = new CountDown(referralUrls.length);
|
||||
final List<String> referralUrlsList = new ArrayList<>(Arrays.asList(referralUrls));
|
||||
|
|
|
@ -13,13 +13,16 @@ import com.unboundid.util.ssl.HostNameSSLSocketVerifier;
|
|||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.xpack.security.authc.RealmConfig;
|
||||
import org.elasticsearch.xpack.security.authc.RealmSettings;
|
||||
import org.elasticsearch.xpack.security.authc.support.SecuredString;
|
||||
import org.elasticsearch.xpack.ssl.SSLConfigurationSettings;
|
||||
import org.elasticsearch.xpack.ssl.SSLService;
|
||||
import org.elasticsearch.xpack.ssl.VerificationMode;
|
||||
|
||||
import javax.net.SocketFactory;
|
||||
import java.util.Arrays;
|
||||
|
@ -73,7 +76,7 @@ public abstract class SessionFactory {
|
|||
this.timeout = searchTimeout;
|
||||
this.sslService = sslService;
|
||||
LDAPServers ldapServers = ldapServers(config.settings());
|
||||
this.serverSet = serverSet(config.settings(), sslService, ldapServers);
|
||||
this.serverSet = serverSet(config, sslService, ldapServers);
|
||||
this.sslUsed = ldapServers.ssl;
|
||||
}
|
||||
|
||||
|
@@ -107,13 +110,33 @@ public abstract class SessionFactory {
        throw new UnsupportedOperationException("unauthenticated sessions are not supported");
    }

    protected static LDAPConnectionOptions connectionOptions(Settings settings) {
    protected static LDAPConnectionOptions connectionOptions(RealmConfig config, SSLService sslService, Logger logger) {
        Settings realmSettings = config.settings();
        LDAPConnectionOptions options = new LDAPConnectionOptions();
        options.setConnectTimeoutMillis(Math.toIntExact(settings.getAsTime(TIMEOUT_TCP_CONNECTION_SETTING, TIMEOUT_DEFAULT).millis()));
        options.setFollowReferrals(settings.getAsBoolean(FOLLOW_REFERRALS_SETTING, true));
        options.setResponseTimeoutMillis(settings.getAsTime(TIMEOUT_TCP_READ_SETTING, TIMEOUT_DEFAULT).millis());
        options.setConnectTimeoutMillis(Math.toIntExact(realmSettings.getAsTime(TIMEOUT_TCP_CONNECTION_SETTING, TIMEOUT_DEFAULT).millis()));
        options.setFollowReferrals(realmSettings.getAsBoolean(FOLLOW_REFERRALS_SETTING, true));
        options.setResponseTimeoutMillis(realmSettings.getAsTime(TIMEOUT_TCP_READ_SETTING, TIMEOUT_DEFAULT).millis());
        options.setAllowConcurrentSocketFactoryUse(true);
        if (settings.getAsBoolean(HOSTNAME_VERIFICATION_SETTING, true)) {
        SSLConfigurationSettings sslConfigurationSettings = SSLConfigurationSettings.withoutPrefix();
        final Settings realmSSLSettings = realmSettings.getByPrefix("ssl.");
        final boolean verificationModeExists = sslConfigurationSettings.verificationMode.exists(realmSSLSettings);
        final boolean hostnameVerficationExists = realmSettings.get(HOSTNAME_VERIFICATION_SETTING, null) != null;
        if (verificationModeExists && hostnameVerficationExists) {
            throw new IllegalArgumentException("[" + HOSTNAME_VERIFICATION_SETTING + "] and [" +
                    sslConfigurationSettings.verificationMode.getKey() + "] may not be used at the same time");
        } else if (verificationModeExists) {
            VerificationMode verificationMode = sslService.getVerificationMode(realmSSLSettings, Settings.EMPTY);
            if (verificationMode == VerificationMode.FULL) {
                options.setSSLSocketVerifier(new HostNameSSLSocketVerifier(true));
            }
        } else if (hostnameVerficationExists) {
            new DeprecationLogger(logger).deprecated("the setting [{}] has been deprecated and will be removed in a future version. use " +
                    "[{}] instead", RealmSettings.getFullSettingKey(config, HOSTNAME_VERIFICATION_SETTING),
                    RealmSettings.getFullSettingKey(config, "ssl." + sslConfigurationSettings.verificationMode.getKey()));
            if (realmSettings.getAsBoolean(HOSTNAME_VERIFICATION_SETTING, true)) {
                options.setSSLSocketVerifier(new HostNameSSLSocketVerifier(true));
            }
        } else {
            options.setSSLSocketVerifier(new HostNameSSLSocketVerifier(true));
        }
        return options;
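Aside, not part of the diff: with this logic a realm may configure either the legacy hostname-verification flag or the SSL verification mode, but not both. A minimal sketch with hypothetical values, assuming the realm-level setting keys are hostname_verification and ssl.verification_mode:

    // Both keys present for the same LDAP/AD realm -> connectionOptions(...) throws IllegalArgumentException.
    Settings realmSettings = Settings.builder()
            .put("hostname_verification", false)            // legacy setting, now deprecated
            .put("ssl.verification_mode", "certificate")    // replacement setting
            .build();
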
@ -132,7 +155,8 @@ public abstract class SessionFactory {
|
|||
return null;
|
||||
}
|
||||
|
||||
private ServerSet serverSet(Settings settings, SSLService clientSSLService, LDAPServers ldapServers) {
|
||||
private ServerSet serverSet(RealmConfig realmConfig, SSLService clientSSLService, LDAPServers ldapServers) {
|
||||
Settings settings = realmConfig.settings();
|
||||
SocketFactory socketFactory = null;
|
||||
if (ldapServers.ssl()) {
|
||||
socketFactory = clientSSLService.sslSocketFactory(settings.getByPrefix("ssl."));
|
||||
|
@ -143,7 +167,7 @@ public abstract class SessionFactory {
|
|||
}
|
||||
}
|
||||
return LdapLoadBalancing.serverSet(ldapServers.addresses(), ldapServers.ports(), settings, socketFactory,
|
||||
connectionOptions(settings));
|
||||
connectionOptions(realmConfig, sslService, logger));
|
||||
}
|
||||
|
||||
// package private to use for testing
|
||||
|
|
|
@ -33,28 +33,30 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
|
|||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportRequest;
|
||||
import org.elasticsearch.xpack.common.GroupedActionListener;
|
||||
import org.elasticsearch.xpack.security.SecurityTemplateService;
|
||||
import org.elasticsearch.xpack.security.action.user.AuthenticateAction;
|
||||
import org.elasticsearch.xpack.security.action.user.ChangePasswordAction;
|
||||
import org.elasticsearch.xpack.security.action.user.UserRequest;
|
||||
import org.elasticsearch.xpack.security.audit.AuditTrailService;
|
||||
import org.elasticsearch.xpack.security.authc.Authentication;
|
||||
import org.elasticsearch.xpack.security.authc.AuthenticationFailureHandler;
|
||||
import org.elasticsearch.xpack.security.authc.esnative.NativeRealm;
|
||||
import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
|
||||
import org.elasticsearch.xpack.security.authz.accesscontrol.IndicesAccessControl;
|
||||
import org.elasticsearch.xpack.security.authz.permission.ClusterPermission;
|
||||
import org.elasticsearch.xpack.security.authz.permission.DefaultRole;
|
||||
import org.elasticsearch.xpack.security.authz.permission.GlobalPermission;
|
||||
import org.elasticsearch.xpack.security.authz.permission.FieldPermissionsCache;
|
||||
import org.elasticsearch.xpack.security.authz.permission.Role;
|
||||
import org.elasticsearch.xpack.security.authz.permission.RunAsPermission;
|
||||
import org.elasticsearch.xpack.security.authz.permission.SuperuserRole;
|
||||
import org.elasticsearch.xpack.security.authz.privilege.ClusterPrivilege;
|
||||
import org.elasticsearch.xpack.security.authz.privilege.IndexPrivilege;
|
||||
import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore;
|
||||
import org.elasticsearch.xpack.security.authz.store.ReservedRolesStore;
|
||||
import org.elasticsearch.xpack.security.support.Automatons;
|
||||
import org.elasticsearch.xpack.security.user.AnonymousUser;
|
||||
import org.elasticsearch.xpack.security.user.SystemUser;
|
||||
import org.elasticsearch.xpack.security.user.User;
|
||||
import org.elasticsearch.xpack.security.user.XPackUser;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
|
@ -72,6 +74,7 @@ public class AuthorizationService extends AbstractComponent {
|
|||
public static final String ORIGINATING_ACTION_KEY = "_originating_action_name";
|
||||
|
||||
private static final Predicate<String> MONITOR_INDEX_PREDICATE = IndexPrivilege.MONITOR.predicate();
|
||||
private static final Predicate<String> SAME_USER_PRIVILEGE = Automatons.predicate(ChangePasswordAction.NAME, AuthenticateAction.NAME);
|
||||
|
||||
private final ClusterService clusterService;
|
||||
private final CompositeRolesStore rolesStore;
|
||||
|
@ -80,6 +83,7 @@ public class AuthorizationService extends AbstractComponent {
|
|||
private final AuthenticationFailureHandler authcFailureHandler;
|
||||
private final ThreadContext threadContext;
|
||||
private final AnonymousUser anonymousUser;
|
||||
private final FieldPermissionsCache fieldPermissionsCache;
|
||||
private final boolean isAnonymousEnabled;
|
||||
private final boolean anonymousAuthzExceptionEnabled;
|
||||
|
||||
|
@ -96,6 +100,7 @@ public class AuthorizationService extends AbstractComponent {
|
|||
this.anonymousUser = anonymousUser;
|
||||
this.isAnonymousEnabled = AnonymousUser.isAnonymousEnabled(settings);
|
||||
this.anonymousAuthzExceptionEnabled = ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING.get(settings);
|
||||
this.fieldPermissionsCache = new FieldPermissionsCache(settings);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -108,8 +113,8 @@ public class AuthorizationService extends AbstractComponent {
|
|||
* @param request The request
|
||||
* @throws ElasticsearchSecurityException If the given user is no allowed to execute the given request
|
||||
*/
|
||||
public void authorize(Authentication authentication, String action, TransportRequest request, Collection<Role> userRoles,
|
||||
Collection<Role> runAsRoles) throws ElasticsearchSecurityException {
|
||||
public void authorize(Authentication authentication, String action, TransportRequest request, Role userRole,
|
||||
Role runAsRole) throws ElasticsearchSecurityException {
|
||||
final TransportRequest originalRequest = request;
|
||||
if (request instanceof ConcreteShardRequest) {
|
||||
request = ((ConcreteShardRequest<?>) request).getRequest();
|
||||
|
@ -126,38 +131,20 @@ public class AuthorizationService extends AbstractComponent {
|
|||
}
|
||||
throw denial(authentication, action, request);
|
||||
}
|
||||
Collection<Role> roles = userRoles;
|
||||
// get the roles of the authenticated user, which may be different than the effective
|
||||
GlobalPermission permission = permission(roles);
|
||||
|
||||
final boolean isRunAs = authentication.isRunAs();
|
||||
// permission can be empty as it might be that the user's role is unknown
|
||||
if (permission.isEmpty()) {
|
||||
if (isRunAs) {
|
||||
// the request is a run as request so we should call the specific audit event for a denied run as attempt
|
||||
throw denyRunAs(authentication, action, request);
|
||||
} else {
|
||||
throw denial(authentication, action, request);
|
||||
}
|
||||
}
|
||||
// get the roles of the authenticated user, which may be different than the effective
|
||||
Role permission = userRole;
|
||||
|
||||
// check if the request is a run as request
|
||||
final boolean isRunAs = authentication.isRunAs();
|
||||
if (isRunAs) {
|
||||
// if we are running as a user we looked up then the authentication must contain a lookedUpBy. If it doesn't then this user
|
||||
// doesn't really exist but the authc service allowed it through to avoid leaking users that exist in the system
|
||||
if (authentication.getLookedUpBy() == null) {
|
||||
throw denyRunAs(authentication, action, request);
|
||||
}
|
||||
|
||||
// first we must authorize for the RUN_AS action
|
||||
RunAsPermission runAs = permission.runAs();
|
||||
if (runAs != null && runAs.check(authentication.getRunAsUser().principal())) {
|
||||
} else if (permission.runAs().check(authentication.getRunAsUser().principal())) {
|
||||
grantRunAs(authentication, action, request);
|
||||
roles = runAsRoles;
|
||||
permission = permission(roles);
|
||||
// permission can be empty as it might be that the run as user's role is unknown
|
||||
if (permission.isEmpty()) {
|
||||
throw denial(authentication, action, request);
|
||||
}
|
||||
permission = runAsRole;
|
||||
} else {
|
||||
throw denyRunAs(authentication, action, request);
|
||||
}
|
||||
|
@ -166,8 +153,7 @@ public class AuthorizationService extends AbstractComponent {
|
|||
// first, we'll check if the action is a cluster action. If it is, we'll only check it against the cluster permissions
|
||||
if (ClusterPrivilege.ACTION_MATCHER.test(action)) {
|
||||
ClusterPermission cluster = permission.cluster();
|
||||
// we use the effectiveUser for permission checking since we are running as a user!
|
||||
if (cluster != null && cluster.check(action, request, authentication)) {
|
||||
if (cluster.check(action) || checkSameUserPermissions(action, request, authentication)) {
|
||||
setIndicesAccessControl(IndicesAccessControl.ALLOW_ALL);
|
||||
grant(authentication, action, request);
|
||||
return;
|
||||
|
@ -212,12 +198,12 @@ public class AuthorizationService extends AbstractComponent {
|
|||
throw denial(authentication, action, request);
|
||||
}
|
||||
|
||||
if (permission.indices() == null || permission.indices().isEmpty()) {
|
||||
if (permission.indices().check(action) == false) {
|
||||
throw denial(authentication, action, request);
|
||||
}
|
||||
|
||||
MetaData metaData = clusterService.state().metaData();
|
||||
AuthorizedIndices authorizedIndices = new AuthorizedIndices(authentication.getRunAsUser(), roles, action, metaData);
|
||||
AuthorizedIndices authorizedIndices = new AuthorizedIndices(authentication.getRunAsUser(), permission, action, metaData);
|
||||
Set<String> indexNames = resolveIndexNames(authentication, action, request, metaData, authorizedIndices);
|
||||
assert !indexNames.isEmpty() : "every indices request needs to have its indices set thus the resolved indices must not be empty";
|
||||
|
||||
|
@ -229,14 +215,14 @@ public class AuthorizationService extends AbstractComponent {
|
|||
return;
|
||||
}
|
||||
|
||||
IndicesAccessControl indicesAccessControl = permission.authorize(action, indexNames, metaData);
|
||||
IndicesAccessControl indicesAccessControl = permission.authorize(action, indexNames, metaData, fieldPermissionsCache);
|
||||
if (!indicesAccessControl.isGranted()) {
|
||||
throw denial(authentication, action, request);
|
||||
} else if (indicesAccessControl.getIndexPermissions(SecurityTemplateService.SECURITY_INDEX_NAME) != null
|
||||
&& indicesAccessControl.getIndexPermissions(SecurityTemplateService.SECURITY_INDEX_NAME).isGranted()
|
||||
&& XPackUser.is(authentication.getRunAsUser()) == false
|
||||
&& MONITOR_INDEX_PREDICATE.test(action) == false
|
||||
&& Arrays.binarySearch(authentication.getRunAsUser().roles(), SuperuserRole.NAME) < 0) {
|
||||
&& Arrays.binarySearch(authentication.getRunAsUser().roles(), ReservedRolesStore.SUPERUSER_ROLE.name()) < 0) {
|
||||
// only the XPackUser is allowed to work with this index, but we should allow indices monitoring actions through for debugging
|
||||
// purposes. These monitor requests also sometimes resolve indices concretely and then requests them
|
||||
logger.debug("user [{}] attempted to directly perform [{}] against the security index [{}]",
|
||||
|
@ -255,7 +241,7 @@ public class AuthorizationService extends AbstractComponent {
|
|||
for (Alias alias : aliases) {
|
||||
aliasesAndIndices.add(alias.name());
|
||||
}
|
||||
indicesAccessControl = permission.authorize("indices:admin/aliases", aliasesAndIndices, metaData);
|
||||
indicesAccessControl = permission.authorize("indices:admin/aliases", aliasesAndIndices, metaData, fieldPermissionsCache);
|
||||
if (!indicesAccessControl.isGranted()) {
|
||||
throw denial(authentication, "indices:admin/aliases", request);
|
||||
}
|
||||
|
@ -290,16 +276,7 @@ public class AuthorizationService extends AbstractComponent {
|
|||
}
|
||||
}
|
||||
|
||||
// pkg-private for testing
|
||||
GlobalPermission permission(Collection<Role> roles) {
|
||||
GlobalPermission.Compound.Builder rolesBuilder = GlobalPermission.Compound.builder();
|
||||
for (Role role : roles) {
|
||||
rolesBuilder.add(role);
|
||||
}
|
||||
return rolesBuilder.build();
|
||||
}
|
||||
|
||||
public void roles(User user, ActionListener<Collection<Role>> roleActionListener) {
|
||||
public void roles(User user, ActionListener<Role> roleActionListener) {
|
||||
// we need to special case the internal users in this method, if we apply the anonymous roles to every user including these system
|
||||
// user accounts then we run into the chance of a deadlock because then we need to get a role that we may be trying to get as the
|
||||
// internal user. The SystemUser is special cased as it has special privileges to execute internal actions and should never be
|
||||
|
@ -309,8 +286,8 @@ public class AuthorizationService extends AbstractComponent {
|
|||
" roles");
|
||||
}
|
||||
if (XPackUser.is(user)) {
|
||||
assert XPackUser.INSTANCE.roles().length == 1 && SuperuserRole.NAME.equals(XPackUser.INSTANCE.roles()[0]);
|
||||
roleActionListener.onResponse(Collections.singleton(SuperuserRole.INSTANCE));
|
||||
assert XPackUser.INSTANCE.roles().length == 1 && ReservedRolesStore.SUPERUSER_ROLE.name().equals(XPackUser.INSTANCE.roles()[0]);
|
||||
roleActionListener.onResponse(ReservedRolesStore.SUPERUSER_ROLE);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -323,15 +300,12 @@ public class AuthorizationService extends AbstractComponent {
|
|||
Collections.addAll(roleNames, anonymousUser.roles());
|
||||
}
|
||||
|
||||
final Collection<Role> defaultRoles = Collections.singletonList(DefaultRole.INSTANCE);
|
||||
if (roleNames.isEmpty()) {
|
||||
roleActionListener.onResponse(defaultRoles);
|
||||
roleActionListener.onResponse(Role.EMPTY);
|
||||
} else if (roleNames.contains(ReservedRolesStore.SUPERUSER_ROLE.name())) {
|
||||
roleActionListener.onResponse(ReservedRolesStore.SUPERUSER_ROLE);
|
||||
} else {
|
||||
final GroupedActionListener<Role> listener = new GroupedActionListener<>(roleActionListener, roleNames.size(),
|
||||
defaultRoles);
|
||||
for (String roleName : roleNames) {
|
||||
rolesStore.roles(roleName, listener);
|
||||
}
|
||||
rolesStore.roles(roleNames, fieldPermissionsCache, roleActionListener);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -367,6 +341,49 @@ public class AuthorizationService extends AbstractComponent {
|
|||
action.equals(SearchTransportService.CLEAR_SCROLL_CONTEXTS_ACTION_NAME);
|
||||
}
|
||||
|
||||
static boolean checkSameUserPermissions(String action, TransportRequest request, Authentication authentication) {
|
||||
final boolean actionAllowed = SAME_USER_PRIVILEGE.test(action);
|
||||
if (actionAllowed) {
|
||||
if (request instanceof UserRequest == false) {
|
||||
assert false : "right now only a user request should be allowed";
|
||||
return false;
|
||||
}
|
||||
UserRequest userRequest = (UserRequest) request;
|
||||
String[] usernames = userRequest.usernames();
|
||||
if (usernames == null || usernames.length != 1 || usernames[0] == null) {
|
||||
assert false : "this role should only be used for actions to apply to a single user";
|
||||
return false;
|
||||
}
|
||||
final String username = usernames[0];
|
||||
final boolean sameUsername = authentication.getRunAsUser().principal().equals(username);
|
||||
if (sameUsername && ChangePasswordAction.NAME.equals(action)) {
|
||||
return checkChangePasswordAction(authentication);
|
||||
}
|
||||
|
||||
assert AuthenticateAction.NAME.equals(action) || sameUsername == false;
|
||||
return sameUsername;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private static boolean checkChangePasswordAction(Authentication authentication) {
|
||||
// we need to verify that this user was authenticated by or looked up by a realm type that support password changes
|
||||
// otherwise we open ourselves up to issues where a user in a different realm could be created with the same username
|
||||
// and do malicious things
|
||||
final boolean isRunAs = authentication.isRunAs();
|
||||
final String realmType;
|
||||
if (isRunAs) {
|
||||
realmType = authentication.getLookedUpBy().getType();
|
||||
} else {
|
||||
realmType = authentication.getAuthenticatedBy().getType();
|
||||
}
|
||||
|
||||
assert realmType != null;
|
||||
// ensure the user was authenticated by a realm that we can change a password for. The native realm is an internal realm and
|
||||
// right now only one can exist in the realm configuration - if this changes we should update this check
|
||||
return ReservedRealm.TYPE.equals(realmType) || NativeRealm.TYPE.equals(realmType);
|
||||
}
|
||||
|
||||
private ElasticsearchSecurityException denial(Authentication authentication, String action, TransportRequest request) {
|
||||
auditTrail.accessDenied(authentication.getUser(), action, request);
|
||||
return denialException(authentication, action);
|
||||
|
|
|
@ -10,18 +10,15 @@ import org.elasticsearch.common.util.concurrent.CountDown;
|
|||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.xpack.security.authc.Authentication;
|
||||
import org.elasticsearch.xpack.security.authz.permission.Role;
|
||||
import org.elasticsearch.xpack.security.support.AutomatonPredicate;
|
||||
import org.elasticsearch.xpack.security.support.Automatons;
|
||||
import org.elasticsearch.xpack.security.user.SystemUser;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.Predicate;
|
||||
|
||||
public final class AuthorizationUtils {
|
||||
|
||||
private static final Predicate<String> INTERNAL_PREDICATE = new AutomatonPredicate(Automatons.patterns("internal:*"));
|
||||
private static final Predicate<String> INTERNAL_PREDICATE = Automatons.predicate("internal:*");
|
||||
|
||||
private AuthorizationUtils() {}
|
||||
|
||||
|
@ -72,14 +69,13 @@ public final class AuthorizationUtils {
|
|||
public static class AsyncAuthorizer {
|
||||
|
||||
private final ActionListener listener;
|
||||
private final BiConsumer<Collection<Role>, Collection<Role>> consumer;
|
||||
private final BiConsumer<Role, Role> consumer;
|
||||
private final Authentication authentication;
|
||||
private volatile Collection<Role> userRoles;
|
||||
private volatile Collection<Role> runAsRoles;
|
||||
private volatile Role userRoles;
|
||||
private volatile Role runAsRoles;
|
||||
private CountDown countDown = new CountDown(2); // we expect only two responses!!
|
||||
|
||||
public AsyncAuthorizer(Authentication authentication, ActionListener listener, BiConsumer<Collection<Role>,
|
||||
Collection<Role>> consumer) {
|
||||
public AsyncAuthorizer(Authentication authentication, ActionListener listener, BiConsumer<Role, Role> consumer) {
|
||||
this.consumer = consumer;
|
||||
this.listener = listener;
|
||||
this.authentication = authentication;
|
||||
|
@@ -87,25 +83,25 @@
        public void authorize(AuthorizationService service) {
            if (SystemUser.is(authentication.getUser())) {
                setUserRoles(Collections.emptyList()); // we can inform the listener immediately - nothing to fetch for us on system user
                setRunAsRoles(Collections.emptyList());
                setUserRoles(null); // we can inform the listener immediately - nothing to fetch for us on system user
                setRunAsRoles(null);
            } else {
                service.roles(authentication.getUser(), ActionListener.wrap(this::setUserRoles, listener::onFailure));
                if (authentication.isRunAs()) {
                    assert authentication.getRunAsUser() != null : "runAs user is null but shouldn't";
                    service.roles(authentication.getRunAsUser(), ActionListener.wrap(this::setRunAsRoles, listener::onFailure));
                } else {
                    setRunAsRoles(Collections.emptyList());
                    setRunAsRoles(null);
                }
            }
        }

        private void setUserRoles(Collection<Role> roles) {
        private void setUserRoles(Role roles) {
            this.userRoles = roles;
            maybeRun();
        }

        private void setRunAsRoles(Collection<Role> roles) {
        private void setRunAsRoles(Role roles) {
            this.runAsRoles = roles;
            maybeRun();
        }

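Aside, not part of the diff: the consumer handed to AsyncAuthorizer now receives a single merged Role for the user and one for the run-as user, matching the new AuthorizationService.authorize signature. A minimal sketch of the calling pattern, assuming authentication, action, request, listener and an AuthorizationService named authorizationService are in scope:

    new AuthorizationUtils.AsyncAuthorizer(authentication, listener,
            (userRole, runAsRole) -> authorizationService.authorize(authentication, action, request, userRole, runAsRole))
            .authorize(authorizationService);
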
@ -9,13 +9,12 @@ import org.elasticsearch.cluster.metadata.AliasOrIndex;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.xpack.security.SecurityTemplateService;
import org.elasticsearch.xpack.security.authz.permission.Role;
import org.elasticsearch.xpack.security.authz.permission.SuperuserRole;
import org.elasticsearch.xpack.security.authz.store.ReservedRolesStore;
import org.elasticsearch.xpack.security.user.User;
import org.elasticsearch.xpack.security.user.XPackUser;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
@ -29,10 +28,10 @@ class AuthorizedIndices {
private final User user;
private final String action;
private final MetaData metaData;
private final Collection<Role> userRoles;
private final Role userRoles;
private List<String> authorizedIndices;

AuthorizedIndices(User user, Collection<Role> userRoles, String action, MetaData metaData) {
AuthorizedIndices(User user, Role userRoles, String action, MetaData metaData) {
this.user = user;
this.userRoles = userRoles;
this.action = action;
@ -47,16 +46,7 @@ class AuthorizedIndices {
}

private List<String> load() {
if (userRoles.isEmpty()) {
return Collections.emptyList();
}

List<Predicate<String>> predicates = new ArrayList<>();
for (Role userRole : userRoles) {
predicates.add(userRole.indices().allowedIndicesMatcher(action));
}

Predicate<String> predicate = predicates.stream().reduce(s -> false, Predicate::or);
Predicate<String> predicate = userRoles.indices().allowedIndicesMatcher(action);

List<String> indicesAndAliases = new ArrayList<>();
// TODO: can this be done smarter? I think there are usually more indices/aliases in the cluster then indices defined a roles?
@ -67,7 +57,7 @@ class AuthorizedIndices {
}
}

if (XPackUser.is(user) == false && Arrays.binarySearch(user.roles(), SuperuserRole.NAME) < 0) {
if (XPackUser.is(user) == false && Arrays.binarySearch(user.roles(), ReservedRolesStore.SUPERUSER_ROLE_DESCRIPTOR.getName()) < 0) {
// we should filter out the .security index from wildcards
indicesAndAliases.remove(SecurityTemplateService.SECURITY_INDEX_NAME);
}
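For illustration, a minimal sketch of the simplification above: with a single merged Role, the authorized names are just the index and alias names accepted by the role's matcher for the given action. The MetaData lookup call is assumed from the surrounding class, not shown in this hunk:
-----
import java.util.ArrayList;
import java.util.List;
import java.util.function.Predicate;

Predicate<String> predicate = userRoles.indices().allowedIndicesMatcher(action);
List<String> indicesAndAliases = new ArrayList<>();
for (String indexOrAlias : metaData.getAliasAndIndexLookup().keySet()) {
    if (predicate.test(indexOrAlias)) {
        indicesAndAliases.add(indexOrAlias);
    }
}
-----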
@ -6,9 +6,9 @@
package org.elasticsearch.xpack.security.authz;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.bytes.BytesArray;
@ -18,12 +18,12 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.xpack.common.xcontent.XContentUtils;
import org.elasticsearch.xpack.security.authz.permission.FieldPermissions;
import org.elasticsearch.xpack.security.support.MetadataUtils;
import org.elasticsearch.xpack.security.support.Validation;

@ -38,13 +38,14 @@ import java.util.Map;
 * A holder for a Role that contains user-readable information about the Role
 * without containing the actual Role object.
 */
public class RoleDescriptor implements ToXContent {
public class RoleDescriptor implements ToXContentObject {

private final String name;
private final String[] clusterPrivileges;
private final IndicesPrivileges[] indicesPrivileges;
private final String[] runAs;
private final Map<String, Object> metadata;
private final Map<String, Object> transientMetadata;

public RoleDescriptor(String name,
@Nullable String[] clusterPrivileges,
@ -58,12 +59,23 @@ public class RoleDescriptor implements ToXContent {
@Nullable IndicesPrivileges[] indicesPrivileges,
@Nullable String[] runAs,
@Nullable Map<String, Object> metadata) {
this(name, clusterPrivileges, indicesPrivileges, runAs, metadata, null);
}

public RoleDescriptor(String name,
@Nullable String[] clusterPrivileges,
@Nullable IndicesPrivileges[] indicesPrivileges,
@Nullable String[] runAs,
@Nullable Map<String, Object> metadata,
@Nullable Map<String, Object> transientMetadata) {
this.name = name;
this.clusterPrivileges = clusterPrivileges != null ? clusterPrivileges : Strings.EMPTY_ARRAY;
this.indicesPrivileges = indicesPrivileges != null ? indicesPrivileges : IndicesPrivileges.NONE;
this.runAs = runAs != null ? runAs : Strings.EMPTY_ARRAY;
this.metadata = metadata != null ? Collections.unmodifiableMap(metadata) : Collections.emptyMap();
this.transientMetadata = transientMetadata != null ? Collections.unmodifiableMap(transientMetadata) :
Collections.singletonMap("enabled", true);
}

public String getName() {
@ -86,6 +98,14 @@ public class RoleDescriptor implements ToXContent {
return metadata;
}

public Map<String, Object> getTransientMetadata() {
return transientMetadata;
}

public boolean isUsingDocumentOrFieldLevelSecurity() {
return Arrays.stream(indicesPrivileges).anyMatch(ip -> ip.isUsingDocumentLevelSecurity() || ip.isUsingFieldLevelSecurity());
}

@Override
public String toString() {
StringBuilder sb = new StringBuilder("Role[");
@ -126,7 +146,12 @@ public class RoleDescriptor implements ToXContent {
return result;
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return toXContent(builder, params, true);
}

public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean includeTransient) throws IOException {
builder.startObject();
builder.array(Fields.CLUSTER.getPreferredName(), clusterPrivileges);
builder.array(Fields.INDICES.getPreferredName(), (Object[]) indicesPrivileges);
@ -134,6 +159,9 @@ public class RoleDescriptor implements ToXContent {
builder.array(Fields.RUN_AS.getPreferredName(), runAs);
}
builder.field(Fields.METADATA.getPreferredName(), metadata);
if (includeTransient) {
builder.field(Fields.TRANSIENT_METADATA.getPreferredName(), transientMetadata);
}
return builder.endObject();
}

@ -147,7 +175,14 @@ public class RoleDescriptor implements ToXContent {
}
String[] runAs = in.readStringArray();
Map<String, Object> metadata = in.readMap();
return new RoleDescriptor(name, clusterPrivileges, indicesPrivileges, runAs, metadata);

final Map<String, Object> transientMetadata;
if (in.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
transientMetadata = in.readMap();
} else {
transientMetadata = Collections.emptyMap();
}
return new RoleDescriptor(name, clusterPrivileges, indicesPrivileges, runAs, metadata, transientMetadata);
}

public static void writeTo(RoleDescriptor descriptor, StreamOutput out) throws IOException {
@ -159,6 +194,9 @@ public class RoleDescriptor implements ToXContent {
}
out.writeStringArray(descriptor.runAs);
out.writeMap(descriptor.metadata);
if (out.getVersion().onOrAfter(Version.V_5_2_0_UNRELEASED)) {
out.writeMap(descriptor.transientMetadata);
}
}

public static RoleDescriptor parse(String name, BytesReference source, boolean allow2xFormat) throws IOException {
@ -191,18 +229,26 @@ public class RoleDescriptor implements ToXContent {
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.INDICES)) {
} else if (Fields.INDICES.match(currentFieldName)) {
indicesPrivileges = parseIndices(name, parser, allow2xFormat);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.RUN_AS)) {
} else if (Fields.RUN_AS.match(currentFieldName)) {
runAsUsers = readStringArray(name, parser, true);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.CLUSTER)) {
} else if (Fields.CLUSTER.match(currentFieldName)) {
clusterPrivileges = readStringArray(name, parser, true);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.METADATA)) {
} else if (Fields.METADATA.match(currentFieldName)) {
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException(
"expected field [{}] to be of type object, but found [{}] instead", currentFieldName, token);
}
metadata = parser.map();
} else if (Fields.TRANSIENT_METADATA.match(currentFieldName)) {
if (token == XContentParser.Token.START_OBJECT) {
// consume object but just drop
parser.map();
} else {
throw new ElasticsearchParseException("expected field [{}] to be an object, but found [{}] instead",
currentFieldName, token);
}
} else {
throw new ElasticsearchParseException("failed to parse role [{}]. unexpected field [{}]", name, currentFieldName);
}
@ -241,14 +287,14 @@ public class RoleDescriptor implements ToXContent {
}
String currentFieldName = null;
String[] names = null;
String query = null;
BytesReference query = null;
String[] privileges = null;
String[] grantedFields = null;
String[] deniedFields = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.NAMES)) {
} else if (Fields.NAMES.match(currentFieldName)) {
if (token == XContentParser.Token.VALUE_STRING) {
names = new String[] { parser.text() };
} else if (token == XContentParser.Token.START_ARRAY) {
@ -261,34 +307,35 @@ public class RoleDescriptor implements ToXContent {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. expected field [{}] " +
"value to be a string or an array of strings, but found [{}] instead", roleName, currentFieldName, token);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.QUERY)) {
} else if (Fields.QUERY.match(currentFieldName)) {
if (token == XContentParser.Token.START_OBJECT) {
XContentBuilder builder = JsonXContent.contentBuilder();
XContentHelper.copyCurrentStructure(builder.generator(), parser);
query = builder.string();
query = builder.bytes();
} else if (token == XContentParser.Token.VALUE_STRING) {
final String text = parser.text();
if (text.isEmpty() == false) {
query = text;
query = new BytesArray(text);
}
} else if (token != XContentParser.Token.VALUE_NULL) {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. expected field [{}] " +
"value to be null, a string, or an object, but found [{}] instead", roleName, currentFieldName, token);
"value to be null, a string, an array, or an object, but found [{}] instead", roleName, currentFieldName,
token);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.FIELD_PERMISSIONS)) {
} else if (Fields.FIELD_PERMISSIONS.match(currentFieldName)) {
if (token == XContentParser.Token.START_OBJECT) {
token = parser.nextToken();
do {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.GRANT_FIELDS)) {
if (Fields.GRANT_FIELDS.match(currentFieldName)) {
parser.nextToken();
grantedFields = readStringArray(roleName, parser, true);
if (grantedFields == null) {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. {} must not " +
"be null.", roleName, Fields.GRANT_FIELDS);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.EXCEPT_FIELDS)) {
} else if (Fields.EXCEPT_FIELDS.match(currentFieldName)) {
parser.nextToken();
deniedFields = readStringArray(roleName, parser, true);
if (deniedFields == null) {
@ -317,9 +364,9 @@ public class RoleDescriptor implements ToXContent {
" in \"{}\".", roleName, XContentParser.Token.START_OBJECT,
XContentParser.Token.START_ARRAY, token, Fields.FIELD_PERMISSIONS);
}
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.PRIVILEGES)) {
} else if (Fields.PRIVILEGES.match(currentFieldName)) {
privileges = readStringArray(roleName, parser, true);
} else if (ParseFieldMatcher.STRICT.match(currentFieldName, Fields.FIELD_PERMISSIONS_2X)) {
} else if (Fields.FIELD_PERMISSIONS_2X.match(currentFieldName)) {
if (allow2xFormat) {
grantedFields = readStringArray(roleName, parser, true);
} else {
@ -327,6 +374,15 @@ public class RoleDescriptor implements ToXContent {
" permissions in role [{}], use [\"{}\": {\"{}\":[...]," + "\"{}\":[...]}] instead",
roleName, Fields.FIELD_PERMISSIONS, Fields.GRANT_FIELDS, Fields.EXCEPT_FIELDS, roleName);
}
} else if (Fields.TRANSIENT_METADATA.match(currentFieldName)) {
if (token == XContentParser.Token.START_OBJECT) {
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
// it is transient metadata, skip it
}
} else {
throw new ElasticsearchParseException("failed to parse transient metadata for role [{}]. expected {} but got {}" +
" in \"{}\".", roleName, XContentParser.Token.START_OBJECT, token, Fields.TRANSIENT_METADATA);
}
} else {
throw new ElasticsearchParseException("failed to parse indices privileges for role [{}]. unexpected field [{}]",
roleName, currentFieldName);
@ -347,7 +403,8 @@ public class RoleDescriptor implements ToXContent {
return RoleDescriptor.IndicesPrivileges.builder()
.indices(names)
.privileges(privileges)
.fieldPermissions(new FieldPermissions(grantedFields, deniedFields))
.grantedFields(grantedFields)
.deniedFields(deniedFields)
.query(query)
.build();
}
@ -362,7 +419,8 @@ public class RoleDescriptor implements ToXContent {

private String[] indices;
private String[] privileges;
private FieldPermissions fieldPermissions = new FieldPermissions();
private String[] grantedFields = null;
private String[] deniedFields = null;
private BytesReference query;

private IndicesPrivileges() {
@ -380,8 +438,14 @@ public class RoleDescriptor implements ToXContent {
return this.privileges;
}

public FieldPermissions getFieldPermissions() {
return fieldPermissions;
@Nullable
public String[] getGrantedFields() {
return this.grantedFields;
}

@Nullable
public String[] getDeniedFields() {
return this.deniedFields;
}

@Nullable
@ -389,15 +453,57 @@ public class RoleDescriptor implements ToXContent {
return this.query;
}

public boolean isUsingDocumentLevelSecurity() {
return query != null;
}

public boolean isUsingFieldLevelSecurity() {
return hasDeniedFields() || hasGrantedFields();
}

private boolean hasDeniedFields() {
return deniedFields != null && deniedFields.length > 0;
}

private boolean hasGrantedFields() {
if (grantedFields != null && grantedFields.length >= 0) {
// we treat just '*' as no FLS since that's what the UI defaults to
if (grantedFields.length == 1 && "*".equals(grantedFields[0])) {
return false;
} else {
return true;
}
}
return false;
}

@Override
public String toString() {
StringBuilder sb = new StringBuilder("IndicesPrivileges[");
sb.append("indices=[").append(Strings.arrayToCommaDelimitedString(indices));
sb.append("], privileges=[").append(Strings.arrayToCommaDelimitedString(privileges));
sb.append("], ");
sb.append(fieldPermissions.toString());
if (grantedFields != null || deniedFields != null) {
sb.append(RoleDescriptor.Fields.FIELD_PERMISSIONS).append("=[");
if (grantedFields == null) {
sb.append(RoleDescriptor.Fields.GRANT_FIELDS).append("=null");
} else {
sb.append(RoleDescriptor.Fields.GRANT_FIELDS).append("=[")
.append(Strings.arrayToCommaDelimitedString(grantedFields));
sb.append("]");
}
if (deniedFields == null) {
sb.append(", ").append(RoleDescriptor.Fields.EXCEPT_FIELDS).append("=null");
} else {
sb.append(", ").append(RoleDescriptor.Fields.EXCEPT_FIELDS).append("=[")
.append(Strings.arrayToCommaDelimitedString(deniedFields));
sb.append("]");
}
sb.append("]");
}
if (query != null) {
sb.append(", query=").append(query.utf8ToString());
sb.append(", query=");
sb.append(query.utf8ToString());
}
sb.append("]");
return sb.toString();
@ -412,7 +518,8 @@ public class RoleDescriptor implements ToXContent {

if (!Arrays.equals(indices, that.indices)) return false;
if (!Arrays.equals(privileges, that.privileges)) return false;
if (fieldPermissions.equals(that.fieldPermissions) == false) return false;
if (!Arrays.equals(grantedFields, that.grantedFields)) return false;
if (!Arrays.equals(deniedFields, that.deniedFields)) return false;
return !(query != null ? !query.equals(that.query) : that.query != null);
}

@ -420,7 +527,8 @@ public class RoleDescriptor implements ToXContent {
public int hashCode() {
int result = Arrays.hashCode(indices);
result = 31 * result + Arrays.hashCode(privileges);
result = 31 * result + fieldPermissions.hashCode();
result = 31 * result + Arrays.hashCode(grantedFields);
result = 31 * result + Arrays.hashCode(deniedFields);
result = 31 * result + (query != null ? query.hashCode() : 0);
return result;
}
@ -430,7 +538,16 @@ public class RoleDescriptor implements ToXContent {
builder.startObject();
builder.array("names", indices);
builder.array("privileges", privileges);
builder = fieldPermissions.toXContent(builder, params);
if (grantedFields != null || deniedFields != null) {
builder.startObject(RoleDescriptor.Fields.FIELD_PERMISSIONS.getPreferredName());
if (grantedFields != null) {
builder.array(RoleDescriptor.Fields.GRANT_FIELDS.getPreferredName(), grantedFields);
}
if (deniedFields != null) {
builder.array(RoleDescriptor.Fields.EXCEPT_FIELDS.getPreferredName(), deniedFields);
}
builder.endObject();
}
if (query != null) {
builder.field("query", query.utf8ToString());
}
@ -446,7 +563,8 @@ public class RoleDescriptor implements ToXContent {
@Override
public void readFrom(StreamInput in) throws IOException {
this.indices = in.readStringArray();
this.fieldPermissions = new FieldPermissions(in);
this.grantedFields = in.readOptionalStringArray();
this.deniedFields = in.readOptionalStringArray();
this.privileges = in.readStringArray();
this.query = in.readOptionalBytesReference();
}
@ -454,7 +572,8 @@ public class RoleDescriptor implements ToXContent {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeStringArray(indices);
fieldPermissions.writeTo(out);
out.writeOptionalStringArray(grantedFields);
out.writeOptionalStringArray(deniedFields);
out.writeStringArray(privileges);
out.writeOptionalBytesReference(query);
}
@ -476,8 +595,13 @@ public class RoleDescriptor implements ToXContent {
return this;
}

public Builder fieldPermissions(FieldPermissions fieldPermissions) {
indicesPrivileges.fieldPermissions = fieldPermissions;
public Builder grantedFields(String... grantedFields) {
indicesPrivileges.grantedFields = grantedFields;
return this;
}

public Builder deniedFields(String... deniedFields) {
indicesPrivileges.deniedFields = deniedFields;
return this;
}

@ -486,7 +610,11 @@ public class RoleDescriptor implements ToXContent {
}

public Builder query(@Nullable BytesReference query) {
if (query == null) {
indicesPrivileges.query = null;
} else {
indicesPrivileges.query = query;
}
return this;
}

@ -514,5 +642,6 @@ public class RoleDescriptor implements ToXContent {
ParseField GRANT_FIELDS = new ParseField("grant");
ParseField EXCEPT_FIELDS = new ParseField("except");
ParseField METADATA = new ParseField("metadata");
ParseField TRANSIENT_METADATA = new ParseField("transient_metadata");
}
}
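For illustration, a sketch of building an indices privilege with the new grantedFields/deniedFields setters and a BytesReference query. Only builder methods and the five-argument constructor shown in this diff are used; varargs on indices/privileges is assumed, and the index names, fields, and query are made up:
-----
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.xpack.security.authz.RoleDescriptor;

RoleDescriptor.IndicesPrivileges privileges = RoleDescriptor.IndicesPrivileges.builder()
        .indices("logs-*")
        .privileges("read")
        .grantedFields("message", "@timestamp")     // FLS: only these fields are visible
        .deniedFields("message.sensitive")          // except this one
        .query(new BytesArray("{\"term\":{\"department\":\"engineering\"}}")) // DLS query as bytes
        .build();

RoleDescriptor descriptor = new RoleDescriptor("log_reader", new String[] { "monitor" },
        new RoleDescriptor.IndicesPrivileges[] { privileges }, null, null);
-----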
@ -88,36 +88,5 @@ public class IndicesAccessControl {
public Set<BytesReference> getQueries() {
return queries;
}

public IndexAccessControl merge(IndexAccessControl other) {
if (other.isGranted() == false) {
// nothing to merge
return this;
}

final boolean granted = this.granted;
if (granted == false) {
// we do not support negatives, so if the current isn't granted - just return other
assert other.isGranted();
return other;
}

FieldPermissions newPermissions = FieldPermissions.merge(this.fieldPermissions, other.fieldPermissions);

Set<BytesReference> queries = null;
if (this.queries != null && other.getQueries() != null) {
queries = new HashSet<>();
if (this.queries != null) {
queries.addAll(this.queries);
}
if (other.getQueries() != null) {
queries.addAll(other.getQueries());
}
queries = unmodifiableSet(queries);
}
return new IndexAccessControl(granted, newPermissions, queries);
}

}
}
@ -32,7 +32,6 @@ import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.FilterClient;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.logging.Loggers;
@ -303,7 +302,7 @@ public class SecurityIndexSearcherWrapper extends IndexSearcherWrapper {
if (token != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("Unexpected token [" + token + "]");
}
Script script = Script.parse(parser, ParseFieldMatcher.EMPTY);
Script script = Script.parse(parser);
// Add the user details to the params
Map<String, Object> params = new HashMap<>();
if (script.getParams() != null) {
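For illustration, the template-parsing change above in isolation: in this branch Script.parse no longer takes a ParseFieldMatcher (taken directly from the diff), so a small helper reduces to the following sketch:
-----
import java.io.IOException;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.script.Script;

static Script parseRoleQueryTemplate(XContentParser parser) throws IOException {
    // parser is expected to be positioned on the START_OBJECT of the script/template definition
    return Script.parse(parser);
}
-----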
@ -5,39 +5,21 @@
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.xpack.security.authc.Authentication;
import org.elasticsearch.xpack.security.authz.privilege.ClusterPrivilege;

import java.util.List;
import java.util.Objects;
import java.util.function.Predicate;

/**
 * A permission that is based on privileges for cluster wide actions
 */
public interface ClusterPermission extends Permission {
public final class ClusterPermission {

boolean check(String action, TransportRequest request, Authentication authentication);

class Core implements ClusterPermission {

public static final Core NONE = new Core(ClusterPrivilege.NONE) {
@Override
public boolean check(String action, TransportRequest request, Authentication authentication) {
return false;
}

@Override
public boolean isEmpty() {
return true;
}
};
public static final ClusterPermission NONE = new ClusterPermission(ClusterPrivilege.NONE);

private final ClusterPrivilege privilege;
private final Predicate<String> predicate;

Core(ClusterPrivilege privilege) {
ClusterPermission(ClusterPrivilege privilege) {
this.privilege = privilege;
this.predicate = privilege.predicate();
}
@ -46,52 +28,7 @@ public interface ClusterPermission extends Permission {
return privilege;
}

@Override
public boolean check(String action, TransportRequest request, Authentication authentication) {
public boolean check(String action) {
return predicate.test(action);
}

@Override
public boolean isEmpty() {
return false;
}
}

class Globals implements ClusterPermission {

private final List<GlobalPermission> globals;

Globals(List<GlobalPermission> globals) {
this.globals = globals;
}

@Override
public boolean check(String action, TransportRequest request, Authentication authentication) {
if (globals == null) {
return false;
}
for (GlobalPermission global : globals) {
Objects.requireNonNull(global, "global must not be null");
Objects.requireNonNull(global.indices(), "global.indices() must not be null");
if (global.cluster().check(action, request, authentication)) {
return true;
}
}
return false;
}

@Override
public boolean isEmpty() {
if (globals == null || globals.isEmpty()) {
return true;
}
for (GlobalPermission global : globals) {
if (!global.isEmpty()) {
return false;
}
}
return true;
}
}

}
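For illustration, with the interface/Core/Globals hierarchy removed, a cluster permission check reduces to a predicate test on the action name; the request and authentication are no longer part of the signature. A sketch using what the diff shows; the constructor is package-private, so construction happens inside the permission package, and the MONITOR privilege constant is assumed to exist:
-----
// inside org.elasticsearch.xpack.security.authz.permission
ClusterPermission monitoring = new ClusterPermission(ClusterPrivilege.MONITOR);

boolean allowed = monitoring.check("cluster:monitor/health");              // predicate test on the action name only
boolean denied  = ClusterPermission.NONE.check("cluster:admin/settings/update"); // NONE is built on ClusterPrivilege.NONE
-----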
@ -1,88 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.xpack.security.authc.Authentication;
import org.elasticsearch.xpack.security.authc.esnative.NativeRealm;
import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;
import org.elasticsearch.xpack.security.action.user.AuthenticateAction;
import org.elasticsearch.xpack.security.action.user.ChangePasswordAction;
import org.elasticsearch.xpack.security.action.user.UserRequest;
import org.elasticsearch.xpack.security.authz.permission.RunAsPermission.Core;
import org.elasticsearch.xpack.security.authz.privilege.ClusterPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.Privilege.Name;
import org.elasticsearch.transport.TransportRequest;

/**
 * A default role that will be applied to all users other than the internal {@link org.elasticsearch.xpack.security.user.SystemUser}. This
 * role grants access to actions that every user should be able to execute such as the ability to change their password and execute the
 * authenticate endpoint to get information about themselves
 */
public class DefaultRole extends Role {

private static final ClusterPermission.Core CLUSTER_PERMISSION =
new SameUserClusterPermission(ClusterPrivilege.get(new Name(ChangePasswordAction.NAME, AuthenticateAction.NAME)));
private static final IndicesPermission.Core INDICES_PERMISSION = IndicesPermission.Core.NONE;
private static final RunAsPermission.Core RUN_AS_PERMISSION = Core.NONE;

public static final String NAME = "__default_role";
public static final DefaultRole INSTANCE = new DefaultRole();

private DefaultRole() {
super(NAME, CLUSTER_PERMISSION, INDICES_PERMISSION, RUN_AS_PERMISSION);
}

private static class SameUserClusterPermission extends ClusterPermission.Core {

private SameUserClusterPermission(ClusterPrivilege privilege) {
super(privilege);
}

@Override
public boolean check(String action, TransportRequest request, Authentication authentication) {
final boolean actionAllowed = super.check(action, request, authentication);
if (actionAllowed) {
if (request instanceof UserRequest == false) {
assert false : "right now only a user request should be allowed";
return false;
}
UserRequest userRequest = (UserRequest) request;
String[] usernames = userRequest.usernames();
if (usernames == null || usernames.length != 1 || usernames[0] == null) {
assert false : "this role should only be used for actions to apply to a single user";
return false;
}
final String username = usernames[0];
final boolean sameUsername = authentication.getRunAsUser().principal().equals(username);
if (sameUsername && ChangePasswordAction.NAME.equals(action)) {
return checkChangePasswordAction(authentication);
}

assert AuthenticateAction.NAME.equals(action) || sameUsername == false;
return sameUsername;
}
return false;
}
}

static boolean checkChangePasswordAction(Authentication authentication) {
// we need to verify that this user was authenticated by or looked up by a realm type that support password changes
// otherwise we open ourselves up to issues where a user in a different realm could be created with the same username
// and do malicious things
final boolean isRunAs = authentication.isRunAs();
final String realmType;
if (isRunAs) {
realmType = authentication.getLookedUpBy().getType();
} else {
realmType = authentication.getAuthenticatedBy().getType();
}

assert realmType != null;
// ensure the user was authenticated by a realm that we can change a password for. The native realm is an internal realm and right
// now only one can exist in the realm configuration - if this changes we should update this check
return ReservedRealm.TYPE.equals(realmType) || NativeRealm.TYPE.equals(realmType);
}
}
@ -13,8 +13,6 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.mapper.AllFieldMapper;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.xpack.security.authz.RoleDescriptor;
@ -26,14 +24,10 @@ import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

import static org.apache.lucene.util.automaton.MinimizationOperations.minimize;
import static org.apache.lucene.util.automaton.Operations.DEFAULT_MAX_DETERMINIZED_STATES;
import static org.apache.lucene.util.automaton.Operations.isTotal;
import static org.apache.lucene.util.automaton.Operations.run;
import static org.apache.lucene.util.automaton.Operations.sameLanguage;
import static org.apache.lucene.util.automaton.Operations.subsetOf;
import static org.apache.lucene.util.automaton.Operations.union;
import static org.elasticsearch.xpack.security.support.Automatons.minusAndDeterminize;
import static org.elasticsearch.xpack.security.support.Automatons.minusAndMinimize;

/**
 * Stores patterns to fields which access is granted or denied to and maintains an automaton that can be used to check if permission is
@ -43,30 +37,42 @@ import static org.elasticsearch.xpack.security.support.Automatons.minusAndDeterm
 * 1. It has to match the patterns in grantedFieldsArray
 * 2. it must not match the patterns in deniedFieldsArray
 */
public class FieldPermissions implements Writeable, ToXContent {
public final class FieldPermissions implements Writeable {

public static final FieldPermissions DEFAULT = new FieldPermissions();

// the patterns for fields which we allow access to. if gratedFieldsArray is null we assume that all fields are grated access to
String[] grantedFieldsArray;
private final String[] grantedFieldsArray;
// the patterns for fields which we deny access to. if this is an empty list or null we assume that we do not deny access to any
// field explicitly
String[] deniedFieldsArray;
private final String[] deniedFieldsArray;
// an automaton that matches all strings that match the patterns in permittedFieldsArray but does not match those that also match a
// pattern in deniedFieldsArray. If permittedFieldsAutomaton is null we assume that all fields are granted access to.
Automaton permittedFieldsAutomaton;
private final Automaton permittedFieldsAutomaton;

// we cannot easily determine if all fields are allowed and we can therefore also allow access to the _all field hence we deny access
// to _all unless this was explicitly configured.
boolean allFieldIsAllowed = false;
private final boolean allFieldIsAllowed;

public FieldPermissions() {
this(null, null);
}

public FieldPermissions(StreamInput in) throws IOException {
this(in.readOptionalStringArray(), in.readOptionalStringArray());
}

public FieldPermissions(@Nullable String[] grantedFieldsArray, @Nullable String[] deniedFieldsArray) {
this(grantedFieldsArray, deniedFieldsArray, initializePermittedFieldsAutomaton(grantedFieldsArray, deniedFieldsArray),
checkAllFieldIsAllowed(grantedFieldsArray, deniedFieldsArray));
}

FieldPermissions(@Nullable String[] grantedFieldsArray, @Nullable String[] deniedFieldsArray,
Automaton permittedFieldsAutomaton, boolean allFieldIsAllowed) {
this.grantedFieldsArray = grantedFieldsArray;
this.deniedFieldsArray = deniedFieldsArray;
permittedFieldsAutomaton = initializePermittedFieldsAutomaton(grantedFieldsArray, deniedFieldsArray);
allFieldIsAllowed = checkAllFieldIsAllowed(grantedFieldsArray, deniedFieldsArray);
this.permittedFieldsAutomaton = permittedFieldsAutomaton;
this.allFieldIsAllowed = allFieldIsAllowed;
}

private static boolean checkAllFieldIsAllowed(String[] grantedFieldsArray, String[] deniedFieldsArray) {
@ -87,8 +93,7 @@ public class FieldPermissions implements Writeable, ToXContent {
return false;
}

private static Automaton initializePermittedFieldsAutomaton(final String[] grantedFieldsArray,
final String[] deniedFieldsArray) {
private static Automaton initializePermittedFieldsAutomaton(final String[] grantedFieldsArray, final String[] deniedFieldsArray) {
Automaton grantedFieldsAutomaton;
if (grantedFieldsArray == null || containsWildcard(grantedFieldsArray)) {
grantedFieldsAutomaton = Automatons.MATCH_ALL;
@ -107,7 +112,7 @@ public class FieldPermissions implements Writeable, ToXContent {
Arrays.toString(grantedFieldsArray));
}

grantedFieldsAutomaton = minusAndDeterminize(grantedFieldsAutomaton, deniedFieldsAutomaton);
grantedFieldsAutomaton = minusAndMinimize(grantedFieldsAutomaton, deniedFieldsAutomaton);
return grantedFieldsAutomaton;
}

@ -120,26 +125,12 @@ public class FieldPermissions implements Writeable, ToXContent {
return false;
}

public FieldPermissions() {
this(null, null);
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalStringArray(grantedFieldsArray);
out.writeOptionalStringArray(deniedFieldsArray);
}

@Nullable
String[] getGrantedFieldsArray() {
return grantedFieldsArray;
}

@Nullable
String[] getDeniedFieldsArray() {
return deniedFieldsArray;
}

@Override
public String toString() {
StringBuilder sb = new StringBuilder();
@ -164,21 +155,6 @@ public class FieldPermissions implements Writeable, ToXContent {
return sb.toString();
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (grantedFieldsArray != null || deniedFieldsArray != null) {
builder.startObject(RoleDescriptor.Fields.FIELD_PERMISSIONS.getPreferredName());
if (grantedFieldsArray != null) {
builder.array(RoleDescriptor.Fields.GRANT_FIELDS.getPreferredName(), grantedFieldsArray);
}
if (deniedFieldsArray != null) {
builder.array(RoleDescriptor.Fields.EXCEPT_FIELDS.getPreferredName(), deniedFieldsArray);
}
builder.endObject();
}
return builder;
}

/**
 * Returns true if this field permission policy allows access to the field and false if not.
 * fieldName can be a wildcard.
@ -187,22 +163,28 @@ public class FieldPermissions implements Writeable, ToXContent {
return isTotal(permittedFieldsAutomaton) || run(permittedFieldsAutomaton, fieldName);
}

// Also, if one grants no access to fields and the other grants all access, merging should result in all access...
public static FieldPermissions merge(FieldPermissions p1, FieldPermissions p2) {
Automaton mergedPermittedFieldsAutomaton;
// we only allow the union of the two automatons
mergedPermittedFieldsAutomaton = union(p1.permittedFieldsAutomaton, p2.permittedFieldsAutomaton);
// need to minimize otherwise isTotal() might return false even if one of the merged ones returned true before
mergedPermittedFieldsAutomaton = minimize(mergedPermittedFieldsAutomaton, DEFAULT_MAX_DETERMINIZED_STATES);
// if one of them allows access to _all we allow it for the merged too
boolean allFieldIsAllowedInMerged = p1.allFieldIsAllowed || p2.allFieldIsAllowed;
return new MergedFieldPermissions(mergedPermittedFieldsAutomaton, allFieldIsAllowedInMerged);
Automaton getPermittedFieldsAutomaton() {
return permittedFieldsAutomaton;
}

@Nullable
String[] getGrantedFieldsArray() {
return grantedFieldsArray;
}

@Nullable
String[] getDeniedFieldsArray() {
return deniedFieldsArray;
}

public boolean hasFieldLevelSecurity() {
return isTotal(permittedFieldsAutomaton) == false;
}

boolean isAllFieldIsAllowed() {
return allFieldIsAllowed;
}

public Set<String> resolveAllowedFields(Set<String> allowedMetaFields, MapperService mapperService) {
HashSet<String> finalAllowedFields = new HashSet<>();
// we always add the allowed meta fields because we must make sure access is not denied accidentally
@ -232,59 +214,14 @@ public class FieldPermissions implements Writeable, ToXContent {
// Probably incorrect - comparing Object[] arrays with Arrays.equals
if (!Arrays.equals(grantedFieldsArray, that.grantedFieldsArray)) return false;
// Probably incorrect - comparing Object[] arrays with Arrays.equals
if (!Arrays.equals(deniedFieldsArray, that.deniedFieldsArray)) return false;
return sameLanguage(permittedFieldsAutomaton, that.permittedFieldsAutomaton);

return Arrays.equals(deniedFieldsArray, that.deniedFieldsArray);
}

@Override
public int hashCode() {
int result = Arrays.hashCode(grantedFieldsArray);
result = 31 * result + Arrays.hashCode(deniedFieldsArray);
result = 31 * result + permittedFieldsAutomaton.hashCode();
result = 31 * result + (allFieldIsAllowed ? 1 : 0);
return result;
}

/**
 * When we merge field permissions we need to union all the allowed fields. We do this by a union of the automatons
 * that define which fields are granted access too. However, that means that after merging we cannot know anymore
 * which strings defined the automatons. Hence we make a new class that only has an automaton for the fields that
 * we grant access to and that throws an exception whenever we try to access the original patterns that lead to
 * the automaton.
 */
public static class MergedFieldPermissions extends FieldPermissions {
public MergedFieldPermissions(Automaton grantedFields, boolean allFieldIsAllowed) {
assert grantedFields != null;
this.permittedFieldsAutomaton = grantedFields;
this.grantedFieldsArray = null;
this.deniedFieldsArray = null;
this.allFieldIsAllowed = allFieldIsAllowed;
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
throw new UnsupportedOperationException("Cannot build xcontent for merged field permissions");
}

@Override
public String toString() {
throw new UnsupportedOperationException("Cannot build string for merged field permissions");
}

@Override
public void writeTo(StreamOutput out) throws IOException {
throw new UnsupportedOperationException("Cannot stream for merged field permissions");
}

@Nullable
public String[] getGrantedFieldsArray() {
throw new UnsupportedOperationException("Merged field permissions does not maintain sets");
}

@Nullable
public String[] getDeniedFieldsArray() {
throw new UnsupportedOperationException("Merged field permissions does not maintain sets");
}
}
}
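For illustration, FieldPermissions is now immutable and no longer implements ToXContent or exposes merge(); combining instances moves to the new FieldPermissionsCache in the next file. A sketch of the basic checks, using the constructor and methods shown in this diff with made-up field patterns:
-----
FieldPermissions permissions = new FieldPermissions(
        new String[] { "message", "user.*" },   // granted patterns
        new String[] { "user.ssn" });           // denied patterns

boolean fls     = permissions.hasFieldLevelSecurity();     // true: not every field is permitted
boolean visible = permissions.grantsAccessTo("user.name"); // true: matches a granted pattern
boolean hidden  = permissions.grantsAccessTo("user.ssn");  // false: explicitly denied
-----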
@ -0,0 +1,181 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.apache.lucene.util.automaton.Operations;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.cache.Cache;
import org.elasticsearch.common.cache.CacheBuilder;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.stream.Collectors;

import static org.elasticsearch.xpack.security.Security.setting;

/**
 * A service for managing the caching of {@link FieldPermissions} as these may often need to be combined or created and internally they
 * use an {@link org.apache.lucene.util.automaton.Automaton}, which can be costly to create once you account for minimization
 */
public final class FieldPermissionsCache {

public static final Setting<Long> CACHE_SIZE_SETTING = Setting.longSetting(
setting("authz.store.roles.field_permissions.cache.max_size_in_bytes"), 100 * 1024 * 1024, -1L, Property.NodeScope);
private final Cache<Key, FieldPermissions> cache;

public FieldPermissionsCache(Settings settings) {
this.cache = CacheBuilder.<Key, FieldPermissions>builder()
.setMaximumWeight(CACHE_SIZE_SETTING.get(settings))
// this is not completely accurate but in most cases the automaton should be the most expensive aspect
.weigher((key, fieldPermissions) -> fieldPermissions.getPermittedFieldsAutomaton().ramBytesUsed())
.build();
}

/**
 * Gets a {@link FieldPermissions} instance that corresponds to the granted and denied parameters. The instance may come from the cache
 * or if it gets created, the instance will be cached
 */
FieldPermissions getFieldPermissions(String[] granted, String[] denied) {
final Set<String> grantedSet;
if (granted != null) {
grantedSet = new HashSet<>(granted.length);
Collections.addAll(grantedSet, granted);
} else {
grantedSet = null;
}

final Set<String> deniedSet;
if (denied != null) {
deniedSet = new HashSet<>(denied.length);
Collections.addAll(deniedSet, denied);
} else {
deniedSet = null;
}

return getFieldPermissions(grantedSet, deniedSet);
}

/**
 * Gets a {@link FieldPermissions} instance that corresponds to the granted and denied parameters. The instance may come from the cache
 * or if it gets created, the instance will be cached
 */
public FieldPermissions getFieldPermissions(Set<String> granted, Set<String> denied) {
Key fpKey = new Key(granted == null ? null : Collections.unmodifiableSet(granted),
denied == null ? null : Collections.unmodifiableSet(denied));
try {
return cache.computeIfAbsent(fpKey,
(key) -> new FieldPermissions(key.grantedFields == null ? null : key.grantedFields.toArray(Strings.EMPTY_ARRAY),
key.deniedFields == null ? null : key.deniedFields.toArray(Strings.EMPTY_ARRAY)));
} catch (ExecutionException e) {
throw new ElasticsearchException("unable to compute field permissions", e);
}
}

/**
 * Returns a field permissions object that corresponds to the merging of the given field permissions and caches the instance if one was
 * not found in the cache.
 */
FieldPermissions getFieldPermissions(Collection<FieldPermissions> fieldPermissionsCollection) {
Optional<FieldPermissions> allowAllFieldPermissions = fieldPermissionsCollection.stream()
.filter((fp) -> Operations.isTotal(fp.getPermittedFieldsAutomaton()))
.findFirst();
return allowAllFieldPermissions.orElseGet(() -> {
final Set<String> allowedFields;
Optional<FieldPermissions> nullAllowedFields = fieldPermissionsCollection.stream()
.filter((fieldPermissions) -> fieldPermissions.getGrantedFieldsArray() == null)
.findFirst();
if (nullAllowedFields.isPresent()) {
allowedFields = null;
} else {
allowedFields = fieldPermissionsCollection.stream()
.flatMap(fieldPermissions -> Arrays.stream(fieldPermissions.getGrantedFieldsArray()))
.collect(Collectors.toSet());
}

final Set<String> deniedFields = fieldPermissionsCollection.stream()
.filter(fieldPermissions -> fieldPermissions.getDeniedFieldsArray() != null)
.flatMap(fieldPermissions -> Arrays.stream(fieldPermissions.getDeniedFieldsArray()))
.collect(Collectors.toSet());
try {
return cache.computeIfAbsent(new Key(allowedFields, deniedFields),
(key) -> {
final String[] actualDeniedFields = key.deniedFields == null ? null :
computeDeniedFieldsForPermissions(fieldPermissionsCollection, key.deniedFields);
return new FieldPermissions(key.grantedFields == null ? null : key.grantedFields.toArray(Strings.EMPTY_ARRAY),
actualDeniedFields);
});
} catch (ExecutionException e) {
throw new ElasticsearchException("unable to compute field permissions", e);
}
});
}

private static String[] computeDeniedFieldsForPermissions(Collection<FieldPermissions> fieldPermissionsCollection,
Set<String> allDeniedFields) {
Set<String> allowedDeniedFields = new HashSet<>();
fieldPermissionsCollection
.stream()
.filter(fieldPermissions -> fieldPermissions.getDeniedFieldsArray() != null)
.forEach((fieldPermissions) -> {
String[] deniedFieldsForPermission = fieldPermissions.getDeniedFieldsArray();
fieldPermissionsCollection.forEach((fp) -> {
if (fp != fieldPermissions) {
Arrays.stream(deniedFieldsForPermission).forEach((field) -> {
if (fp.grantsAccessTo(field)) {
allowedDeniedFields.add(field);
}
});
}
});
});

Set<String> difference = Sets.difference(allDeniedFields, allowedDeniedFields);
if (difference.isEmpty()) {
return null;
} else {
return difference.toArray(Strings.EMPTY_ARRAY);
}
}

private static class Key {

private final Set<String> grantedFields;
private final Set<String> deniedFields;

Key(Set<String> grantedFields, Set<String> deniedFields) {
this.grantedFields = grantedFields == null ? null : Collections.unmodifiableSet(grantedFields);
this.deniedFields = deniedFields == null ? null : Collections.unmodifiableSet(deniedFields);
}

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (!(o instanceof Key)) return false;

Key key = (Key) o;

if (grantedFields != null ? !grantedFields.equals(key.grantedFields) : key.grantedFields != null) return false;
return deniedFields != null ? deniedFields.equals(key.deniedFields) : key.deniedFields == null;
}

@Override
public int hashCode() {
int result = grantedFields != null ? grantedFields.hashCode() : 0;
result = 31 * result + (deniedFields != null ? deniedFields.hashCode() : 0);
return result;
}
}
}
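For illustration, a sketch of how the new cache is expected to be used, assumed from the public methods above: equal grant/deny sets map to one shared FieldPermissions instance, so the underlying automaton is built and minimized only once per distinct key:
-----
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;

FieldPermissionsCache cache = new FieldPermissionsCache(Settings.EMPTY);

FieldPermissions first  = cache.getFieldPermissions(Sets.newHashSet("message"), null);
FieldPermissions second = cache.getFieldPermissions(Sets.newHashSet("message"), null);
assert first == second; // same cached instance for the same granted/denied key
-----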
@ -1,100 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.xpack.security.authz.accesscontrol.IndicesAccessControl;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * A composite permission that combines {@code cluster}, {@code indices} and {@code run_as} permissions
 */
public class GlobalPermission implements Permission {

public static final GlobalPermission NONE = new GlobalPermission(ClusterPermission.Core.NONE, IndicesPermission.Core.NONE,
RunAsPermission.Core.NONE);

private final ClusterPermission cluster;
private final IndicesPermission indices;
private final RunAsPermission runAs;

GlobalPermission(ClusterPermission cluster, IndicesPermission indices, RunAsPermission runAs) {
this.cluster = cluster;
this.indices = indices;
this.runAs = runAs;
}

public ClusterPermission cluster() {
return cluster;
}

public IndicesPermission indices() {
return indices;
}

public RunAsPermission runAs() {
return runAs;
}

@Override
public boolean isEmpty() {
return (cluster == null || cluster.isEmpty()) && (indices == null || indices.isEmpty()) && (runAs == null || runAs.isEmpty());
}

/**
 * Returns whether at least one group encapsulated by this indices permissions is authorized to execute the
 * specified action with the requested indices/aliases. At the same time if field and/or document level security
 * is configured for any group also the allowed fields and role queries are resolved.
 */
public IndicesAccessControl authorize(String action, Set<String> requestedIndicesOrAliases, MetaData metaData) {
Map<String, IndicesAccessControl.IndexAccessControl> indexPermissions = indices.authorize(
action, requestedIndicesOrAliases, metaData
);

// At least one role / indices permission set need to match with all the requested indices/aliases:
boolean granted = true;
for (Map.Entry<String, IndicesAccessControl.IndexAccessControl> entry : indexPermissions.entrySet()) {
if (!entry.getValue().isGranted()) {
granted = false;
break;
}
}
return new IndicesAccessControl(granted, indexPermissions);
}

public static class Compound extends GlobalPermission {

Compound(List<GlobalPermission> globals) {
super(new ClusterPermission.Globals(globals), new IndicesPermission.Globals(globals), new RunAsPermission.Globals(globals));
}

public static Compound.Builder builder() {
return new Compound.Builder();
}

public static class Builder {

private List<GlobalPermission> globals = new ArrayList<>();

private Builder() {
}

public Compound.Builder add(GlobalPermission global) {
globals.add(global);
return this;
}

public Compound build() {
return new Compound(Collections.unmodifiableList(globals));
}
}
}
}
@@ -12,12 +12,10 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.xpack.security.authz.accesscontrol.IndicesAccessControl;
import org.elasticsearch.xpack.security.authz.privilege.IndexPrivilege;
import org.elasticsearch.xpack.security.support.AutomatonPredicate;
import org.elasticsearch.xpack.security.support.Automatons;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;

@@ -30,7 +28,6 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import java.util.function.Predicate;

import static java.util.Collections.emptyMap;
import static java.util.Collections.unmodifiableMap;
import static java.util.Collections.unmodifiableSet;

@@ -38,34 +35,9 @@ import static java.util.Collections.unmodifiableSet;
 * A permission that is based on privileges for index related actions executed
 * on specific indices
 */
public interface IndicesPermission extends Permission, Iterable<IndicesPermission.Group> {
public final class IndicesPermission implements Iterable<IndicesPermission.Group> {

    /**
     * Authorizes the provided action against the provided indices, given the current cluster metadata
     */
    Map<String, IndicesAccessControl.IndexAccessControl> authorize(String action, Set<String> requestedIndicesOrAliases, MetaData metaData);

    /**
     * Checks if the permission matches the provided action, without looking at indices.
     * To be used in very specific cases where indices actions need to be authorized regardless of their indices.
     * The usecase for this is composite actions that are initially only authorized based on the action name (indices are not
     * checked on the coordinating node), and properly authorized later at the shard level checking their indices as well.
     */
    boolean check(String action);

    class Core implements IndicesPermission {

        public static final Core NONE = new Core() {
            @Override
            public Iterator<Group> iterator() {
                return Collections.emptyIterator();
            }

            @Override
            public boolean isEmpty() {
                return true;
            }
        };
    public static final IndicesPermission NONE = new IndicesPermission();

    private final Function<String, Predicate<String>> loadingFunction;

@@ -73,11 +45,7 @@ public interface IndicesPermission extends Permission, Iterable<IndicesPermission.Group> {

    private final Group[] groups;

    public Core(List<Group> groups) {
        this(groups.toArray(new Group[groups.size()]));
    }

    public Core(Group... groups) {
    public IndicesPermission(Group... groups) {
        this.groups = groups;
        loadingFunction = (action) -> {
            List<String> indices = new ArrayList<>();

@@ -86,7 +54,7 @@ public interface IndicesPermission extends Permission, Iterable<IndicesPermission.Group> {
                indices.addAll(Arrays.asList(group.indices));
            }
        }
        return new AutomatonPredicate(Automatons.patterns(Collections.unmodifiableList(indices)));
        return Automatons.predicate(indices);
        };
    }

@@ -99,11 +67,6 @@ public interface IndicesPermission extends Permission, Iterable<IndicesPermission.Group> {
        return groups;
    }

    @Override
    public boolean isEmpty() {
        return groups == null || groups.length == 0;
    }

    /**
     * @return A predicate that will match all the indices that this permission
     * has the privilege for executing the given action on.

@@ -112,7 +75,12 @@ public interface IndicesPermission extends Permission, Iterable<IndicesPermission.Group> {
        return allowedIndicesMatchersForAction.computeIfAbsent(action, loadingFunction);
    }

    @Override
    /**
     * Checks if the permission matches the provided action, without looking at indices.
     * To be used in very specific cases where indices actions need to be authorized regardless of their indices.
     * The usecase for this is composite actions that are initially only authorized based on the action name (indices are not
     * checked on the coordinating node), and properly authorized later at the shard level checking their indices as well.
     */
    public boolean check(String action) {
        for (Group group : groups) {
            if (group.check(action)) {

@@ -122,9 +90,11 @@ public interface IndicesPermission extends Permission, Iterable<IndicesPermission.Group> {
        return false;
    }

    @Override
    /**
     * Authorizes the provided action against the provided indices, given the current cluster metadata
     */
    public Map<String, IndicesAccessControl.IndexAccessControl> authorize(String action, Set<String> requestedIndicesOrAliases,
                                                                          MetaData metaData) {
                                                                          MetaData metaData, FieldPermissionsCache fieldPermissionsCache) {
        // now... every index that is associated with the request, must be granted
        // by at least one indices permission group
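The check/authorize split documented in the hunks above is what lets composite requests (such as bulk) be admitted on the coordinating node by action name alone and then re-authorized per index at the shard level. A rough sketch of that two-phase flow, assuming an IndicesPermission named permission plus metaData and fieldPermissionsCache are already in scope; the action strings and index name are illustrative only:

    // Phase 1, coordinating node: only the action name is known, the per-item
    // indices of the composite request are not resolved yet.
    if (permission.check("indices:data/write/bulk") == false) {
        throw new ElasticsearchSecurityException("action [indices:data/write/bulk] is unauthorized");
    }

    // Phase 2, shard level: the concrete indices are now known, so the same
    // permission resolves per-index access control, including field and document level security.
    Map<String, IndicesAccessControl.IndexAccessControl> accessControl =
            permission.authorize("indices:data/write/bulk[s]",
                    Collections.singleton("logs-2017.02"), metaData, fieldPermissionsCache);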
@@ -147,17 +117,11 @@ public interface IndicesPermission extends Permission, Iterable<IndicesPermission.Group> {
            if (group.check(action, indexOrAlias)) {
                granted = true;
                for (String index : concreteIndices) {
                    if (fieldPermissionsByIndex.get(index) == null) {
                        fieldPermissionsByIndex.put(index, new HashSet<>());
                    }
                    fieldPermissionsByIndex.get(index).add(group.getFieldPermissions());
                    Set<FieldPermissions> fieldPermissions = fieldPermissionsByIndex.computeIfAbsent(index, (k) -> new HashSet<>());
                    fieldPermissions.add(group.getFieldPermissions());
                    if (group.hasQuery()) {
                        Set<BytesReference> roleQueries = roleQueriesByIndex.get(index);
                        if (roleQueries == null) {
                            roleQueries = new HashSet<>();
                            roleQueriesByIndex.put(index, roleQueries);
                        }
                        roleQueries.add(group.getQuery());
                        Set<BytesReference> roleQueries = roleQueriesByIndex.computeIfAbsent(index, (k) -> new HashSet<>());
                        roleQueries.addAll(group.getQuery());
                    }
                }
            }

@@ -180,148 +144,20 @@ public interface IndicesPermission extends Permission, Iterable<IndicesPermission.Group> {
                roleQueries = unmodifiableSet(roleQueries);
            }

            FieldPermissions fieldPermissions = new FieldPermissions();
            Set<FieldPermissions> indexFieldPermissions = fieldPermissionsByIndex.get(index);
            if (indexFieldPermissions != null) {
                // get the first field permission entry because we do not want the merge to overwrite granted fields with null
                fieldPermissions = indexFieldPermissions.iterator().next();
                for (FieldPermissions fp : indexFieldPermissions) {
                    fieldPermissions = FieldPermissions.merge(fieldPermissions, fp);
                }
            final FieldPermissions fieldPermissions;
            final Set<FieldPermissions> indexFieldPermissions = fieldPermissionsByIndex.get(index);
            if (indexFieldPermissions != null && indexFieldPermissions.isEmpty() == false) {
                fieldPermissions = indexFieldPermissions.size() == 1 ? indexFieldPermissions.iterator().next() :
                        fieldPermissionsCache.getFieldPermissions(indexFieldPermissions);
            } else {
                fieldPermissions = FieldPermissions.DEFAULT;
            }
            indexPermissions.put(index, new IndicesAccessControl.IndexAccessControl(entry.getValue(), fieldPermissions, roleQueries));
        }
        return unmodifiableMap(indexPermissions);
    }

}

class Globals implements IndicesPermission {

    private final List<GlobalPermission> globals;

    public Globals(List<GlobalPermission> globals) {
        this.globals = globals;
    }

    @Override
    public Iterator<Group> iterator() {
        return globals == null || globals.isEmpty() ?
                Collections.<Group>emptyIterator() :
                new Globals.Iter(globals);
    }

    @Override
    public boolean isEmpty() {
        if (globals == null || globals.isEmpty()) {
            return true;
        }
        for (GlobalPermission global : globals) {
            if (!global.indices().isEmpty()) {
                return false;
            }
        }
        return true;
    }

    @Override
    public boolean check(String action) {
        if (globals == null) {
            return false;
        }
        for (GlobalPermission global : globals) {
            Objects.requireNonNull(global, "global must not be null");
            Objects.requireNonNull(global.indices(), "global.indices() must not be null");
            if (global.indices().check(action)) {
                return true;
            }
        }
        return false;
    }

    @Override
    public Map<String, IndicesAccessControl.IndexAccessControl> authorize(String action, Set<String> requestedIndicesOrAliases,
                                                                          MetaData metaData) {
        if (isEmpty()) {
            return emptyMap();
        }

        // What this code does is just merge `IndexAccessControl` instances from the permissions this class holds:
        Map<String, IndicesAccessControl.IndexAccessControl> indicesAccessControl = null;
        for (GlobalPermission permission : globals) {
            Map<String, IndicesAccessControl.IndexAccessControl> temp = permission.indices().authorize(action,
                    requestedIndicesOrAliases, metaData);
            if (indicesAccessControl == null) {
                indicesAccessControl = new HashMap<>(temp);
            } else {
                for (Map.Entry<String, IndicesAccessControl.IndexAccessControl> entry : temp.entrySet()) {
                    IndicesAccessControl.IndexAccessControl existing = indicesAccessControl.get(entry.getKey());
                    if (existing != null) {
                        indicesAccessControl.put(entry.getKey(), existing.merge(entry.getValue()));
                    } else {
                        indicesAccessControl.put(entry.getKey(), entry.getValue());
                    }
                }
            }
        }
        if (indicesAccessControl == null) {
            return emptyMap();
        } else {
            return unmodifiableMap(indicesAccessControl);
        }
    }

    static class Iter implements Iterator<Group> {

        private final Iterator<GlobalPermission> globals;
        private Iterator<Group> current;

        Iter(List<GlobalPermission> globals) {
            this.globals = globals.iterator();
            advance();
        }

        @Override
        public boolean hasNext() {
            return current != null && current.hasNext();
        }

        @Override
        public Group next() {
            Group group = current.next();
            advance();
            return group;
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }

        private void advance() {
            if (current != null && current.hasNext()) {
                return;
            }
            if (!globals.hasNext()) {
                // we've reached the end of the globals array
                current = null;
                return;
            }

            while (globals.hasNext()) {
                IndicesPermission indices = globals.next().indices();
                if (!indices.isEmpty()) {
                    current = indices.iterator();
                    return;
                }
            }

            current = null;
        }
    }
}

class Group {
public static class Group {
    private final IndexPrivilege privilege;
    private final Predicate<String> actionMatcher;
    private final String[] indices;

@@ -332,14 +168,14 @@ public interface IndicesPermission extends Permission, Iterable<IndicesPermission.Group> {
    }

    private final FieldPermissions fieldPermissions;
    private final BytesReference query;
    private final Set<BytesReference> query;

    public Group(IndexPrivilege privilege, FieldPermissions fieldPermissions, @Nullable BytesReference query, String... indices) {
    public Group(IndexPrivilege privilege, FieldPermissions fieldPermissions, @Nullable Set<BytesReference> query, String... indices) {
        assert indices.length != 0;
        this.privilege = privilege;
        this.actionMatcher = privilege.predicate();
        this.indices = indices;
        this.indexNameMatcher = new AutomatonPredicate(Automatons.patterns(indices));
        this.indexNameMatcher = Automatons.predicate(indices);
        this.fieldPermissions = Objects.requireNonNull(fieldPermissions);
        this.query = query;
    }

@@ -353,7 +189,7 @@ public interface IndicesPermission extends Permission, Iterable<IndicesPermission.Group> {
    }

    @Nullable
    public BytesReference getQuery() {
    public Set<BytesReference> getQuery() {
        return query;
    }

@@ -366,7 +202,7 @@ public interface IndicesPermission extends Permission, Iterable<IndicesPermission.Group> {
        return check(action) && indexNameMatcher.test(index);
    }

    public boolean hasQuery() {
    boolean hasQuery() {
        return query != null;
    }
}
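A notable change in the Group class above is that the document level security query is now a Set<BytesReference> instead of a single BytesReference, so one group can carry several role queries. A hypothetical construction (the privilege constant, index pattern and JSON query are illustrative, not taken from this commit):

    Set<BytesReference> roleQueries = new HashSet<>();
    roleQueries.add(new BytesArray("{\"term\": {\"department\": \"engineering\"}}"));

    IndicesPermission.Group group = new IndicesPermission.Group(
            IndexPrivilege.READ,          // privilege granted by this group (assumed constant)
            FieldPermissions.DEFAULT,     // no field level security restrictions
            roleQueries,                  // document level security queries
            "logs-*");                    // index name patterns this group applies to
    IndicesPermission permission = new IndicesPermission(group);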
@@ -1,32 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.authz.privilege.ClusterPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.Privilege.Name;
import org.elasticsearch.xpack.security.support.MetadataUtils;

/**
 * Role for users that should be allowed to use the Add Data/Ingest features in the UI
 */
public class IngestAdminRole extends Role {

    private static final String[] CLUSTER_PRIVILEGES = new String[] { "manage_index_templates", "manage_pipeline" };
    private static final RoleDescriptor.IndicesPrivileges[] INDICES_PRIVILEGES = new RoleDescriptor.IndicesPrivileges[0];

    public static final String NAME = "ingest_admin";
    public static final RoleDescriptor DESCRIPTOR =
            new RoleDescriptor(NAME, CLUSTER_PRIVILEGES, INDICES_PRIVILEGES, null, MetadataUtils.DEFAULT_RESERVED_METADATA);
    public static final IngestAdminRole INSTANCE = new IngestAdminRole();

    private IngestAdminRole() {
        super(DESCRIPTOR.getName(),
                new ClusterPermission.Core(ClusterPrivilege.get(new Name(DESCRIPTOR.getClusterPrivileges()))),
                new IndicesPermission.Core(Role.Builder.convertFromIndicesPrivileges(DESCRIPTOR.getIndicesPrivileges())),
                RunAsPermission.Core.NONE);
    }
}
@@ -1,31 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.xpack.monitoring.action.MonitoringBulkAction;
import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.authz.privilege.ClusterPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.Privilege.Name;
import org.elasticsearch.xpack.security.support.MetadataUtils;

public class KibanaRole extends Role {

    private static final String[] CLUSTER_PRIVILEGES = new String[] { "monitor", MonitoringBulkAction.NAME};
    private static final RoleDescriptor.IndicesPrivileges[] INDICES_PRIVILEGES = new RoleDescriptor.IndicesPrivileges[] {
            RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*", ".reporting-*").privileges("all").build() };

    public static final String NAME = "kibana";
    public static final RoleDescriptor DESCRIPTOR =
            new RoleDescriptor(NAME, CLUSTER_PRIVILEGES, INDICES_PRIVILEGES, null, MetadataUtils.DEFAULT_RESERVED_METADATA);
    public static final KibanaRole INSTANCE = new KibanaRole();

    private KibanaRole() {
        super(DESCRIPTOR.getName(),
                new ClusterPermission.Core(ClusterPrivilege.get(new Name(DESCRIPTOR.getClusterPrivileges()))),
                new IndicesPermission.Core(Role.Builder.convertFromIndicesPrivileges(DESCRIPTOR.getIndicesPrivileges())),
                RunAsPermission.Core.NONE);
    }
}
@@ -1,30 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.authz.privilege.ClusterPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.Privilege.Name;
import org.elasticsearch.xpack.security.support.MetadataUtils;

public class KibanaUserRole extends Role {

    private static final String[] CLUSTER_PRIVILEGES = new String[] { "monitor" };
    private static final RoleDescriptor.IndicesPrivileges[] INDICES_PRIVILEGES = new RoleDescriptor.IndicesPrivileges[] {
            RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*").privileges("manage", "read", "index", "delete").build() };

    public static final String NAME = "kibana_user";
    public static final RoleDescriptor DESCRIPTOR =
            new RoleDescriptor(NAME, CLUSTER_PRIVILEGES, INDICES_PRIVILEGES, null, MetadataUtils.DEFAULT_RESERVED_METADATA);
    public static final KibanaUserRole INSTANCE = new KibanaUserRole();

    private KibanaUserRole() {
        super(DESCRIPTOR.getName(),
                new ClusterPermission.Core(ClusterPrivilege.get(new Name(DESCRIPTOR.getClusterPrivileges()))),
                new IndicesPermission.Core(Role.Builder.convertFromIndicesPrivileges(DESCRIPTOR.getIndicesPrivileges())),
                RunAsPermission.Core.NONE);
    }
}
@@ -1,34 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.authz.permission.ClusterPermission.Core;
import org.elasticsearch.xpack.security.support.MetadataUtils;

/**
 * A built-in role that grants users the necessary privileges to use Monitoring. The user will also need the {@link KibanaUserRole}
 */
public class MonitoringUserRole extends Role {

    private static final RoleDescriptor.IndicesPrivileges[] INDICES_PRIVILEGES = new RoleDescriptor.IndicesPrivileges[] {
            RoleDescriptor.IndicesPrivileges.builder()
                    .indices(".marvel-es-*", ".monitoring-*")
                    .privileges("read")
                    .build() };

    public static final String NAME = "monitoring_user";
    public static final RoleDescriptor DESCRIPTOR =
            new RoleDescriptor(NAME, null, INDICES_PRIVILEGES, null, MetadataUtils.DEFAULT_RESERVED_METADATA);
    public static final MonitoringUserRole INSTANCE = new MonitoringUserRole();

    private MonitoringUserRole() {
        super(DESCRIPTOR.getName(),
                Core.NONE,
                new IndicesPermission.Core(Role.Builder.convertFromIndicesPrivileges(DESCRIPTOR.getIndicesPrivileges())),
                RunAsPermission.Core.NONE);
    }
}
@@ -1,15 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.permission;

/**
 * Represents a permission in the system.
 */
public interface Permission {

    boolean isEmpty();

}
@@ -1,36 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.authz.privilege.ClusterPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.Privilege.Name;
import org.elasticsearch.xpack.security.support.MetadataUtils;

/**
 * Built-in role that grants the necessary privileges for a remote monitoring agent.
 */
public class RemoteMonitoringAgentRole extends Role {

    private static final String[] CLUSTER_PRIVILEGES = new String[] { "manage_index_templates", "manage_ingest_pipelines", "monitor" };
    private static final RoleDescriptor.IndicesPrivileges[] INDICES_PRIVILEGES = new RoleDescriptor.IndicesPrivileges[] {
            RoleDescriptor.IndicesPrivileges.builder()
                    .indices(".marvel-es-*", ".monitoring-*")
                    .privileges("all")
                    .build() };

    public static final String NAME = "remote_monitoring_agent";
    public static final RoleDescriptor DESCRIPTOR =
            new RoleDescriptor(NAME, CLUSTER_PRIVILEGES, INDICES_PRIVILEGES, null, MetadataUtils.DEFAULT_RESERVED_METADATA);
    public static final RemoteMonitoringAgentRole INSTANCE = new RemoteMonitoringAgentRole();

    private RemoteMonitoringAgentRole() {
        super(DESCRIPTOR.getName(),
                new ClusterPermission.Core(ClusterPrivilege.get(new Name(DESCRIPTOR.getClusterPrivileges()))),
                new IndicesPermission.Core(Role.Builder.convertFromIndicesPrivileges(DESCRIPTOR.getIndicesPrivileges())),
                RunAsPermission.Core.NONE);
    }
}
@@ -1,34 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.authz.permission.ClusterPermission.Core;
import org.elasticsearch.xpack.security.support.MetadataUtils;

/**
 * A role for users of the reporting features in xpack
 */
public class ReportingUserRole extends Role {
    private static final RoleDescriptor.IndicesPrivileges[] INDICES_PRIVILEGES = new RoleDescriptor.IndicesPrivileges[] {
            RoleDescriptor.IndicesPrivileges.builder()
                    .indices(".reporting-*")
                    .privileges("read", "write")
                    .build()
    };

    public static final String NAME = "reporting_user";
    public static final RoleDescriptor DESCRIPTOR =
            new RoleDescriptor(NAME, null, INDICES_PRIVILEGES, null, MetadataUtils.DEFAULT_RESERVED_METADATA);
    public static final ReportingUserRole INSTANCE = new ReportingUserRole();

    private ReportingUserRole() {
        super(DESCRIPTOR.getName(),
                Core.NONE,
                new IndicesPermission.Core(Role.Builder.convertFromIndicesPrivileges(DESCRIPTOR.getIndicesPrivileges())),
                RunAsPermission.Core.NONE);
    }
}
@@ -5,110 +5,155 @@
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.authz.accesscontrol.IndicesAccessControl;
import org.elasticsearch.xpack.security.authz.accesscontrol.IndicesAccessControl.IndexAccessControl;
import org.elasticsearch.xpack.security.authz.privilege.ClusterPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.GeneralPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.IndexPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.Privilege;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

public class Role extends GlobalPermission {
public final class Role {

    public static final Role EMPTY = Role.builder("__empty").build();

    private final String name;
    private final ClusterPermission cluster;
    private final IndicesPermission indices;
    private final RunAsPermission runAs;

    Role(String name, ClusterPermission.Core cluster, IndicesPermission.Core indices, RunAsPermission.Core runAs) {
        super(cluster, indices, runAs);
    Role(String name, ClusterPermission cluster, IndicesPermission indices, RunAsPermission runAs) {
        this.name = name;
        this.cluster = Objects.requireNonNull(cluster);
        this.indices = Objects.requireNonNull(indices);
        this.runAs = Objects.requireNonNull(runAs);
    }

    public String name() {
        return name;
    }

    @Override
    public ClusterPermission.Core cluster() {
        return (ClusterPermission.Core) super.cluster();
    public ClusterPermission cluster() {
        return cluster;
    }

    @Override
    public IndicesPermission.Core indices() {
        return (IndicesPermission.Core) super.indices();
    public IndicesPermission indices() {
        return indices;
    }

    @Override
    public RunAsPermission.Core runAs() {
        return (RunAsPermission.Core) super.runAs();
    public RunAsPermission runAs() {
        return runAs;
    }

    public static Builder builder(String name) {
        return new Builder(name);
        return new Builder(name, null);
    }

    public static Builder builder(RoleDescriptor rd) {
        return new Builder(rd);
    public static Builder builder(String name, FieldPermissionsCache fieldPermissionsCache) {
        return new Builder(name, fieldPermissionsCache);
    }

    public static Builder builder(RoleDescriptor rd, FieldPermissionsCache fieldPermissionsCache) {
        return new Builder(rd, fieldPermissionsCache);
    }

    /**
     * Returns whether at least one group encapsulated by this indices permissions is authorized to execute the
     * specified action with the requested indices/aliases. At the same time if field and/or document level security
     * is configured for any group also the allowed fields and role queries are resolved.
     */
    public IndicesAccessControl authorize(String action, Set<String> requestedIndicesOrAliases, MetaData metaData,
                                          FieldPermissionsCache fieldPermissionsCache) {
        Map<String, IndexAccessControl> indexPermissions = indices.authorize(
                action, requestedIndicesOrAliases, metaData, fieldPermissionsCache
        );

        // At least one role / indices permission set need to match with all the requested indices/aliases:
        boolean granted = true;
        for (Map.Entry<String, IndicesAccessControl.IndexAccessControl> entry : indexPermissions.entrySet()) {
            if (!entry.getValue().isGranted()) {
                granted = false;
                break;
            }
        }
        return new IndicesAccessControl(granted, indexPermissions);
    }
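As the Javadoc above notes, authorization succeeds only when every requested index or alias is granted by at least one group of the role. A short, hedged usage sketch (role, metaData and fieldPermissionsCache are assumed to be in scope; the action and index names are placeholders):

    IndicesAccessControl accessControl = role.authorize("indices:data/read/search",
            Sets.newHashSet("logs-2017.02", "metrics-*"), metaData, fieldPermissionsCache);
    if (accessControl.isGranted() == false) {
        // at least one requested index/alias was not covered by any group of the role
        throw new ElasticsearchSecurityException("access to requested indices is denied");
    }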
    public static class Builder {

        private final String name;
        private ClusterPermission.Core cluster = ClusterPermission.Core.NONE;
        private RunAsPermission.Core runAs = RunAsPermission.Core.NONE;
        private ClusterPermission cluster = ClusterPermission.NONE;
        private RunAsPermission runAs = RunAsPermission.NONE;
        private List<IndicesPermission.Group> groups = new ArrayList<>();
        private FieldPermissionsCache fieldPermissionsCache = null;

        private Builder(String name) {
        private Builder(String name, FieldPermissionsCache fieldPermissionsCache) {
            this.name = name;
            this.fieldPermissionsCache = fieldPermissionsCache;
        }

        private Builder(RoleDescriptor rd) {
        private Builder(RoleDescriptor rd, @Nullable FieldPermissionsCache fieldPermissionsCache) {
            this.name = rd.getName();
            this.fieldPermissionsCache = fieldPermissionsCache;
            if (rd.getClusterPrivileges().length == 0) {
                cluster = ClusterPermission.Core.NONE;
                cluster = ClusterPermission.NONE;
            } else {
                this.cluster(ClusterPrivilege.get((new Privilege.Name(rd.getClusterPrivileges()))));
                this.cluster(ClusterPrivilege.get(Sets.newHashSet(rd.getClusterPrivileges())));
            }
            groups.addAll(convertFromIndicesPrivileges(rd.getIndicesPrivileges()));
            groups.addAll(convertFromIndicesPrivileges(rd.getIndicesPrivileges(), fieldPermissionsCache));
            String[] rdRunAs = rd.getRunAs();
            if (rdRunAs != null && rdRunAs.length > 0) {
                this.runAs(new GeneralPrivilege(new Privilege.Name(rdRunAs), rdRunAs));
                this.runAs(new Privilege(Sets.newHashSet(rdRunAs), rdRunAs));
            }
        }

        // FIXME we should throw an exception if we have already set cluster or runAs...
        public Builder cluster(ClusterPrivilege privilege) {
            cluster = new ClusterPermission.Core(privilege);
            cluster = new ClusterPermission(privilege);
            return this;
        }

        public Builder runAs(GeneralPrivilege privilege) {
            runAs = new RunAsPermission.Core(privilege);
        public Builder runAs(Privilege privilege) {
            runAs = new RunAsPermission(privilege);
            return this;
        }

        public Builder add(IndexPrivilege privilege, String... indices) {
            groups.add(new IndicesPermission.Group(privilege, new FieldPermissions(), null, indices));
            groups.add(new IndicesPermission.Group(privilege, FieldPermissions.DEFAULT, null, indices));
            return this;
        }

        public Builder add(FieldPermissions fieldPermissions, BytesReference query, IndexPrivilege privilege, String... indices) {
        public Builder add(FieldPermissions fieldPermissions, Set<BytesReference> query, IndexPrivilege privilege, String... indices) {
            groups.add(new IndicesPermission.Group(privilege, fieldPermissions, query, indices));
            return this;
        }

        public Role build() {
            IndicesPermission.Core indices = groups.isEmpty() ? IndicesPermission.Core.NONE :
                    new IndicesPermission.Core(groups.toArray(new IndicesPermission.Group[groups.size()]));
            IndicesPermission indices = groups.isEmpty() ? IndicesPermission.NONE :
                    new IndicesPermission(groups.toArray(new IndicesPermission.Group[groups.size()]));
            return new Role(name, cluster, indices, runAs);
        }

        static List<IndicesPermission.Group> convertFromIndicesPrivileges(RoleDescriptor.IndicesPrivileges[] indicesPrivileges) {
        static List<IndicesPermission.Group> convertFromIndicesPrivileges(RoleDescriptor.IndicesPrivileges[] indicesPrivileges,
                                                                          @Nullable FieldPermissionsCache fieldPermissionsCache) {
            List<IndicesPermission.Group> list = new ArrayList<>(indicesPrivileges.length);
            for (RoleDescriptor.IndicesPrivileges privilege : indicesPrivileges) {
                list.add(new IndicesPermission.Group(IndexPrivilege.get(new Privilege.Name(privilege.getPrivileges())),
                        privilege.getFieldPermissions(),
                        privilege.getQuery(),
                final FieldPermissions fieldPermissions = fieldPermissionsCache != null ?
                        fieldPermissionsCache.getFieldPermissions(privilege.getGrantedFields(), privilege.getDeniedFields()) :
                        new FieldPermissions(privilege.getGrantedFields(), privilege.getDeniedFields());
                final Set<BytesReference> query = privilege.getQuery() == null ? null : Collections.singleton(privilege.getQuery());
                list.add(new IndicesPermission.Group(IndexPrivilege.get(Sets.newHashSet(privilege.getPrivileges())),
                        fieldPermissions,
                        query,
                        privilege.getIndices()));

            }
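With the builder signatures above, a Role is normally built from a RoleDescriptor together with a shared FieldPermissionsCache so that identical field level security definitions are reused across roles; it can also be assembled programmatically. A hedged sketch (descriptor and fieldPermissionsCache are assumed to exist, and the privilege constants are used only for illustration):

    // From a role descriptor, e.g. as loaded by the role store:
    Role fromDescriptor = Role.builder(descriptor, fieldPermissionsCache).build();

    // Assembled by hand:
    Role logsReader = Role.builder("logs_reader", fieldPermissionsCache)
            .cluster(ClusterPrivilege.MONITOR)     // assumed constant, for illustration
            .add(IndexPrivilege.READ, "logs-*")    // assumed constant, for illustration
            .build();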
@@ -5,76 +5,28 @@
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.xpack.security.authz.privilege.GeneralPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.Privilege;

import java.util.List;
import java.util.function.Predicate;

/**
 * A permissions that is based on a general privilege that contains patterns of users that this
 * user can execute a request as
 */
public interface RunAsPermission extends Permission {
public final class RunAsPermission {

    public static final RunAsPermission NONE = new RunAsPermission(Privilege.NONE);

    private final Predicate<String> predicate;

    RunAsPermission(Privilege privilege) {
        this.predicate = privilege.predicate();
    }

    /**
     * Checks if this permission grants run as to the specified user
     */
    boolean check(String username);

    class Core implements RunAsPermission {

        public static final Core NONE = new Core(GeneralPrivilege.NONE);

        private final GeneralPrivilege privilege;
        private final Predicate<String> predicate;

        public Core(GeneralPrivilege privilege) {
            this.privilege = privilege;
            this.predicate = privilege.predicate();
        }

        @Override
        public boolean check(String username) {
            return predicate.test(username);
        }

        @Override
        public boolean isEmpty() {
            return this == NONE;
        }
    }

    class Globals implements RunAsPermission {
        private final List<GlobalPermission> globals;

        public Globals(List<GlobalPermission> globals) {
            this.globals = globals;
        }

        @Override
        public boolean check(String username) {
            if (globals == null) {
                return false;
            }
            for (GlobalPermission global : globals) {
                if (global.runAs().check(username)) {
                    return true;
                }
            }
            return false;
        }

        @Override
        public boolean isEmpty() {
            if (globals == null || globals.isEmpty()) {
                return true;
            }
            for (GlobalPermission global : globals) {
                if (!global.isEmpty()) {
                    return false;
                }
            }
            return true;
        }
    }
}
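RunAsPermission now simply wraps a Privilege whose patterns describe which usernames the owner may impersonate. A hedged sketch of wiring it through the Role builder shown earlier (the patterns and the checked user name are made up for illustration):

    Privilege runAsPrivilege = new Privilege(Sets.newHashSet("app-*", "kibana"), "app-*", "kibana");
    Role role = Role.builder("run_as_example")
            .runAs(runAsPrivilege)
            .build();

    boolean allowed = role.runAs().check("app-reporting");   // true: matches the "app-*" pattern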
@@ -1,30 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.authz.privilege.ClusterPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.GeneralPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.Privilege.Name;
import org.elasticsearch.xpack.security.support.MetadataUtils;

public class SuperuserRole extends Role {

    public static final String NAME = "superuser";
    public static final RoleDescriptor DESCRIPTOR = new RoleDescriptor(NAME, new String[] { "all" },
            new RoleDescriptor.IndicesPrivileges[] {
                    RoleDescriptor.IndicesPrivileges.builder().indices("*").privileges("all").build()},
            new String[] { "*" },
            MetadataUtils.DEFAULT_RESERVED_METADATA);
    public static final SuperuserRole INSTANCE = new SuperuserRole();

    private SuperuserRole() {
        super(DESCRIPTOR.getName(),
                new ClusterPermission.Core(ClusterPrivilege.get(new Name(DESCRIPTOR.getClusterPrivileges()))),
                new IndicesPermission.Core(Role.Builder.convertFromIndicesPrivileges(DESCRIPTOR.getIndicesPrivileges())),
                new RunAsPermission.Core(GeneralPrivilege.ALL));
    }
}
@@ -1,30 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.permission;

import org.elasticsearch.xpack.security.authz.RoleDescriptor;
import org.elasticsearch.xpack.security.authz.privilege.ClusterPrivilege;
import org.elasticsearch.xpack.security.authz.privilege.Privilege.Name;
import org.elasticsearch.xpack.security.support.MetadataUtils;

/**
 * Reserved role for the transport client
 */
public class TransportClientRole extends Role {

    public static final String NAME = "transport_client";
    private static final String[] CLUSTER_PRIVILEGES = new String[] { "transport_client" };

    public static final RoleDescriptor DESCRIPTOR =
            new RoleDescriptor(NAME, CLUSTER_PRIVILEGES, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA);
    public static final TransportClientRole INSTANCE = new TransportClientRole();

    private TransportClientRole() {
        super(DESCRIPTOR.getName(),
                new ClusterPermission.Core(ClusterPrivilege.get(new Name(DESCRIPTOR.getClusterPrivileges()))),
                IndicesPermission.Core.NONE, RunAsPermission.Core.NONE);
    }
}
@@ -1,67 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.security.authz.privilege;

import org.apache.lucene.util.automaton.Automaton;
import org.elasticsearch.xpack.security.support.AutomatonPredicate;
import org.elasticsearch.xpack.security.support.Automatons;

import java.util.function.Predicate;

import static org.apache.lucene.util.automaton.Operations.subsetOf;
import static org.elasticsearch.xpack.security.support.Automatons.patterns;

@SuppressWarnings("unchecked")
abstract class AbstractAutomatonPrivilege<P extends AbstractAutomatonPrivilege<P>> extends Privilege<P> {

    protected final Automaton automaton;

    AbstractAutomatonPrivilege(String name, String... patterns) {
        super(new Name(name));
        this.automaton = patterns(patterns);
    }

    AbstractAutomatonPrivilege(Name name, String... patterns) {
        super(name);
        this.automaton = patterns(patterns);
    }

    AbstractAutomatonPrivilege(Name name, Automaton automaton) {
        super(name);
        this.automaton = automaton;
    }

    @Override
    public Predicate<String> predicate() {
        return new AutomatonPredicate(automaton);
    }

    protected P plus(P other) {
        if (other.implies((P) this)) {
            return other;
        }
        if (this.implies(other)) {
            return (P) this;
        }
        return create(name.add(other.name), Automatons.unionAndDeterminize(automaton, other.automaton));
    }

    @Override
    public boolean implies(P other) {
        return subsetOf(other.automaton, automaton);
    }

    @Override
    public String toString() {
        return name.toString();
    }

    protected abstract P create(Name name, Automaton automaton);

    protected abstract P none();

}
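The removed AbstractAutomatonPrivilege encoded privilege implication as automaton containment: one privilege implies another when the other's automaton accepts only a subset of its own language. A hedged illustration of that idea using the same Lucene operation the class relied on (the action patterns are examples only):

    Automaton all = Automatons.patterns("indices:*");
    Automaton read = Automatons.patterns("indices:data/read/*");

    // "indices:*" implies "indices:data/read/*": every action matched by the
    // narrower automaton is also matched by the broader one.
    boolean implied = subsetOf(read, all);   // expected: true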
Some files were not shown because too many files have changed in this diff.