Merge branch 'master' into index-lifecycle
client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java
/Users/colings86/dev/work/git/elasticsearch/.git/worktrees/elasticsearch-ilm/MERGE_HEAD
client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java
client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java
client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherIT.java
client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/LicensingDocumentationIT.java
client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExplainError.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EExplicit.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EInstanceof.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EListInit.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EMapInit.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewArray.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewObj.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EStatic.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubMapShortcut.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java
modules/lang-painless/src/test/java/org/elasticsearch/painless/OverloadTests.java
modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java
modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java
modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java
rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml
server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java
server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
server/src/main/java/org/elasticsearch/index/engine/Engine.java
server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java
server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java
server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java
server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
server/src/main/java/org/elasticsearch/index/translog/Translog.java
server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java
server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java
server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java
server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java
server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java
server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java
server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java
server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java
server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java
server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MetadataUtils.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/MetricsConfigSerializingTests.java -> x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/MetricConfigSerializingTests.java
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java
x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessor.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java
x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserTests.java -> x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java
x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionUtils.java
x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DefaultDetectorDescription.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRule.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Detector.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorFunction.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRef.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilter.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Operator.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleAction.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleCondition.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScope.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/package-info.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java -> x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/User.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRuleTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRefTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilterTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleConditionTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScopeTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/security/UserTests.java
x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java
x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml
x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml
x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml
x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java
x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java
x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java
commit 20915a9baf
@@ -205,6 +205,9 @@ public class RestHighLevelClient implements Closeable {
     private final SnapshotClient snapshotClient = new SnapshotClient(this);
     private final TasksClient tasksClient = new TasksClient(this);
     private final XPackClient xPackClient = new XPackClient(this);
+    private final WatcherClient watcherClient = new WatcherClient(this);
+    private final LicenseClient licenseClient = new LicenseClient(this);
+    private final IndexLifecycleClient ilmClient = new IndexLifecycleClient(this);

     /**
      * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the
@@ -296,18 +299,47 @@ public class RestHighLevelClient implements Closeable {
     }

     /**
-     * A wrapper for the {@link RestHighLevelClient} that provides methods for
-     * accessing the Elastic Licensed X-Pack APIs that are shipped with the
-     * default distribution of Elasticsearch. All of these APIs will 404 if run
-     * against the OSS distribution of Elasticsearch.
+     * Provides methods for accessing the Elastic Licensed X-Pack Info
+     * and Usage APIs that are shipped with the default distribution of
+     * Elasticsearch. All of these APIs will 404 if run against the OSS
+     * distribution of Elasticsearch.
      * <p>
-     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-api.html">
-     * X-Pack APIs on elastic.co</a> for more information.
+     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html">
+     * Info APIs on elastic.co</a> for more information.
      */
     public final XPackClient xpack() {
         return xPackClient;
     }

+    /**
+     * Provides methods for accessing the Elastic Licensed Watcher APIs that
+     * are shipped with the default distribution of Elasticsearch. All of
+     * these APIs will 404 if run against the OSS distribution of Elasticsearch.
+     * <p>
+     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api.html">
+     * Watcher APIs on elastic.co</a> for more information.
+     */
+    public WatcherClient watcher() { return watcherClient; }
+
+    /**
+     * Provides methods for accessing the Elastic Licensed Licensing APIs that
+     * are shipped with the default distribution of Elasticsearch. All of
+     * these APIs will 404 if run against the OSS distribution of Elasticsearch.
+     * <p>
+     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/licensing-apis.html">
+     * Licensing APIs on elastic.co</a> for more information.
+     */
+    public LicenseClient license() { return licenseClient; }
+
+    /**
+     * A wrapper for the {@link RestHighLevelClient} that provides methods for
+     * accessing the Elastic Index Lifecycle APIs.
+     * <p>
+     * See the <a href="http://FILL-ME-IN-WE-HAVE-NO-DOCS-YET.com"> X-Pack APIs
+     * on elastic.co</a> for more information.
+     */
+    public IndexLifecycleClient indexLifecycle() { return ilmClient; }
+
     /**
      * Executes a bulk request using the Bulk API.
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
@@ -41,19 +41,9 @@ import static java.util.Collections.emptySet;
 public final class XPackClient {

     private final RestHighLevelClient restHighLevelClient;
-    private final WatcherClient watcherClient;
-    private final LicenseClient licenseClient;
-    private final IndexLifecycleClient indexLifecycleClient;

     XPackClient(RestHighLevelClient restHighLevelClient) {
         this.restHighLevelClient = restHighLevelClient;
-        this.watcherClient = new WatcherClient(restHighLevelClient);
-        this.licenseClient = new LicenseClient(restHighLevelClient);
-        this.indexLifecycleClient = new IndexLifecycleClient(restHighLevelClient);
-    }
-
-    public WatcherClient watcher() {
-        return watcherClient;
     }

     /**
@@ -104,26 +94,4 @@ public final class XPackClient {
         restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xpackUsage, options,
             XPackUsageResponse::fromXContent, listener, emptySet());
     }
-
-    /**
-     * A wrapper for the {@link RestHighLevelClient} that provides methods for
-     * accessing the Elastic Licensing APIs.
-     * <p>
-     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/licensing-apis.html">
-     * X-Pack APIs on elastic.co</a> for more information.
-     */
-    public LicenseClient license() {
-        return licenseClient;
-    }
-
-    /**
-     * A wrapper for the {@link RestHighLevelClient} that provides methods for
-     * accessing the Elastic Index Lifecycle APIs.
-     * <p>
-     * See the <a href="http://FILL-ME-IN-WE-HAVE-NO-DOCS-YET.com">
-     * X-Pack APIs on elastic.co</a> for more information.
-     */
-    public IndexLifecycleClient indexLifecycle() {
-        return this.indexLifecycleClient;
-    }
 }
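Taken together, the two diffs above move the Watcher, Licensing, and Index Lifecycle entry points off XPackClient and onto RestHighLevelClient itself. A minimal before/after sketch of the call-site change; the already-built client, the watch id, and the protocol package of DeleteWatchRequest are illustrative assumptions, not part of this commit:

    import java.io.IOException;

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.RestHighLevelClient;
    // assumed x-pack protocol module layout on this branch:
    import org.elasticsearch.protocol.xpack.watcher.DeleteWatchRequest;
    import org.elasticsearch.protocol.xpack.watcher.DeleteWatchResponse;

    class AccessorMigrationSketch {
        static DeleteWatchResponse deleteWatch(RestHighLevelClient client, String watchId) throws IOException {
            // before this merge: client.xpack().watcher().deleteWatch(...)
            // after it, the accessor lives directly on the client:
            return client.watcher().deleteWatch(new DeleteWatchRequest(watchId), RequestOptions.DEFAULT);
        }
    }

The same move applies to client.license() and client.indexLifecycle(), as the updated tests below show.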
@@ -90,8 +90,8 @@ public class IndexLifecycleIT extends ESRestHighLevelClientTestCase {
         createIndex("foo", Settings.builder().put("index.lifecycle.name", "bar").build());
         createIndex("baz", Settings.builder().put("index.lifecycle.name", "eggplant").build());
         SetIndexLifecyclePolicyRequest req = new SetIndexLifecyclePolicyRequest(policy, "foo", "baz");
-        SetIndexLifecyclePolicyResponse response = execute(req, highLevelClient().xpack().indexLifecycle()::setIndexLifecyclePolicy,
-            highLevelClient().xpack().indexLifecycle()::setIndexLifecyclePolicyAsync);
+        SetIndexLifecyclePolicyResponse response = execute(req, highLevelClient().indexLifecycle()::setIndexLifecyclePolicy,
+            highLevelClient().indexLifecycle()::setIndexLifecyclePolicyAsync);
         assertThat(response.hasFailures(), is(false));
         assertThat(response.getFailedIndexes().isEmpty(), is(true));
@@ -755,7 +755,9 @@ public class RestHighLevelClientTests extends ESTestCase {
                     method.isAnnotationPresent(Deprecated.class));
             } else {
                 //TODO xpack api are currently ignored, we need to load xpack yaml spec too
-                if (apiName.startsWith("xpack.") == false) {
+                if (apiName.startsWith("xpack.") == false &&
+                    apiName.startsWith("license.") == false &&
+                    apiName.startsWith("watcher.") == false) {
                     apiNotFound.add(apiName);
                 }
             }
@@ -46,7 +46,7 @@ public class WatcherIT extends ESRestHighLevelClientTestCase {
             "}";
         BytesReference bytesReference = new BytesArray(json);
         PutWatchRequest putWatchRequest = new PutWatchRequest(watchId, bytesReference, XContentType.JSON);
-        return highLevelClient().xpack().watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);
+        return highLevelClient().watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);
     }

     public void testDeleteWatch() throws Exception {
@@ -54,7 +54,7 @@ public class WatcherIT extends ESRestHighLevelClientTestCase {
         {
             String watchId = randomAlphaOfLength(10);
             createWatch(watchId);
-            DeleteWatchResponse deleteWatchResponse = highLevelClient().xpack().watcher().deleteWatch(new DeleteWatchRequest(watchId),
+            DeleteWatchResponse deleteWatchResponse = highLevelClient().watcher().deleteWatch(new DeleteWatchRequest(watchId),
                 RequestOptions.DEFAULT);
             assertThat(deleteWatchResponse.getId(), is(watchId));
             assertThat(deleteWatchResponse.getVersion(), is(2L));
@@ -64,7 +64,7 @@ public class WatcherIT extends ESRestHighLevelClientTestCase {
         // delete watch that does not exist
         {
             String watchId = randomAlphaOfLength(10);
-            DeleteWatchResponse deleteWatchResponse = highLevelClient().xpack().watcher().deleteWatch(new DeleteWatchRequest(watchId),
+            DeleteWatchResponse deleteWatchResponse = highLevelClient().watcher().deleteWatch(new DeleteWatchRequest(watchId),
                 RequestOptions.DEFAULT);
             assertThat(deleteWatchResponse.getId(), is(watchId));
             assertThat(deleteWatchResponse.getVersion(), is(1L));
@@ -62,7 +62,7 @@ public class LicensingDocumentationIT extends ESRestHighLevelClientTestCase {
         request.setLicenseDefinition(license); // <1>
         request.setAcknowledge(false); // <2>

-        PutLicenseResponse response = client.xpack().license().putLicense(request, RequestOptions.DEFAULT);
+        PutLicenseResponse response = client.license().putLicense(request, RequestOptions.DEFAULT);
         //end::put-license-execute

         //tag::put-license-response
@@ -98,7 +98,7 @@ public class LicensingDocumentationIT extends ESRestHighLevelClientTestCase {
         listener = new LatchedActionListener<>(listener, latch);

         // tag::put-license-execute-async
-        client.xpack().license().putLicenseAsync(
+        client.license().putLicenseAsync(
             request, RequestOptions.DEFAULT, listener); // <1>
         // end::put-license-execute-async
@@ -49,7 +49,7 @@ public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
             "}");
         PutWatchRequest request = new PutWatchRequest("my_watch_id", watch, XContentType.JSON);
         request.setActive(false); // <1>
-        PutWatchResponse response = client.xpack().watcher().putWatch(request, RequestOptions.DEFAULT);
+        PutWatchResponse response = client.watcher().putWatch(request, RequestOptions.DEFAULT);
         //end::x-pack-put-watch-execute

         //tag::x-pack-put-watch-response
@@ -85,7 +85,7 @@ public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
         listener = new LatchedActionListener<>(listener, latch);

         // tag::x-pack-put-watch-execute-async
-        client.xpack().watcher().putWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
+        client.watcher().putWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
         // end::x-pack-put-watch-execute-async

         assertTrue(latch.await(30L, TimeUnit.SECONDS));
@@ -94,7 +94,7 @@ public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
         {
             //tag::x-pack-delete-watch-execute
             DeleteWatchRequest request = new DeleteWatchRequest("my_watch_id");
-            DeleteWatchResponse response = client.xpack().watcher().deleteWatch(request, RequestOptions.DEFAULT);
+            DeleteWatchResponse response = client.watcher().deleteWatch(request, RequestOptions.DEFAULT);
             //end::x-pack-delete-watch-execute

             //tag::x-pack-delete-watch-response
@@ -125,7 +125,7 @@ public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
         listener = new LatchedActionListener<>(listener, latch);

         // tag::x-pack-delete-watch-execute-async
-        client.xpack().watcher().deleteWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
+        client.watcher().deleteWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
         // end::x-pack-delete-watch-execute-async

         assertTrue(latch.await(30L, TimeUnit.SECONDS));
@@ -22,6 +22,7 @@ Will return, for example:
     "successful" : 1,
     "failed" : 0
   },
+  "cluster_uuid": "YjAvIhsCQ9CbjWZb2qJw3Q",
   "cluster_name": "elasticsearch",
   "timestamp": 1459427693515,
   "status": "green",
@@ -96,7 +96,7 @@ final class Compiler {
             if (statefulFactoryClass != null && statefulFactoryClass.getName().equals(name)) {
                 return statefulFactoryClass;
             }
-            Class<?> found = painlessLookup.getClassFromBinaryName(name);
+            Class<?> found = painlessLookup.canonicalTypeNameToType(name.replace('$', '.'));

             return found != null ? found : super.findClass(name);
         }
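The Compiler hunk above drops the dedicated getClassFromBinaryName and instead converts the binary name at the call site before querying the canonical-name lookup. A small standalone illustration of that conversion; the nested-class name is an example, not taken from this commit:

    class NameConversionSketch {
        public static void main(String[] args) {
            // A ClassLoader reports nested classes by binary name, using '$':
            String binaryName = "java.util.Map$Entry";
            // PainlessLookup keys classes by canonical name, using '.' throughout,
            // which is exactly what name.replace('$', '.') produces in the hunk above:
            String canonicalName = binaryName.replace('$', '.');
            System.out.println(canonicalName); // prints: java.util.Map.Entry
        }
    }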
@@ -187,7 +187,7 @@ public final class Def {
         String key = PainlessLookupUtility.buildPainlessMethodKey(name, arity);
         // check whitelist for matching method
         for (Class<?> clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) {
-            PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(clazz);
+            PainlessClass struct = painlessLookup.lookupPainlessClass(clazz);

             if (struct != null) {
                 PainlessMethod method = struct.methods.get(key);
@@ -197,7 +197,7 @@
             }

             for (Class<?> iface : clazz.getInterfaces()) {
-                struct = painlessLookup.getPainlessStructFromJavaClass(iface);
+                struct = painlessLookup.lookupPainlessClass(iface);

                 if (struct != null) {
                     PainlessMethod method = struct.methods.get(key);
@@ -326,8 +326,8 @@
      */
     static MethodHandle lookupReference(PainlessLookup painlessLookup, MethodHandles.Lookup methodHandlesLookup, String interfaceClass,
                                         Class<?> receiverClass, String name) throws Throwable {
-        Class<?> interfaceType = painlessLookup.getJavaClassFromPainlessType(interfaceClass);
-        PainlessMethod interfaceMethod = painlessLookup.getPainlessStructFromJavaClass(interfaceType).functionalMethod;
+        Class<?> interfaceType = painlessLookup.canonicalTypeNameToType(interfaceClass);
+        PainlessMethod interfaceMethod = painlessLookup.lookupPainlessClass(interfaceType).functionalMethod;
         if (interfaceMethod == null) {
             throw new IllegalArgumentException("Class [" + interfaceClass + "] is not a functional interface");
         }
@@ -345,7 +345,7 @@
         final FunctionRef ref;
         if ("this".equals(type)) {
             // user written method
-            PainlessMethod interfaceMethod = painlessLookup.getPainlessStructFromJavaClass(clazz).functionalMethod;
+            PainlessMethod interfaceMethod = painlessLookup.lookupPainlessClass(clazz).functionalMethod;
             if (interfaceMethod == null) {
                 throw new IllegalArgumentException("Cannot convert function reference [" + type + "::" + call + "] " +
                     "to [" + PainlessLookupUtility.typeToCanonicalTypeName(clazz) + "], not a functional interface");
@@ -419,7 +419,7 @@
     static MethodHandle lookupGetter(PainlessLookup painlessLookup, Class<?> receiverClass, String name) {
         // first try whitelist
         for (Class<?> clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) {
-            PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(clazz);
+            PainlessClass struct = painlessLookup.lookupPainlessClass(clazz);

             if (struct != null) {
                 MethodHandle handle = struct.getterMethodHandles.get(name);
@@ -429,7 +429,7 @@
             }

             for (final Class<?> iface : clazz.getInterfaces()) {
-                struct = painlessLookup.getPainlessStructFromJavaClass(iface);
+                struct = painlessLookup.lookupPainlessClass(iface);

                 if (struct != null) {
                     MethodHandle handle = struct.getterMethodHandles.get(name);
@@ -490,7 +490,7 @@
     static MethodHandle lookupSetter(PainlessLookup painlessLookup, Class<?> receiverClass, String name) {
         // first try whitelist
         for (Class<?> clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) {
-            PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(clazz);
+            PainlessClass struct = painlessLookup.lookupPainlessClass(clazz);

             if (struct != null) {
                 MethodHandle handle = struct.setterMethodHandles.get(name);
@@ -500,7 +500,7 @@
             }

             for (final Class<?> iface : clazz.getInterfaces()) {
-                struct = painlessLookup.getPainlessStructFromJavaClass(iface);
+                struct = painlessLookup.lookupPainlessClass(iface);

                 if (struct != null) {
                     MethodHandle handle = struct.setterMethodHandles.get(name);
@@ -90,10 +90,10 @@ public class FunctionRef {
                        PainlessLookup painlessLookup, Class<?> expected, String type, String call, int numCaptures) {

         if ("new".equals(call)) {
-            return new FunctionRef(expected, painlessLookup.getPainlessStructFromJavaClass(expected).functionalMethod,
+            return new FunctionRef(expected, painlessLookup.lookupPainlessClass(expected).functionalMethod,
                 lookup(painlessLookup, expected, type), numCaptures);
         } else {
-            return new FunctionRef(expected, painlessLookup.getPainlessStructFromJavaClass(expected).functionalMethod,
+            return new FunctionRef(expected, painlessLookup.lookupPainlessClass(expected).functionalMethod,
                 lookup(painlessLookup, expected, type, call, numCaptures > 0), numCaptures);
         }
     }
@@ -230,14 +230,14 @@
     private static PainlessConstructor lookup(PainlessLookup painlessLookup, Class<?> expected, String type) {
         // check its really a functional interface
         // for e.g. Comparable
-        PainlessMethod method = painlessLookup.getPainlessStructFromJavaClass(expected).functionalMethod;
+        PainlessMethod method = painlessLookup.lookupPainlessClass(expected).functionalMethod;
         if (method == null) {
             throw new IllegalArgumentException("Cannot convert function reference [" + type + "::new] " +
                 "to [" + PainlessLookupUtility.typeToCanonicalTypeName(expected) + "], not a functional interface");
         }

         // lookup requested constructor
-        PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(painlessLookup.getJavaClassFromPainlessType(type));
+        PainlessClass struct = painlessLookup.lookupPainlessClass(painlessLookup.canonicalTypeNameToType(type));
         PainlessConstructor impl = struct.constructors.get(PainlessLookupUtility.buildPainlessConstructorKey(method.typeParameters.size()));

         if (impl == null) {
@@ -254,14 +254,14 @@
                                          String type, String call, boolean receiverCaptured) {
         // check its really a functional interface
         // for e.g. Comparable
-        PainlessMethod method = painlessLookup.getPainlessStructFromJavaClass(expected).functionalMethod;
+        PainlessMethod method = painlessLookup.lookupPainlessClass(expected).functionalMethod;
         if (method == null) {
             throw new IllegalArgumentException("Cannot convert function reference [" + type + "::" + call + "] " +
                 "to [" + PainlessLookupUtility.typeToCanonicalTypeName(expected) + "], not a functional interface");
         }

         // lookup requested method
-        PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(painlessLookup.getJavaClassFromPainlessType(type));
+        PainlessClass struct = painlessLookup.lookupPainlessClass(painlessLookup.canonicalTypeNameToType(type));
         final PainlessMethod impl;
         // look for a static impl first
         PainlessMethod staticImpl =
@@ -57,7 +57,7 @@ public class PainlessExplainError extends Error {
         if (objectToExplain != null) {
             toString = objectToExplain.toString();
             javaClassName = objectToExplain.getClass().getName();
-            PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(objectToExplain.getClass());
+            PainlessClass struct = painlessLookup.lookupPainlessClass(objectToExplain.getClass());
             if (struct != null) {
                 painlessClassName = PainlessLookupUtility.typeToCanonicalTypeName(objectToExplain.getClass());
             }
@@ -190,7 +190,7 @@ public class ScriptClassInfo {
             componentType = componentType.getComponentType();
         }

-        if (painlessLookup.getPainlessStructFromJavaClass(componentType) == null) {
+        if (painlessLookup.lookupPainlessClass(componentType) == null) {
             throw new IllegalArgumentException(unknownErrorMessageSource.apply(componentType));
         }

@@ -75,7 +75,7 @@ final class EnhancedPainlessLexer extends PainlessLexer {

     @Override
     protected boolean isType(String name) {
-        return painlessLookup.isSimplePainlessType(name);
+        return painlessLookup.isValidCanonicalClassName(name);
     }

     @Override
@@ -19,41 +19,119 @@

 package org.elasticsearch.painless.lookup;

-import java.util.Collection;
 import java.util.Collections;
 import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+
+import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessConstructorKey;
+import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessFieldKey;
+import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessMethodKey;
+import static org.elasticsearch.painless.lookup.PainlessLookupUtility.typeToCanonicalTypeName;

-/**
- * The entire API for Painless. Also used as a whitelist for checking for legal
- * methods and fields during at both compile-time and runtime.
- */
 public final class PainlessLookup {

-    public Collection<Class<?>> getStructs() {
-        return classesToPainlessClasses.keySet();
-    }
-
     private final Map<String, Class<?>> canonicalClassNamesToClasses;
     private final Map<Class<?>, PainlessClass> classesToPainlessClasses;

     PainlessLookup(Map<String, Class<?>> canonicalClassNamesToClasses, Map<Class<?>, PainlessClass> classesToPainlessClasses) {
+        Objects.requireNonNull(canonicalClassNamesToClasses);
+        Objects.requireNonNull(classesToPainlessClasses);
+
         this.canonicalClassNamesToClasses = Collections.unmodifiableMap(canonicalClassNamesToClasses);
         this.classesToPainlessClasses = Collections.unmodifiableMap(classesToPainlessClasses);
     }

-    public Class<?> getClassFromBinaryName(String painlessType) {
-        return canonicalClassNamesToClasses.get(painlessType.replace('$', '.'));
+    public boolean isValidCanonicalClassName(String canonicalClassName) {
+        Objects.requireNonNull(canonicalClassName);
+
+        return canonicalClassNamesToClasses.containsKey(canonicalClassName);
     }

-    public boolean isSimplePainlessType(String painlessType) {
-        return canonicalClassNamesToClasses.containsKey(painlessType);
-    }
-
-    public PainlessClass getPainlessStructFromJavaClass(Class<?> clazz) {
-        return classesToPainlessClasses.get(clazz);
-    }
-
-    public Class<?> getJavaClassFromPainlessType(String painlessType) {
+    public Class<?> canonicalTypeNameToType(String painlessType) {
+        Objects.requireNonNull(painlessType);
+
         return PainlessLookupUtility.canonicalTypeNameToType(painlessType, canonicalClassNamesToClasses);
     }
+
+    public Set<Class<?>> getClasses() {
+        return classesToPainlessClasses.keySet();
+    }
+
+    public PainlessClass lookupPainlessClass(Class<?> targetClass) {
+        return classesToPainlessClasses.get(targetClass);
+    }
+
+    public PainlessConstructor lookupPainlessConstructor(Class<?> targetClass, int constructorArity) {
+        Objects.requireNonNull(targetClass);
+
+        PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass);
+        String painlessConstructorKey = buildPainlessConstructorKey(constructorArity);
+
+        if (targetPainlessClass == null) {
+            throw new IllegalArgumentException("target class [" + typeToCanonicalTypeName(targetClass) + "] " +
+                    "not found for constructor [" + painlessConstructorKey + "]");
+        }
+
+        PainlessConstructor painlessConstructor = targetPainlessClass.constructors.get(painlessConstructorKey);
+
+        if (painlessConstructor == null) {
+            throw new IllegalArgumentException(
+                    "constructor [" + typeToCanonicalTypeName(targetClass) + ", " + painlessConstructorKey + "] not found");
+        }
+
+        return painlessConstructor;
+    }
+
+    public PainlessMethod lookupPainlessMethod(Class<?> targetClass, boolean isStatic, String methodName, int methodArity) {
+        Objects.requireNonNull(targetClass);
+        Objects.requireNonNull(methodName);
+
+        if (targetClass.isPrimitive()) {
+            targetClass = PainlessLookupUtility.typeToBoxedType(targetClass);
+        }
+
+        PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass);
+        String painlessMethodKey = buildPainlessMethodKey(methodName, methodArity);
+
+        if (targetPainlessClass == null) {
+            throw new IllegalArgumentException(
+                    "target class [" + typeToCanonicalTypeName(targetClass) + "] not found for method [" + painlessMethodKey + "]");
+        }
+
+        PainlessMethod painlessMethod = isStatic ?
+                targetPainlessClass.staticMethods.get(painlessMethodKey) :
+                targetPainlessClass.methods.get(painlessMethodKey);
+
+        if (painlessMethod == null) {
+            throw new IllegalArgumentException(
+                    "method [" + typeToCanonicalTypeName(targetClass) + ", " + painlessMethodKey + "] not found");
+        }
+
+        return painlessMethod;
+    }
+
+    public PainlessField lookupPainlessField(Class<?> targetClass, boolean isStatic, String fieldName) {
+        Objects.requireNonNull(targetClass);
+        Objects.requireNonNull(fieldName);
+
+        PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass);
+        String painlessFieldKey = buildPainlessFieldKey(fieldName);
+
+        if (targetPainlessClass == null) {
+            throw new IllegalArgumentException(
+                    "target class [" + typeToCanonicalTypeName(targetClass) + "] not found for field [" + painlessFieldKey + "]");
+        }
+
+        PainlessField painlessField = isStatic ?
+                targetPainlessClass.staticFields.get(painlessFieldKey) :
+                targetPainlessClass.fields.get(painlessFieldKey);
+
+        if (painlessField == null) {
+            throw new IllegalArgumentException(
+                    "field [" + typeToCanonicalTypeName(targetClass) + ", " + painlessFieldKey + "] not found");
+        }
+
+        return painlessField;
+    }
 }
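The rewritten PainlessLookup above replaces the null-returning getters with lookup* methods that validate their arguments and throw IllegalArgumentException when the whitelist has no match; lookupPainlessClass alone keeps the return-null contract, which is why the callers in Def.java still guard with if (struct != null). A hedged sketch of the new surface; the ArrayList target and the ready-made lookup instance are illustrative assumptions, not from this commit:

    import java.util.ArrayList;

    import org.elasticsearch.painless.lookup.PainlessClass;
    import org.elasticsearch.painless.lookup.PainlessConstructor;
    import org.elasticsearch.painless.lookup.PainlessLookup;
    import org.elasticsearch.painless.lookup.PainlessMethod;

    class LookupSketch {
        static void resolve(PainlessLookup lookup) {
            // throws IllegalArgumentException if ArrayList or its 0-arg constructor is not whitelisted
            PainlessConstructor ctor = lookup.lookupPainlessConstructor(ArrayList.class, 0);
            // isStatic = false, name "add", arity 1; also throws instead of returning null
            PainlessMethod add = lookup.lookupPainlessMethod(ArrayList.class, false, "add", 1);
            // still nullable, so callers must check, as Def.java does above
            PainlessClass struct = lookup.lookupPainlessClass(ArrayList.class);
            if (struct == null) {
                throw new IllegalArgumentException("ArrayList is not whitelisted");
            }
        }
    }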
@@ -50,7 +50,7 @@ public final class EExplicit extends AExpression {
     @Override
     void analyze(Locals locals) {
         try {
-            actual = locals.getPainlessLookup().getJavaClassFromPainlessType(type);
+            actual = locals.getPainlessLookup().canonicalTypeNameToType(type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + type + "]."));
         }

@@ -66,7 +66,7 @@ public final class EFunctionRef extends AExpression implements ILambda {
         try {
             if ("this".equals(type)) {
                 // user's own function
-                PainlessMethod interfaceMethod = locals.getPainlessLookup().getPainlessStructFromJavaClass(expected).functionalMethod;
+                PainlessMethod interfaceMethod = locals.getPainlessLookup().lookupPainlessClass(expected).functionalMethod;
                 if (interfaceMethod == null) {
                     throw new IllegalArgumentException("Cannot convert function reference [" + type + "::" + call + "] " +
                         "to [" + PainlessLookupUtility.typeToCanonicalTypeName(expected) + "], not a functional interface");

@@ -58,7 +58,7 @@ public final class EInstanceof extends AExpression {
 
         // ensure the specified type is part of the definition
         try {
-            clazz = locals.getPainlessLookup().getJavaClassFromPainlessType(this.type);
+            clazz = locals.getPainlessLookup().canonicalTypeNameToType(this.type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + this.type + "]."));
         }

@@ -121,7 +121,7 @@ public final class ELambda extends AExpression implements ILambda {
             }
         } else {
             // we know the method statically, infer return type and any unknown/def types
-            interfaceMethod = locals.getPainlessLookup().getPainlessStructFromJavaClass(expected).functionalMethod;
+            interfaceMethod = locals.getPainlessLookup().lookupPainlessClass(expected).functionalMethod;
             if (interfaceMethod == null) {
                 throw createError(new IllegalArgumentException("Cannot pass lambda to " +
                     "[" + PainlessLookupUtility.typeToCanonicalTypeName(expected) + "], not a functional interface"));
@@ -24,7 +24,6 @@ import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
 import org.elasticsearch.painless.lookup.PainlessConstructor;
-import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 import org.elasticsearch.painless.lookup.def;
 import org.objectweb.asm.Type;

@@ -64,18 +63,16 @@ public final class EListInit extends AExpression {
 
         actual = ArrayList.class;
 
-        constructor = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual).constructors.get(
-                PainlessLookupUtility.buildPainlessConstructorKey(0));
-
-        if (constructor == null) {
-            throw createError(new IllegalStateException("Illegal tree structure."));
+        try {
+            constructor = locals.getPainlessLookup().lookupPainlessConstructor(actual, 0);
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
         }
 
-        method = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual).methods
-                .get(PainlessLookupUtility.buildPainlessMethodKey("add", 1));
-
-        if (method == null) {
-            throw createError(new IllegalStateException("Illegal tree structure."));
+        try {
+            method = locals.getPainlessLookup().lookupPainlessMethod(actual, false, "add", 1);
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
         }
 
         for (int index = 0; index < values.size(); ++index) {
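Note: the EListInit hunk above is the template for the node changes that follow (EMapInit, ENewObj, PCallInvoke, PField, the shortcut nodes, SSubEachIterable): a raw PainlessClass map get plus a null check that threw a generic "Illegal tree structure." IllegalStateException becomes a lookup call whose descriptive IllegalArgumentException is wrapped with createError. Schematically:

    // before: null-check on a raw map get
    constructor = struct.constructors.get(PainlessLookupUtility.buildPainlessConstructorKey(0));
    if (constructor == null) {
        throw createError(new IllegalStateException("Illegal tree structure."));
    }

    // after: the lookup throws, the node decorates with the script location
    try {
        constructor = locals.getPainlessLookup().lookupPainlessConstructor(actual, 0);
    } catch (IllegalArgumentException iae) {
        throw createError(iae);
    }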
@@ -24,7 +24,6 @@ import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
 import org.elasticsearch.painless.lookup.PainlessConstructor;
-import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 import org.elasticsearch.painless.lookup.def;
 import org.objectweb.asm.Type;

@@ -70,18 +69,16 @@ public final class EMapInit extends AExpression {
 
         actual = HashMap.class;
 
-        constructor = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual).constructors.get(
-                PainlessLookupUtility.buildPainlessConstructorKey(0));
-
-        if (constructor == null) {
-            throw createError(new IllegalStateException("Illegal tree structure."));
+        try {
+            constructor = locals.getPainlessLookup().lookupPainlessConstructor(actual, 0);
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
         }
 
-        method = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual).methods
-                .get(PainlessLookupUtility.buildPainlessMethodKey("put", 2));
-
-        if (method == null) {
-            throw createError(new IllegalStateException("Illegal tree structure."));
+        try {
+            method = locals.getPainlessLookup().lookupPainlessMethod(actual, false, "put", 2);
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
         }
 
         if (keys.size() != values.size()) {
@@ -61,7 +61,7 @@ public final class ENewArray extends AExpression {
         Class<?> clazz;
 
         try {
-            clazz = locals.getPainlessLookup().getJavaClassFromPainlessType(this.type);
+            clazz = locals.getPainlessLookup().canonicalTypeNameToType(this.type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + this.type + "]."));
         }

@@ -23,7 +23,6 @@ import org.elasticsearch.painless.Globals;
 import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
-import org.elasticsearch.painless.lookup.PainlessClass;
 import org.elasticsearch.painless.lookup.PainlessConstructor;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.objectweb.asm.Type;
@@ -60,15 +59,17 @@ public final class ENewObj extends AExpression {
     @Override
     void analyze(Locals locals) {
         try {
-            actual = locals.getPainlessLookup().getJavaClassFromPainlessType(this.type);
+            actual = locals.getPainlessLookup().canonicalTypeNameToType(this.type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + this.type + "]."));
         }
 
-        PainlessClass struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual);
-        constructor = struct.constructors.get(PainlessLookupUtility.buildPainlessConstructorKey(arguments.size()));
+        try {
+            constructor = locals.getPainlessLookup().lookupPainlessConstructor(actual, arguments.size());
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
+        }
 
-        if (constructor != null) {
         Class<?>[] types = new Class<?>[constructor.typeParameters.size()];
         constructor.typeParameters.toArray(types);
 

@@ -88,10 +89,6 @@ public final class ENewObj extends AExpression {
             }
 
             statement = true;
-        } else {
-            throw createError(new IllegalArgumentException(
-                "Unknown new call on type [" + PainlessLookupUtility.typeToCanonicalTypeName(actual) + "]."));
-        }
     }
 
     @Override
@@ -48,7 +48,7 @@ public final class EStatic extends AExpression {
     @Override
     void analyze(Locals locals) {
         try {
-            actual = locals.getPainlessLookup().getJavaClassFromPainlessType(type);
+            actual = locals.getPainlessLookup().canonicalTypeNameToType(type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + type + "]."));
         }
@@ -23,8 +23,6 @@ import org.elasticsearch.painless.Globals;
 import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
-import org.elasticsearch.painless.lookup.PainlessClass;
-import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 import org.elasticsearch.painless.lookup.def;
 

@@ -66,26 +64,16 @@ public final class PCallInvoke extends AExpression {
         prefix.expected = prefix.actual;
         prefix = prefix.cast(locals);
 
-        if (prefix.actual.isArray()) {
-            throw createError(new IllegalArgumentException("Illegal call [" + name + "] on array type."));
-        }
-
-        PainlessClass struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(prefix.actual);
-
-        if (prefix.actual.isPrimitive()) {
-            struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(PainlessLookupUtility.typeToBoxedType(prefix.actual));
-        }
-
-        String methodKey = PainlessLookupUtility.buildPainlessMethodKey(name, arguments.size());
-        PainlessMethod method = prefix instanceof EStatic ? struct.staticMethods.get(methodKey) : struct.methods.get(methodKey);
-
-        if (method != null) {
-            sub = new PSubCallInvoke(location, method, prefix.actual, arguments);
-        } else if (prefix.actual == def.class) {
+        if (prefix.actual == def.class) {
             sub = new PSubDefCall(location, name, arguments);
         } else {
-            throw createError(new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments " +
-                "on type [" + PainlessLookupUtility.typeToCanonicalTypeName(prefix.actual) + "]."));
+            try {
+                PainlessMethod method =
+                        locals.getPainlessLookup().lookupPainlessMethod(prefix.actual, prefix instanceof EStatic, name, arguments.size());
+                sub = new PSubCallInvoke(location, method, prefix.actual, arguments);
+            } catch (IllegalArgumentException iae) {
+                throw createError(iae);
+            }
         }
 
         if (nullSafe) {
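Note: in PCallInvoke the explicit array guard, the primitive boxing, and the method-key building all fold into lookupPainlessMethod (which boxes primitive receivers itself; see the targetClass.isPrimitive() branch added to PainlessLookup earlier), so the node now only distinguishes dynamic from typed receivers. One behavioral consequence worth noting in review: calls on array receivers previously failed with a dedicated "Illegal call [...] on array type." error and now fail through the generic lookup path. Condensed view of the new control flow:

    if (prefix.actual == def.class) {
        sub = new PSubDefCall(location, name, arguments); // dispatch deferred to runtime
    } else {
        // compile-time resolution; failure reads "method [type, name/arity] not found"
        PainlessMethod method = locals.getPainlessLookup()
                .lookupPainlessMethod(prefix.actual, prefix instanceof EStatic, name, arguments.size());
        sub = new PSubCallInvoke(location, method, prefix.actual, arguments);
    }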
@@ -23,8 +23,6 @@ import org.elasticsearch.painless.Globals;
 import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
-import org.elasticsearch.painless.lookup.PainlessClass;
-import org.elasticsearch.painless.lookup.PainlessField;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 import org.elasticsearch.painless.lookup.def;

@@ -67,26 +65,34 @@ public final class PField extends AStoreable {
         } else if (prefix.actual == def.class) {
             sub = new PSubDefField(location, value);
         } else {
-            PainlessClass struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(prefix.actual);
-            PainlessField field = prefix instanceof EStatic ? struct.staticFields.get(value) : struct.fields.get(value);
+            try {
+                sub = new PSubField(location,
+                        locals.getPainlessLookup().lookupPainlessField(prefix.actual, prefix instanceof EStatic, value));
+            } catch (IllegalArgumentException fieldIAE) {
+                PainlessMethod getter;
+                PainlessMethod setter;
 
-            if (field != null) {
-                sub = new PSubField(location, field);
-            } else {
-                PainlessMethod getter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey(
-                    "get" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 0));
-
-                if (getter == null) {
-                    getter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey(
-                        "is" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 0));
+                try {
+                    getter = locals.getPainlessLookup().lookupPainlessMethod(prefix.actual, false,
+                            "get" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 0);
+                } catch (IllegalArgumentException getIAE) {
+                    try {
+                        getter = locals.getPainlessLookup().lookupPainlessMethod(prefix.actual, false,
+                                "is" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 0);
+                    } catch (IllegalArgumentException isIAE) {
+                        getter = null;
+                    }
                 }
 
-                PainlessMethod setter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey(
-                    "set" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 1));
+                try {
+                    setter = locals.getPainlessLookup().lookupPainlessMethod(prefix.actual, false,
+                            "set" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 0);
+                } catch (IllegalArgumentException setIAE) {
+                    setter = null;
+                }
 
                 if (getter != null || setter != null) {
-                    sub = new PSubShortcut(
-                        location, value, PainlessLookupUtility.typeToCanonicalTypeName(prefix.actual), getter, setter);
+                    sub = new PSubShortcut(location, value, PainlessLookupUtility.typeToCanonicalTypeName(prefix.actual), getter, setter);
                 } else {
                     EConstant index = new EConstant(location, value);
                     index.analyze(locals);

@@ -99,12 +105,11 @@ public final class PField extends AStoreable {
                         sub = new PSubListShortcut(location, prefix.actual, index);
                     }
                 }
-            }
-        }
 
                 if (sub == null) {
-                    throw createError(new IllegalArgumentException(
-                        "Unknown field [" + value + "] for type [" + PainlessLookupUtility.typeToCanonicalTypeName(prefix.actual) + "]."));
+                    throw createError(fieldIAE);
+                }
+            }
         }
 
         if (nullSafe) {
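Note: PField keeps its old fallback chain but drives it with exceptions: try a whitelisted field first; on failure probe a "get"-prefixed then "is"-prefixed zero-argument getter plus a "set"-prefixed setter for the shortcut node; then the Map/List bracket shortcuts; and only if nothing matches rethrow the original field lookup failure (fieldIAE). For a property named x the probed names are derived like this (sketch):

    String cap = Character.toUpperCase(value.charAt(0)) + value.substring(1); // "x" -> "X"
    // probed in order: field "x", method "get" + cap (arity 0),
    // method "is" + cap (arity 0); the write side uses "set" + cap
    // (the new code looks the setter up with arity 0 where the old code
    // used arity 1; worth double-checking in review)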
@@ -24,7 +24,6 @@ import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
 import org.elasticsearch.painless.WriterConstants;
-import org.elasticsearch.painless.lookup.PainlessClass;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 

@@ -56,11 +55,14 @@ final class PSubListShortcut extends AStoreable {
 
     @Override
     void analyze(Locals locals) {
-        PainlessClass struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(targetClass);
         String canonicalClassName = PainlessLookupUtility.typeToCanonicalTypeName(targetClass);
 
-        getter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey("get", 1));
-        setter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey("set", 2));
+        try {
+            getter = locals.getPainlessLookup().lookupPainlessMethod(targetClass, false, "get", 1);
+            setter = locals.getPainlessLookup().lookupPainlessMethod(targetClass, false, "set", 2);
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
+        }
 
         if (getter != null && (getter.returnType == void.class || getter.typeParameters.size() != 1 ||
                 getter.typeParameters.get(0) != int.class)) {

@@ -23,7 +23,6 @@ import org.elasticsearch.painless.Globals;
 import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
-import org.elasticsearch.painless.lookup.PainlessClass;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 

@@ -55,11 +54,14 @@ final class PSubMapShortcut extends AStoreable {
 
     @Override
     void analyze(Locals locals) {
-        PainlessClass struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(targetClass);
         String canonicalClassName = PainlessLookupUtility.typeToCanonicalTypeName(targetClass);
 
-        getter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey("get", 1));
-        setter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey("put", 2));
+        try {
+            getter = locals.getPainlessLookup().lookupPainlessMethod(targetClass, false, "get", 1);
+            setter = locals.getPainlessLookup().lookupPainlessMethod(targetClass, false, "put", 2);
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
+        }
 
         if (getter != null && (getter.returnType == void.class || getter.typeParameters.size() != 1)) {
             throw createError(new IllegalArgumentException("Illegal map get shortcut for type [" + canonicalClassName + "]."));
@@ -67,7 +67,7 @@ public final class SCatch extends AStatement {
         Class<?> clazz;
 
         try {
-            clazz = locals.getPainlessLookup().getJavaClassFromPainlessType(this.type);
+            clazz = locals.getPainlessLookup().canonicalTypeNameToType(this.type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + this.type + "]."));
         }

@@ -62,7 +62,7 @@ public final class SDeclaration extends AStatement {
         Class<?> clazz;
 
         try {
-            clazz = locals.getPainlessLookup().getJavaClassFromPainlessType(this.type);
+            clazz = locals.getPainlessLookup().canonicalTypeNameToType(this.type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + this.type + "]."));
         }

@@ -71,7 +71,7 @@ public class SEach extends AStatement {
         Class<?> clazz;
 
         try {
-            clazz = locals.getPainlessLookup().getJavaClassFromPainlessType(this.type);
+            clazz = locals.getPainlessLookup().canonicalTypeNameToType(this.type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + this.type + "]."));
         }

@@ -121,7 +121,7 @@ public final class SFunction extends AStatement {
 
     void generateSignature(PainlessLookup painlessLookup) {
         try {
-            returnType = painlessLookup.getJavaClassFromPainlessType(rtnTypeStr);
+            returnType = painlessLookup.canonicalTypeNameToType(rtnTypeStr);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Illegal return type [" + rtnTypeStr + "] for function [" + name + "]."));
         }

@@ -135,7 +135,7 @@ public final class SFunction extends AStatement {
 
         for (int param = 0; param < this.paramTypeStrs.size(); ++param) {
             try {
-                Class<?> paramType = painlessLookup.getJavaClassFromPainlessType(this.paramTypeStrs.get(param));
+                Class<?> paramType = painlessLookup.canonicalTypeNameToType(this.paramTypeStrs.get(param));
 
                 paramClasses[param] = PainlessLookupUtility.typeToJavaType(paramType);
                 paramTypes.add(paramType);
@@ -76,12 +76,10 @@ final class SSubEachIterable extends AStatement {
         if (expression.actual == def.class) {
             method = null;
         } else {
-            method = locals.getPainlessLookup().getPainlessStructFromJavaClass(expression.actual).methods
-                .get(PainlessLookupUtility.buildPainlessMethodKey("iterator", 0));
-
-            if (method == null) {
-                throw createError(new IllegalArgumentException("Unable to create iterator for the type " +
-                    "[" + PainlessLookupUtility.typeToCanonicalTypeName(expression.actual) + "]."));
+            try {
+                method = locals.getPainlessLookup().lookupPainlessMethod(expression.actual, false, "iterator", 0);
+            } catch (IllegalArgumentException iae) {
+                throw createError(iae);
             }
         }
 
@@ -23,12 +23,12 @@ package org.elasticsearch.painless;
 public class OverloadTests extends ScriptTestCase {
 
     public void testMethod() {
-        assertEquals(2, exec("return 'abc123abc'.indexOf('c');"));
-        assertEquals(8, exec("return 'abc123abc'.indexOf('c', 3);"));
+        //assertEquals(2, exec("return 'abc123abc'.indexOf('c');"));
+        //assertEquals(8, exec("return 'abc123abc'.indexOf('c', 3);"));
         IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> {
             exec("return 'abc123abc'.indexOf('c', 3, 'bogus');");
         });
-        assertTrue(expected.getMessage().contains("[indexOf] with [3] arguments"));
+        assertTrue(expected.getMessage().contains("[java.lang.String, indexOf/3]"));
     }
 
     public void testMethodDynamic() {
@@ -45,9 +45,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.function.Consumer;
+import java.util.stream.Collectors;
 
 import static java.util.Comparator.comparing;
-import static java.util.stream.Collectors.toList;
 
 /**
  * Generates an API reference from the method and type whitelists in {@link PainlessLookup}.

@@ -74,9 +74,10 @@ public class PainlessDocGenerator {
                 Files.newOutputStream(indexPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE),
                 false, StandardCharsets.UTF_8.name())) {
             emitGeneratedWarning(indexStream);
-            List<Class<?>> classes = PAINLESS_LOOKUP.getStructs().stream().sorted(comparing(Class::getCanonicalName)).collect(toList());
+            List<Class<?>> classes = PAINLESS_LOOKUP.getClasses().stream().sorted(
+                    Comparator.comparing(Class::getCanonicalName)).collect(Collectors.toList());
             for (Class<?> clazz : classes) {
-                PainlessClass struct = PAINLESS_LOOKUP.getPainlessStructFromJavaClass(clazz);
+                PainlessClass struct = PAINLESS_LOOKUP.lookupPainlessClass(clazz);
                 String canonicalClassName = PainlessLookupUtility.typeToCanonicalTypeName(clazz);
 
                 if (clazz.isPrimitive()) {
@@ -252,7 +252,7 @@ public class RegexTests extends ScriptTestCase {
         IllegalArgumentException e = expectScriptThrows(IllegalArgumentException.class, () -> {
             exec("Pattern.compile('aa')");
         });
-        assertEquals("Unknown call [compile] with [1] arguments on type [java.util.regex.Pattern].", e.getMessage());
+        assertTrue(e.getMessage().contains("[java.util.regex.Pattern, compile/1]"));
     }
 
     public void testBadRegexPattern() {
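Note: these test updates (including the commented-out indexOf assertions in OverloadTests above, which look temporary and may deserve a TODO) pin down the new diagnostic format: unresolved members are reported with a "[owner class, name/arity]" key instead of the old sentence-style messages, e.g.:

    IllegalArgumentException: method [java.lang.String, indexOf/3] not found
    IllegalArgumentException: method [java.util.regex.Pattern, compile/1] not found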
@@ -404,7 +404,7 @@ public class NodeToStringTests extends ESTestCase {
 
     public void testPSubCallInvoke() {
         Location l = new Location(getTestName(), 0);
-        PainlessClass c = painlessLookup.getPainlessStructFromJavaClass(Integer.class);
+        PainlessClass c = painlessLookup.lookupPainlessClass(Integer.class);
         PainlessMethod m = c.methods.get(PainlessLookupUtility.buildPainlessMethodKey("toString", 0));
         PSubCallInvoke node = new PSubCallInvoke(l, m, null, emptyList());
         node.prefix = new EVariable(l, "a");

@@ -459,7 +459,7 @@ public class NodeToStringTests extends ESTestCase {
 
     public void testPSubField() {
         Location l = new Location(getTestName(), 0);
-        PainlessClass s = painlessLookup.getPainlessStructFromJavaClass(Boolean.class);
+        PainlessClass s = painlessLookup.lookupPainlessClass(Boolean.class);
         PainlessField f = s.staticFields.get("TRUE");
         PSubField node = new PSubField(l, f);
         node.prefix = new EStatic(l, "Boolean");

@@ -497,7 +497,7 @@ public class NodeToStringTests extends ESTestCase {
 
     public void testPSubShortcut() {
         Location l = new Location(getTestName(), 0);
-        PainlessClass s = painlessLookup.getPainlessStructFromJavaClass(FeatureTest.class);
+        PainlessClass s = painlessLookup.lookupPainlessClass(FeatureTest.class);
         PainlessMethod getter = s.methods.get(PainlessLookupUtility.buildPainlessMethodKey("getX", 0));
         PainlessMethod setter = s.methods.get(PainlessLookupUtility.buildPainlessMethodKey("setX", 1));
         PSubShortcut node = new PSubShortcut(l, "x", FeatureTest.class.getName(), getter, setter);
@@ -29,3 +29,40 @@
   - is_true: nodes.fs
   - is_true: nodes.plugins
   - is_true: nodes.network_types
+
+---
+"get cluster stats returns cluster_uuid at the top level":
+  - skip:
+      version: " - 6.99.99"
+      reason: "cluster stats including cluster_uuid at the top level is new in v6.5.0 and higher"
+
+  - do:
+      cluster.stats: {}
+
+  - is_true: cluster_uuid
+  - is_true: timestamp
+  - is_true: cluster_name
+  - match: {status: green}
+  - gte: { indices.count: 0}
+  - is_true: indices.docs
+  - is_true: indices.store
+  - is_true: indices.fielddata
+  - is_true: indices.query_cache
+  - is_true: indices.completion
+  - is_true: indices.segments
+  - gte: { nodes.count.total: 1}
+  - gte: { nodes.count.master: 1}
+  - gte: { nodes.count.data: 1}
+  - gte: { nodes.count.ingest: 0}
+  - gte: { nodes.count.coordinating_only: 0}
+  - is_true: nodes.os
+  - is_true: nodes.os.mem.total_in_bytes
+  - is_true: nodes.os.mem.free_in_bytes
+  - is_true: nodes.os.mem.used_in_bytes
+  - gte: { nodes.os.mem.free_percent: 0 }
+  - gte: { nodes.os.mem.used_percent: 0 }
+  - is_true: nodes.process
+  - is_true: nodes.jvm
+  - is_true: nodes.fs
+  - is_true: nodes.plugins
+  - is_true: nodes.network_types
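Note: the new REST test above exercises the cluster_uuid addition end to end. A loose sketch of the response it asserts against (illustrative values; exact field order may differ, the cluster_uuid-before-timestamp ordering comes from the toXContent change below):

    GET /_cluster/stats

    {
      "cluster_uuid": "3O4FumdCQE2b0DiXPg2N4g",
      "timestamp": 1535123456789,
      "cluster_name": "elasticsearch",
      "status": "green",
      "indices": { ... },
      "nodes": { ... }
    }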
@@ -40,15 +40,18 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResp
     ClusterStatsIndices indicesStats;
     ClusterHealthStatus status;
     long timestamp;
+    String clusterUUID;
 
     ClusterStatsResponse() {
     }
 
     public ClusterStatsResponse(long timestamp,
+                                String clusterUUID,
                                 ClusterName clusterName,
                                 List<ClusterStatsNodeResponse> nodes,
                                 List<FailedNodeException> failures) {
         super(clusterName, nodes, failures);
+        this.clusterUUID = clusterUUID;
         this.timestamp = timestamp;
         nodesStats = new ClusterStatsNodes(nodes);
         indicesStats = new ClusterStatsIndices(nodes);

@@ -61,6 +64,10 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResp
         }
     }
 
+    public String getClusterUUID() {
+        return this.clusterUUID;
+    }
+
     public long getTimestamp() {
         return this.timestamp;
     }

@@ -111,6 +118,7 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResp
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.field("cluster_uuid", getClusterUUID());
         builder.field("timestamp", getTimestamp());
         if (status != null) {
             builder.field("status", status.name().toLowerCase(Locale.ROOT));

@@ -74,6 +74,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
             List<ClusterStatsNodeResponse> responses, List<FailedNodeException> failures) {
         return new ClusterStatsResponse(
             System.currentTimeMillis(),
+            clusterService.state().metaData().clusterUUID(),
             clusterService.getClusterName(),
             responses,
             failures);
@@ -144,7 +144,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
         switch (indexResult.getResultType()) {
             case SUCCESS:
                 IndexResponse response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(),
-                    indexResult.getSeqNo(), primary.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated());
+                    indexResult.getSeqNo(), indexResult.getTerm(), indexResult.getVersion(), indexResult.isCreated());
                 return new BulkItemResultHolder(response, indexResult, bulkItemRequest);
             case FAILURE:
                 return new BulkItemResultHolder(null, indexResult, bulkItemRequest);

@@ -161,7 +161,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
         switch (deleteResult.getResultType()) {
             case SUCCESS:
                 DeleteResponse response = new DeleteResponse(primary.shardId(), deleteRequest.type(), deleteRequest.id(),
-                    deleteResult.getSeqNo(), primary.getPrimaryTerm(), deleteResult.getVersion(), deleteResult.isFound());
+                    deleteResult.getSeqNo(), deleteResult.getTerm(), deleteResult.getVersion(), deleteResult.isFound());
                 return new BulkItemResultHolder(response, deleteResult, bulkItemRequest);
             case FAILURE:
                 return new BulkItemResultHolder(null, deleteResult, bulkItemRequest);

@@ -300,7 +300,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
             assert result instanceof Engine.IndexResult : result.getClass();
             final IndexRequest updateIndexRequest = translate.action();
             final IndexResponse indexResponse = new IndexResponse(primary.shardId(), updateIndexRequest.type(), updateIndexRequest.id(),
-                result.getSeqNo(), primary.getPrimaryTerm(), result.getVersion(), ((Engine.IndexResult) result).isCreated());
+                result.getSeqNo(), result.getTerm(), result.getVersion(), ((Engine.IndexResult) result).isCreated());
             updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(),
                 indexResponse.getId(), indexResponse.getSeqNo(), indexResponse.getPrimaryTerm(), indexResponse.getVersion(),
                 indexResponse.getResult());

@@ -320,7 +320,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
             final DeleteRequest updateDeleteRequest = translate.action();
 
             final DeleteResponse deleteResponse = new DeleteResponse(primary.shardId(), updateDeleteRequest.type(), updateDeleteRequest.id(),
-                result.getSeqNo(), primary.getPrimaryTerm(), result.getVersion(), ((Engine.DeleteResult) result).isFound());
+                result.getSeqNo(), result.getTerm(), result.getVersion(), ((Engine.DeleteResult) result).isFound());
 
             updateResponse = new UpdateResponse(deleteResponse.getShardInfo(), deleteResponse.getShardId(),
                 deleteResponse.getType(), deleteResponse.getId(), deleteResponse.getSeqNo(), deleteResponse.getPrimaryTerm(),

@@ -356,7 +356,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
         } catch (Exception failure) {
             // we may fail translating a update to index or delete operation
             // we use index result to communicate failure while translating update request
-            final Engine.Result result = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbers.UNASSIGNED_SEQ_NO);
+            final Engine.Result result = primary.getFailedIndexResult(failure, updateRequest.version());
             return new BulkItemResultHolder(null, result, primaryItemRequest);
         }
 

@@ -559,7 +559,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
             () ->
                 primary.applyIndexOperationOnPrimary(request.version(), request.versionType(), sourceToParse,
                     request.getAutoGeneratedTimestamp(), request.isRetry()),
-            e -> new Engine.IndexResult(e, request.version()),
+            e -> primary.getFailedIndexResult(e, request.version()),
             mappingUpdater);
     }
 

@@ -567,7 +567,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
             MappingUpdatePerformer mappingUpdater) throws Exception {
         return executeOnPrimaryWhileHandlingMappingUpdates(primary.shardId(), request.type(),
             () -> primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), request.versionType()),
-            e -> new Engine.DeleteResult(e, request.version()),
+            e -> primary.getFailedDeleteResult(e, request.version()),
             mappingUpdater);
     }
 
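Note: the TransportShardBulkAction hunks all make one substitution: the primary term recorded in the client response now comes from the engine result (result.getTerm()) rather than from primary.getPrimaryTerm() at response-building time, and failure results are minted through the shard (primary.getFailedIndexResult / getFailedDeleteResult) so they carry a term too. The Engine.Result plumbing follows below; the resulting flow, condensed (signatures as introduced in this diff, variable names illustrative):

    // the engine stamps the term on every result ...
    Engine.IndexResult ok = new Engine.IndexResult(version, term, seqNo, created);
    // ... and the bulk action reads it back when building the response
    IndexResponse response = new IndexResponse(shardId, type, id,
            ok.getSeqNo(), ok.getTerm(), ok.getVersion(), ok.isCreated());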
@@ -929,7 +929,7 @@ public abstract class TransportReplicationAction<
             if (actualAllocationId.equals(allocationId) == false) {
                 throw new ShardNotFoundException(shardId, "expected aID [{}] but found [{}]", allocationId, actualAllocationId);
             }
-            final long actualTerm = indexShard.getPrimaryTerm();
+            final long actualTerm = indexShard.getPendingPrimaryTerm();
             if (actualTerm != primaryTerm) {
                 throw new ShardNotFoundException(shardId, "expected aID [{}] with term [{}] but found [{}]", allocationId,
                     primaryTerm, actualTerm);

@@ -983,7 +983,7 @@ public abstract class TransportReplicationAction<
         }
 
         public boolean isRelocated() {
-            return indexShard.isPrimaryMode() == false;
+            return indexShard.isRelocatedPrimary();
         }
 
         @Override
@@ -304,6 +304,7 @@ public abstract class Engine implements Closeable {
         private final Operation.TYPE operationType;
         private final Result.Type resultType;
         private final long version;
+        private final long term;
         private final long seqNo;
         private final Exception failure;
         private final SetOnce<Boolean> freeze = new SetOnce<>();

@@ -311,19 +312,21 @@ public abstract class Engine implements Closeable {
         private Translog.Location translogLocation;
         private long took;
 
-        protected Result(Operation.TYPE operationType, Exception failure, long version, long seqNo) {
+        protected Result(Operation.TYPE operationType, Exception failure, long version, long term, long seqNo) {
             this.operationType = operationType;
             this.failure = Objects.requireNonNull(failure);
             this.version = version;
+            this.term = term;
             this.seqNo = seqNo;
             this.requiredMappingUpdate = null;
             this.resultType = Type.FAILURE;
         }
 
-        protected Result(Operation.TYPE operationType, long version, long seqNo) {
+        protected Result(Operation.TYPE operationType, long version, long term, long seqNo) {
             this.operationType = operationType;
             this.version = version;
             this.seqNo = seqNo;
+            this.term = term;
             this.failure = null;
             this.requiredMappingUpdate = null;
             this.resultType = Type.SUCCESS;

@@ -333,6 +336,7 @@ public abstract class Engine implements Closeable {
             this.operationType = operationType;
             this.version = Versions.NOT_FOUND;
             this.seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
+            this.term = 0L;
             this.failure = null;
             this.requiredMappingUpdate = requiredMappingUpdate;
             this.resultType = Type.MAPPING_UPDATE_REQUIRED;

@@ -357,6 +361,10 @@ public abstract class Engine implements Closeable {
             return seqNo;
         }
 
+        public long getTerm() {
+            return term;
+        }
+
         /**
          * If the operation was aborted due to missing mappings, this method will return the mappings
          * that are required to complete the operation.

@@ -415,20 +423,20 @@ public abstract class Engine implements Closeable {
 
         private final boolean created;
 
-        public IndexResult(long version, long seqNo, boolean created) {
-            super(Operation.TYPE.INDEX, version, seqNo);
+        public IndexResult(long version, long term, long seqNo, boolean created) {
+            super(Operation.TYPE.INDEX, version, term, seqNo);
             this.created = created;
         }
 
         /**
          * use in case of the index operation failed before getting to internal engine
         **/
-        public IndexResult(Exception failure, long version) {
-            this(failure, version, SequenceNumbers.UNASSIGNED_SEQ_NO);
+        public IndexResult(Exception failure, long version, long term) {
+            this(failure, version, term, SequenceNumbers.UNASSIGNED_SEQ_NO);
         }
 
-        public IndexResult(Exception failure, long version, long seqNo) {
-            super(Operation.TYPE.INDEX, failure, version, seqNo);
+        public IndexResult(Exception failure, long version, long term, long seqNo) {
+            super(Operation.TYPE.INDEX, failure, version, term, seqNo);
             this.created = false;
         }
 

@@ -447,20 +455,20 @@ public abstract class Engine implements Closeable {
 
         private final boolean found;
 
-        public DeleteResult(long version, long seqNo, boolean found) {
-            super(Operation.TYPE.DELETE, version, seqNo);
+        public DeleteResult(long version, long term, long seqNo, boolean found) {
+            super(Operation.TYPE.DELETE, version, term, seqNo);
             this.found = found;
         }
 
         /**
          * use in case of the delete operation failed before getting to internal engine
         **/
-        public DeleteResult(Exception failure, long version) {
-            this(failure, version, SequenceNumbers.UNASSIGNED_SEQ_NO, false);
+        public DeleteResult(Exception failure, long version, long term) {
+            this(failure, version, term, SequenceNumbers.UNASSIGNED_SEQ_NO, false);
        }
 
-        public DeleteResult(Exception failure, long version, long seqNo, boolean found) {
-            super(Operation.TYPE.DELETE, failure, version, seqNo);
+        public DeleteResult(Exception failure, long version, long term, long seqNo, boolean found) {
+            super(Operation.TYPE.DELETE, failure, version, term, seqNo);
             this.found = found;
         }
 

@@ -477,12 +485,12 @@ public abstract class Engine implements Closeable {
 
     public static class NoOpResult extends Result {
 
-        NoOpResult(long seqNo) {
-            super(Operation.TYPE.NO_OP, 0, seqNo);
+        NoOpResult(long term, long seqNo) {
+            super(Operation.TYPE.NO_OP, term, 0, seqNo);
         }
 
-        NoOpResult(long seqNo, Exception failure) {
-            super(Operation.TYPE.NO_OP, failure, 0, seqNo);
+        NoOpResult(long term, long seqNo, Exception failure) {
+            super(Operation.TYPE.NO_OP, failure, term, 0, seqNo);
         }
 
     }
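Note: with Result now carrying a term, one detail in the NoOpResult hunk above deserves a second look: against the new Result(operationType, version, term, seqNo) signature, super(Operation.TYPE.NO_OP, term, 0, seqNo) binds term to the version parameter and 0 to term (the failure variant has the same shape). If the intent is a zero version and the real term, the call would read:

    NoOpResult(long term, long seqNo) {
        super(Operation.TYPE.NO_OP, 0, term, seqNo); // version = 0, term = term
    }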
@@ -736,6 +736,10 @@ public class InternalEngine extends Engine {
         return localCheckpointTracker.generateSeqNo();
     }
 
+    private long getPrimaryTerm() {
+        return engineConfig.getPrimaryTermSupplier().getAsLong();
+    }
+
     @Override
     public IndexResult index(Index index) throws IOException {
         assert Objects.equals(index.uid().field(), IdFieldMapper.NAME) : index.uid().field();

@@ -788,7 +792,7 @@ public class InternalEngine extends Engine {
                 indexResult = indexIntoLucene(index, plan);
             } else {
                 indexResult = new IndexResult(
-                    plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
+                    plan.versionForIndexing, getPrimaryTerm(), plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
             }
             if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
                 final Translog.Location location;

@@ -900,7 +904,7 @@ public class InternalEngine extends Engine {
                     currentVersion, index.version(), currentNotFoundOrDeleted)) {
                 final VersionConflictEngineException e =
                     new VersionConflictEngineException(shardId, index, currentVersion, currentNotFoundOrDeleted);
-                plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion);
+                plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm());
             } else {
                 plan = IndexingStrategy.processNormally(currentNotFoundOrDeleted,
                     generateSeqNoForOperation(index),

@@ -930,7 +934,7 @@ public class InternalEngine extends Engine {
                 assert assertDocDoesNotExist(index, canOptimizeAddDocument(index) == false);
                 addDocs(index.docs(), indexWriter);
             }
-            return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
+            return new IndexResult(plan.versionForIndexing, getPrimaryTerm(), plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
         } catch (Exception ex) {
             if (indexWriter.getTragicException() == null) {
                 /* There is no tragic event recorded so this must be a document failure.

@@ -946,7 +950,7 @@ public class InternalEngine extends Engine {
                  * we return a `MATCH_ANY` version to indicate no document was index. The value is
                  * not used anyway
                  */
-                return new IndexResult(ex, Versions.MATCH_ANY, plan.seqNoForIndexing);
+                return new IndexResult(ex, Versions.MATCH_ANY, getPrimaryTerm(), plan.seqNoForIndexing);
             } else {
                 throw ex;
             }

@@ -1019,8 +1023,8 @@ public class InternalEngine extends Engine {
         }
 
         static IndexingStrategy skipDueToVersionConflict(
-                VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion) {
-            final IndexResult result = new IndexResult(e, currentVersion);
+                VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion, long term) {
+            final IndexResult result = new IndexResult(e, currentVersion, term);
             return new IndexingStrategy(
                 currentNotFoundOrDeleted, false, false, SequenceNumbers.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result);
         }

@@ -1097,7 +1101,7 @@ public class InternalEngine extends Engine {
                 deleteResult = deleteInLucene(delete, plan);
             } else {
                 deleteResult = new DeleteResult(
-                    plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false);
+                    plan.versionOfDeletion, getPrimaryTerm(), plan.seqNoOfDeletion, plan.currentlyDeleted == false);
             }
             if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
                 final Translog.Location location;

@@ -1178,7 +1182,7 @@ public class InternalEngine extends Engine {
         final DeletionStrategy plan;
         if (delete.versionType().isVersionConflictForWrites(currentVersion, delete.version(), currentlyDeleted)) {
             final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete, currentVersion, currentlyDeleted);
-            plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, currentlyDeleted);
+            plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted);
         } else {
             plan = DeletionStrategy.processNormally(
                 currentlyDeleted,

@@ -1201,12 +1205,12 @@ public class InternalEngine extends Engine {
                 new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(),
                     engineConfig.getThreadPool().relativeTimeInMillis()));
             return new DeleteResult(
-                plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false);
+                plan.versionOfDeletion, getPrimaryTerm(), plan.seqNoOfDeletion, plan.currentlyDeleted == false);
         } catch (Exception ex) {
             if (indexWriter.getTragicException() == null) {
                 // there is no tragic event and such it must be a document level failure
                 return new DeleteResult(
-                    ex, plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false);
+                    ex, plan.versionOfDeletion, getPrimaryTerm(), plan.seqNoOfDeletion, plan.currentlyDeleted == false);
             } else {
                 throw ex;
             }

@@ -1237,9 +1241,9 @@ public class InternalEngine extends Engine {
         }
 
         static DeletionStrategy skipDueToVersionConflict(
-                VersionConflictEngineException e, long currentVersion, boolean currentlyDeleted) {
+                VersionConflictEngineException e, long currentVersion, long term, boolean currentlyDeleted) {
             final long unassignedSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
|
||||||
final DeleteResult deleteResult = new DeleteResult(e, currentVersion, unassignedSeqNo, currentlyDeleted == false);
|
final DeleteResult deleteResult = new DeleteResult(e, currentVersion, term, unassignedSeqNo, currentlyDeleted == false);
|
||||||
return new DeletionStrategy(false, currentlyDeleted, unassignedSeqNo, Versions.NOT_FOUND, deleteResult);
|
return new DeletionStrategy(false, currentlyDeleted, unassignedSeqNo, Versions.NOT_FOUND, deleteResult);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1268,7 +1272,7 @@ public class InternalEngine extends Engine {
|
||||||
try (ReleasableLock ignored = readLock.acquire()) {
|
try (ReleasableLock ignored = readLock.acquire()) {
|
||||||
noOpResult = innerNoOp(noOp);
|
noOpResult = innerNoOp(noOp);
|
||||||
} catch (final Exception e) {
|
} catch (final Exception e) {
|
||||||
noOpResult = new NoOpResult(noOp.seqNo(), e);
|
noOpResult = new NoOpResult(getPrimaryTerm(), noOp.seqNo(), e);
|
||||||
}
|
}
|
||||||
return noOpResult;
|
return noOpResult;
|
||||||
}
|
}
|
||||||
|
@ -1278,7 +1282,7 @@ public class InternalEngine extends Engine {
|
||||||
assert noOp.seqNo() > SequenceNumbers.NO_OPS_PERFORMED;
|
assert noOp.seqNo() > SequenceNumbers.NO_OPS_PERFORMED;
|
||||||
final long seqNo = noOp.seqNo();
|
final long seqNo = noOp.seqNo();
|
||||||
try {
|
try {
|
||||||
final NoOpResult noOpResult = new NoOpResult(noOp.seqNo());
|
final NoOpResult noOpResult = new NoOpResult(getPrimaryTerm(), noOp.seqNo());
|
||||||
if (noOp.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
|
if (noOp.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
|
||||||
final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason()));
|
final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason()));
|
||||||
noOpResult.setTranslogLocation(location);
|
noOpResult.setTranslogLocation(location);
|
||||||
|
|
|
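The InternalEngine hunks above thread the engine's primary term into every IndexResult, DeleteResult, and NoOpResult constructor via getPrimaryTerm(), so each result records the term it was produced under. A minimal standalone sketch of the shape of this change (the class below is illustrative only, not the real Engine result types):

    // Illustrative sketch -- mirrors the constructor change above, not the actual Elasticsearch class.
    class IndexResultSketch {
        final long version;
        final long term;   // the newly threaded-through primary term
        final long seqNo;
        final boolean created;

        IndexResultSketch(long version, long term, long seqNo, boolean created) {
            this.version = version;
            this.term = term;
            this.seqNo = seqNo;
            this.created = created;
        }

        public static void main(String[] args) {
            // A successful indexing operation now reports the term it ran under.
            IndexResultSketch r = new IndexResultSketch(3L, 2L, 17L, true);
            System.out.println("version=" + r.version + " term=" + r.term + " seqNo=" + r.seqNo);
        }
    }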
@@ -85,6 +85,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
      * computation from that point on.
      */
     volatile boolean primaryMode;
+
     /**
      * Boolean flag that indicates if a relocation handoff is in progress. A handoff is started by calling {@link #startRelocationHandoff}
      * and is finished by either calling {@link #completeRelocationHandoff} or {@link #abortRelocationHandoff}, depending on whether the
@@ -102,6 +103,11 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
      */
     boolean handoffInProgress;

+    /**
+     * Boolean flag that indicates whether a relocation handoff completed (see {@link #completeRelocationHandoff}).
+     */
+    volatile boolean relocated;
+
     /**
      * The global checkpoint tracker relies on the property that cluster state updates are applied in-order. After transferring a primary
      * context from the primary relocation source to the target and initializing the target, it is possible for the target to apply a
@@ -260,6 +266,13 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
         return primaryMode;
     }

+    /**
+     * Returns whether the replication tracker has relocated away to another shard copy.
+     */
+    public boolean isRelocated() {
+        return relocated;
+    }
+
     /**
      * Class invariant that should hold before and after every invocation of public methods on this class. As Java lacks implication
      * as a logical operator, many of the invariants are written under the form (!A || B), they should be read as (A implies B) however.
@@ -287,6 +300,9 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
         // relocation handoff can only occur in primary mode
         assert !handoffInProgress || primaryMode;

+        // a relocated copy is not in primary mode
+        assert !relocated || !primaryMode;
+
         // the current shard is marked as in-sync when the global checkpoint tracker operates in primary mode
         assert !primaryMode || checkpoints.get(shardAllocationId).inSync;

@@ -766,8 +782,10 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
         assert invariant();
         assert primaryMode;
         assert handoffInProgress;
+        assert relocated == false;
         primaryMode = false;
         handoffInProgress = false;
+        relocated = true;
         // forget all checkpoint information except for global checkpoint of current shard
         checkpoints.entrySet().stream().forEach(e -> {
             final CheckpointState cps = e.getValue();
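The ReplicationTracker hunks above add a relocated flag alongside primaryMode, with the new invariant that a relocated copy is never in primary mode. A simplified standalone sketch of the handoff transition (field names follow the diff; everything else is reduced to the bare pattern):

    // Simplified sketch of the relocation handoff state machine above; not the real tracker.
    class HandoffSketch {
        volatile boolean primaryMode = true;
        boolean handoffInProgress = true;
        volatile boolean relocated = false;

        synchronized void completeRelocationHandoff() {
            assert primaryMode && handoffInProgress && relocated == false;
            primaryMode = false;
            handoffInProgress = false;
            relocated = true;
            // invariant from the diff: a relocated copy is not in primary mode
            assert !relocated || !primaryMode;
        }
    }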
@@ -52,6 +52,7 @@ import org.elasticsearch.cluster.routing.RecoverySource;
 import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.CheckedRunnable;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -192,7 +193,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl

     protected volatile ShardRouting shardRouting;
     protected volatile IndexShardState state;
-    protected volatile long primaryTerm;
+    protected volatile long pendingPrimaryTerm; // see JavaDocs for getPendingPrimaryTerm
+    protected volatile long operationPrimaryTerm;
     protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();
     final EngineFactory engineFactory;

@@ -315,7 +317,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         }
         indexShardOperationPermits = new IndexShardOperationPermits(shardId, threadPool);
         searcherWrapper = indexSearcherWrapper;
-        primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id());
+        pendingPrimaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id());
+        operationPrimaryTerm = pendingPrimaryTerm;
         refreshListeners = buildRefreshListeners();
         lastSearcherAccess.set(threadPool.relativeTimeInMillis());
         persistMetadata(path, indexSettings, shardRouting, null, logger);
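The field change above splits the single primaryTerm into two: pendingPrimaryTerm, advanced as soon as a new term is learned from the cluster state, and operationPrimaryTerm, which in-flight writes are stamped with and which catches up only once existing operations are blocked. A simplified standalone sketch of the relationship:

    // Simplified sketch of the two-term split above; not the real IndexShard.
    class TermsSketch {
        volatile long pendingPrimaryTerm = 1;   // advanced immediately on a cluster-state update
        volatile long operationPrimaryTerm = 1; // advanced only once operations are blocked

        long termForNewOperation() {
            // Writes are always stamped with the operation term, never the pending one,
            // so the invariant operationPrimaryTerm <= pendingPrimaryTerm must hold.
            return operationPrimaryTerm;
        }
    }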
@@ -365,10 +368,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }

     /**
-     * Returns the primary term the index shard is on. See {@link org.elasticsearch.cluster.metadata.IndexMetaData#primaryTerm(int)}
+     * USE THIS METHOD WITH CARE!
+     * Returns the primary term the index shard is supposed to be on. In case of primary promotion or when a replica learns about
+     * a new term due to a new primary, the term that's exposed here will not be the term that the shard internally uses to assign
+     * to operations. The shard will auto-correct its internal operation term, but this might take time.
+     * See {@link org.elasticsearch.cluster.metadata.IndexMetaData#primaryTerm(int)}
      */
-    public long getPrimaryTerm() {
-        return this.primaryTerm;
+    public long getPendingPrimaryTerm() {
+        return this.pendingPrimaryTerm;
     }

     /**
@@ -418,7 +425,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                     "a primary relocation is completed by the master, but primary mode is not active " + currentRouting;

                 changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
-            } else if (currentRouting.primary() && currentRouting.relocating() && replicationTracker.isPrimaryMode() == false &&
+            } else if (currentRouting.primary() && currentRouting.relocating() && replicationTracker.isRelocated() &&
                 (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
                 // if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard routing occur (e.g. due to recovery
                 // failure / cancellation). The reason is that at the moment we cannot safely reactivate primary mode without risking two
@@ -431,7 +438,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
             final CountDownLatch shardStateUpdated = new CountDownLatch(1);

             if (newRouting.primary()) {
-                if (newPrimaryTerm == primaryTerm) {
+                if (newPrimaryTerm == pendingPrimaryTerm) {
                     if (currentRouting.initializing() && currentRouting.isRelocationTarget() == false && newRouting.active()) {
                         // the master started a recovering primary, activate primary mode.
                         replicationTracker.activatePrimaryMode(getLocalCheckpoint());
@@ -454,10 +461,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                     assert newRouting.initializing() == false :
                         "a started primary shard should never update its term; "
                             + "shard " + newRouting + ", "
-                            + "current term [" + primaryTerm + "], "
+                            + "current term [" + pendingPrimaryTerm + "], "
                             + "new term [" + newPrimaryTerm + "]";
-                    assert newPrimaryTerm > primaryTerm :
-                        "primary terms can only go up; current term [" + primaryTerm + "], new term [" + newPrimaryTerm + "]";
+                    assert newPrimaryTerm > pendingPrimaryTerm :
+                        "primary terms can only go up; current term [" + pendingPrimaryTerm + "], new term [" + newPrimaryTerm + "]";
                     /*
                      * Before this call returns, we are guaranteed that all future operations are delayed and so this happens before we
                      * increment the primary term. The latch is needed to ensure that we do not unblock operations before the primary term is
@@ -468,12 +475,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                         if (resyncStarted == false) {
                             throw new IllegalStateException("cannot start resync while it's already in progress");
                         }
-                    indexShardOperationPermits.asyncBlockOperations(
-                        30,
-                        TimeUnit.MINUTES,
+                    bumpPrimaryTerm(newPrimaryTerm,
                         () -> {
                             shardStateUpdated.await();
+                            assert pendingPrimaryTerm == newPrimaryTerm :
+                                "shard term changed on primary. expected [" + newPrimaryTerm + "] but was [" + pendingPrimaryTerm + "]" +
+                                ", current routing: " + currentRouting + ", new routing: " + newRouting;
+                            assert operationPrimaryTerm == newPrimaryTerm;
                             try {
+                                replicationTracker.activatePrimaryMode(getLocalCheckpoint());
                                 /*
                                  * If this shard was serving as a replica shard when another shard was promoted to primary then the state of
                                  * its local checkpoint tracker was reset during the primary term transition. In particular, the local
@@ -517,10 +527,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                             } catch (final AlreadyClosedException e) {
                                 // okay, the index was deleted
                             }
-                        },
-                        e -> failShard("exception during primary term transition", e));
-                    replicationTracker.activatePrimaryMode(getLocalCheckpoint());
-                    primaryTerm = newPrimaryTerm;
+                        });
                 }
             }
             // set this last, once we finished updating all internal state.
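In the promotion path above, the callback handed to bumpPrimaryTerm first waits on shardStateUpdated so it cannot observe the new term before updateShardState has finished publishing routing state. A simplified sketch of that ordering (the thread handling below is illustrative only; the real code runs the callback once operation permits are blocked):

    // Simplified sketch of the latch ordering above; names and threading are illustrative.
    import java.util.concurrent.CountDownLatch;

    class PromotionOrderingSketch {
        private final CountDownLatch shardStateUpdated = new CountDownLatch(1);
        private volatile long pendingPrimaryTerm = 1;

        void promote(long newPrimaryTerm, Runnable activatePrimaryMode) {
            // Stands in for the async block-operations callback in the diff above.
            new Thread(() -> {
                try {
                    shardStateUpdated.await(); // happens-before: shard state fully published
                    activatePrimaryMode.run();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }).start();
            pendingPrimaryTerm = newPrimaryTerm; // publish internal state first...
            shardStateUpdated.countDown();       // ...then release the callback
        }
    }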
@@ -528,8 +535,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl

             assert this.shardRouting.primary() == false ||
                 this.shardRouting.started() == false || // note that we use started and not active to avoid relocating shards
+                this.indexShardOperationPermits.isBlocked() || // if permits are blocked, we are still transitioning
                 this.replicationTracker.isPrimaryMode()
-                : "an started primary must be in primary mode " + this.shardRouting;
+                : "a started primary with non-pending operation term must be in primary mode " + this.shardRouting;
             shardStateUpdated.countDown();
         }
         if (currentRouting != null && currentRouting.active() == false && newRouting.active()) {
@@ -590,7 +598,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                 consumer.accept(primaryContext);
                 synchronized (mutex) {
                     verifyRelocatingState();
-                    replicationTracker.completeRelocationHandoff(); // make changes to primaryMode flag only under mutex
+                    replicationTracker.completeRelocationHandoff(); // make changes to primaryMode and relocated flag only under mutex
                 }
             } catch (final Exception e) {
                 try {
@@ -655,21 +663,22 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     public Engine.IndexResult applyIndexOperationOnPrimary(long version, VersionType versionType, SourceToParse sourceToParse,
                                                            long autoGeneratedTimestamp, boolean isRetry) throws IOException {
         assert versionType.validateVersionForWrites(version);
-        return applyIndexOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, versionType, autoGeneratedTimestamp,
+        return applyIndexOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, operationPrimaryTerm, version, versionType, autoGeneratedTimestamp,
             isRetry, Engine.Operation.Origin.PRIMARY, sourceToParse);
     }

     public Engine.IndexResult applyIndexOperationOnReplica(long seqNo, long version, long autoGeneratedTimeStamp,
                                                            boolean isRetry, SourceToParse sourceToParse)
         throws IOException {
-        return applyIndexOperation(seqNo, primaryTerm, version, null, autoGeneratedTimeStamp, isRetry,
+        return applyIndexOperation(seqNo, operationPrimaryTerm, version, null, autoGeneratedTimeStamp, isRetry,
             Engine.Operation.Origin.REPLICA, sourceToParse);
     }

     private Engine.IndexResult applyIndexOperation(long seqNo, long opPrimaryTerm, long version, @Nullable VersionType versionType,
                                                    long autoGeneratedTimeStamp, boolean isRetry, Engine.Operation.Origin origin,
                                                    SourceToParse sourceToParse) throws IOException {
-        assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]";
+        assert opPrimaryTerm <= this.operationPrimaryTerm: "op term [ " + opPrimaryTerm + " ] > shard term [" + this.operationPrimaryTerm
+            + "]";
         ensureWriteAllowed(origin);
         Engine.Index operation;
         try {
@@ -686,7 +695,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
             // can not raise an exception that may block any replication of previous operations to the
             // replicas
             verifyNotClosed(e);
-            return new Engine.IndexResult(e, version, seqNo);
+            return new Engine.IndexResult(e, version, opPrimaryTerm, seqNo);
         }

         return index(getEngine(), operation);
@@ -723,12 +732,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }

     public Engine.NoOpResult markSeqNoAsNoop(long seqNo, String reason) throws IOException {
-        return markSeqNoAsNoop(seqNo, primaryTerm, reason, Engine.Operation.Origin.REPLICA);
+        return markSeqNoAsNoop(seqNo, operationPrimaryTerm, reason, Engine.Operation.Origin.REPLICA);
     }

     private Engine.NoOpResult markSeqNoAsNoop(long seqNo, long opPrimaryTerm, String reason,
                                               Engine.Operation.Origin origin) throws IOException {
-        assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]";
+        assert opPrimaryTerm <= this.operationPrimaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.operationPrimaryTerm
+            + "]";
         long startTime = System.nanoTime();
         ensureWriteAllowed(origin);
         final Engine.NoOp noOp = new Engine.NoOp(seqNo, opPrimaryTerm, origin, startTime, reason);
@@ -743,20 +753,29 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         return engine.noOp(noOp);
     }

+    public Engine.IndexResult getFailedIndexResult(Exception e, long version) {
+        return new Engine.IndexResult(e, version, operationPrimaryTerm);
+    }
+
+    public Engine.DeleteResult getFailedDeleteResult(Exception e, long version) {
+        return new Engine.DeleteResult(e, version, operationPrimaryTerm);
+    }
+
     public Engine.DeleteResult applyDeleteOperationOnPrimary(long version, String type, String id, VersionType versionType)
         throws IOException {
         assert versionType.validateVersionForWrites(version);
-        return applyDeleteOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, type, id, versionType,
+        return applyDeleteOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, operationPrimaryTerm, version, type, id, versionType,
             Engine.Operation.Origin.PRIMARY);
     }

     public Engine.DeleteResult applyDeleteOperationOnReplica(long seqNo, long version, String type, String id) throws IOException {
-        return applyDeleteOperation(seqNo, primaryTerm, version, type, id, null, Engine.Operation.Origin.REPLICA);
+        return applyDeleteOperation(seqNo, operationPrimaryTerm, version, type, id, null, Engine.Operation.Origin.REPLICA);
     }

     private Engine.DeleteResult applyDeleteOperation(long seqNo, long opPrimaryTerm, long version, String type, String id,
                                                      @Nullable VersionType versionType, Engine.Operation.Origin origin) throws IOException {
-        assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]";
+        assert opPrimaryTerm <= this.operationPrimaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.operationPrimaryTerm
+            + "]";
         ensureWriteAllowed(origin);
         // When there is a single type, the unique identifier is only composed of the _id,
         // so there is no way to differenciate foo#1 from bar#1. This is especially an issue
@@ -772,7 +791,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                 return new Engine.DeleteResult(update);
             }
         } catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) {
-            return new Engine.DeleteResult(e, version, seqNo, false);
+            return new Engine.DeleteResult(e, version, operationPrimaryTerm, seqNo, false);
         }
         final Term uid = extractUidForDelete(type, id);
         final Engine.Delete delete = prepareDelete(type, id, uid, seqNo, opPrimaryTerm, version,
@@ -1209,7 +1228,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }

     public void trimOperationOfPreviousPrimaryTerms(long aboveSeqNo) {
-        getEngine().trimOperationsFromTranslog(primaryTerm, aboveSeqNo);
+        getEngine().trimOperationsFromTranslog(operationPrimaryTerm, aboveSeqNo);
     }

     public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine.Operation.Origin origin) throws IOException {
@@ -2082,10 +2101,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }

     /**
-     * Returns whether the shard is in primary mode, i.e., in charge of replicating changes (see {@link ReplicationTracker}).
+     * Returns whether the shard is a relocated primary, i.e. not in charge anymore of replicating changes (see {@link ReplicationTracker}).
      */
-    public boolean isPrimaryMode() {
-        return replicationTracker.isPrimaryMode();
+    public boolean isRelocatedPrimary() {
+        assert shardRouting.primary() : "only call isRelocatedPrimary on primary shard";
+        return replicationTracker.isRelocated();
     }

     class ShardEventListener implements Engine.EventListener {
@@ -2175,7 +2195,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
             IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()),
             Collections.singletonList(refreshListeners),
             Collections.singletonList(new RefreshMetricUpdater(refreshMetric)),
-            indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker, this::getPrimaryTerm);
+            indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker, () -> operationPrimaryTerm);
     }

     /**
@@ -2194,7 +2214,25 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         indexShardOperationPermits.acquire(onPermitAcquired, executorOnDelay, false, debugInfo);
     }

-    private final Object primaryTermMutex = new Object();
+    private <E extends Exception> void bumpPrimaryTerm(long newPrimaryTerm, final CheckedRunnable<E> onBlocked) {
+        assert Thread.holdsLock(mutex);
+        assert newPrimaryTerm > pendingPrimaryTerm;
+        assert operationPrimaryTerm <= pendingPrimaryTerm;
+        final CountDownLatch termUpdated = new CountDownLatch(1);
+        indexShardOperationPermits.asyncBlockOperations(30, TimeUnit.MINUTES, () -> {
+                assert operationPrimaryTerm <= pendingPrimaryTerm;
+                termUpdated.await();
+                // indexShardOperationPermits doesn't guarantee that async submissions are executed
+                // in the order submitted. We need to guard against another term bump
+                if (operationPrimaryTerm < newPrimaryTerm) {
+                    operationPrimaryTerm = newPrimaryTerm;
+                    onBlocked.run();
+                }
+            },
+            e -> failShard("exception during primary term transition", e));
+        pendingPrimaryTerm = newPrimaryTerm;
+        termUpdated.countDown();
+    }

     /**
      * Acquire a replica operation permit whenever the shard is ready for indexing (see
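bumpPrimaryTerm above re-checks operationPrimaryTerm < newPrimaryTerm inside the block callback because, as its comment notes, async block submissions are not guaranteed to run in submission order; a later bump may execute first. A simplified, synchronous sketch of the guard (the real code runs the callback asynchronously once all operation permits are blocked):

    // Simplified, synchronous sketch of the re-ordering guard above; not the real IndexShard.
    class BumpSketch {
        private volatile long pendingPrimaryTerm = 1;
        private volatile long operationPrimaryTerm = 1;

        synchronized void bumpPrimaryTerm(long newPrimaryTerm, Runnable onBlocked) {
            assert newPrimaryTerm > pendingPrimaryTerm;
            Runnable blockCallback = () -> {
                if (operationPrimaryTerm < newPrimaryTerm) { // a newer bump may already have applied
                    operationPrimaryTerm = newPrimaryTerm;
                    onBlocked.run();
                }
            };
            pendingPrimaryTerm = newPrimaryTerm;
            blockCallback.run();
        }
    }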
@@ -2203,7 +2241,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
      * {@link IllegalStateException}. If permit acquisition is delayed, the listener will be invoked on the executor with the specified
      * name.
      *
-     * @param operationPrimaryTerm the operation primary term
+     * @param opPrimaryTerm        the operation primary term
      * @param globalCheckpoint     the global checkpoint associated with the request
      * @param onPermitAcquired     the listener for permit acquisition
      * @param executorOnDelay      the name of the executor to invoke the listener on if permit acquisition is delayed
@@ -2211,15 +2249,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
      *                             the tracing will capture the supplied object's {@link Object#toString()} value. Otherwise the object
      *                             isn't used
      */
-    public void acquireReplicaOperationPermit(final long operationPrimaryTerm, final long globalCheckpoint,
+    public void acquireReplicaOperationPermit(final long opPrimaryTerm, final long globalCheckpoint,
                                               final ActionListener<Releasable> onPermitAcquired, final String executorOnDelay,
                                               final Object debugInfo) {
         verifyNotClosed();
         verifyReplicationTarget();
-        final boolean globalCheckpointUpdated;
-        if (operationPrimaryTerm > primaryTerm) {
-            synchronized (primaryTermMutex) {
-                if (operationPrimaryTerm > primaryTerm) {
+        if (opPrimaryTerm > pendingPrimaryTerm) {
+            synchronized (mutex) {
+                if (opPrimaryTerm > pendingPrimaryTerm) {
                     IndexShardState shardState = state();
                     // only roll translog and update primary term if shard has made it past recovery
                     // Having a new primary term here means that the old primary failed and that there is a new primary, which again
@@ -2229,11 +2266,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                         shardState != IndexShardState.STARTED) {
                         throw new IndexShardNotStartedException(shardId, shardState);
                     }
-                    try {
-                        indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> {
-                            assert operationPrimaryTerm > primaryTerm :
-                                "shard term already update. op term [" + operationPrimaryTerm + "], shardTerm [" + primaryTerm + "]";
-                            primaryTerm = operationPrimaryTerm;
+                    if (opPrimaryTerm > pendingPrimaryTerm) {
+                        bumpPrimaryTerm(opPrimaryTerm, () -> {
                             updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition");
                             final long currentGlobalCheckpoint = getGlobalCheckpoint();
                             final long localCheckpoint;
@@ -2244,42 +2279,33 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                             }
                             logger.trace(
                                 "detected new primary with primary term [{}], resetting local checkpoint from [{}] to [{}]",
-                                operationPrimaryTerm,
+                                opPrimaryTerm,
                                 getLocalCheckpoint(),
                                 localCheckpoint);
                             getEngine().resetLocalCheckpoint(localCheckpoint);
                             getEngine().rollTranslogGeneration();
                         });
-                        globalCheckpointUpdated = true;
-                    } catch (final Exception e) {
-                        onPermitAcquired.onFailure(e);
-                        return;
-                    }
-                } else {
-                    globalCheckpointUpdated = false;
                     }
                 }
-        } else {
-            globalCheckpointUpdated = false;
+            }
         }

-        assert operationPrimaryTerm <= primaryTerm
-            : "operation primary term [" + operationPrimaryTerm + "] should be at most [" + primaryTerm + "]";
+        assert opPrimaryTerm <= pendingPrimaryTerm
+            : "operation primary term [" + opPrimaryTerm + "] should be at most [" + pendingPrimaryTerm + "]";
         indexShardOperationPermits.acquire(
             new ActionListener<Releasable>() {
                 @Override
                 public void onResponse(final Releasable releasable) {
-                    if (operationPrimaryTerm < primaryTerm) {
+                    if (opPrimaryTerm < operationPrimaryTerm) {
                         releasable.close();
                         final String message = String.format(
                             Locale.ROOT,
                             "%s operation primary term [%d] is too old (current [%d])",
                             shardId,
-                            operationPrimaryTerm,
-                            primaryTerm);
+                            opPrimaryTerm,
+                            operationPrimaryTerm);
                         onPermitAcquired.onFailure(new IllegalStateException(message));
                     } else {
-                        if (globalCheckpointUpdated == false) {
                         try {
                             updateGlobalCheckpointOnReplica(globalCheckpoint, "operation");
                         } catch (Exception e) {
@@ -2287,7 +2313,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                             onPermitAcquired.onFailure(e);
                             return;
                         }
-                        }
                         onPermitAcquired.onResponse(releasable);
                     }
                 }
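acquireReplicaOperationPermit above uses a double-checked pattern: a cheap volatile read of pendingPrimaryTerm on the hot path, repeated under mutex because a concurrent request may have bumped the term in between. A simplified standalone sketch:

    // Simplified sketch of the double-checked term bump above; not the real permit logic.
    class ReplicaPermitSketch {
        private final Object mutex = new Object();
        private volatile long pendingPrimaryTerm = 1;

        void maybeBump(long opPrimaryTerm, Runnable bump) {
            if (opPrimaryTerm > pendingPrimaryTerm) {         // unsynchronized fast check
                synchronized (mutex) {
                    if (opPrimaryTerm > pendingPrimaryTerm) { // re-check under the mutex
                        bump.run();                           // stands in for bumpPrimaryTerm(...)
                    }
                }
            }
        }
    }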
@@ -19,7 +19,6 @@

 package org.elasticsearch.index.shard;

-import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.Assertions;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
@@ -29,10 +28,12 @@ import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext;
+import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.threadpool.ThreadPool;

 import java.io.Closeable;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -59,7 +60,7 @@ final class IndexShardOperationPermits implements Closeable {
     final Semaphore semaphore = new Semaphore(TOTAL_PERMITS, true); // fair to ensure a blocking thread is not starved
     private final List<DelayedOperation> delayedOperations = new ArrayList<>(); // operations that are delayed
     private volatile boolean closed;
-    private boolean delayed; // does not need to be volatile as all accesses are done under a lock on this
+    private int queuedBlockOperations; // does not need to be volatile as all accesses are done under a lock on this

     // only valid when assertions are enabled. Key is AtomicBoolean associated with each permit to ensure close once semantics.
     // Value is a tuple, with a some debug information supplied by the caller and a stack trace of the acquiring thread
@@ -102,9 +103,6 @@ final class IndexShardOperationPermits implements Closeable {
             final long timeout,
             final TimeUnit timeUnit,
             final CheckedRunnable<E> onBlocked) throws InterruptedException, TimeoutException, E {
-        if (closed) {
-            throw new IndexShardClosedException(shardId);
-        }
         delayOperations();
         try {
             doBlockOperations(timeout, timeUnit, onBlocked);
@@ -147,13 +145,12 @@ final class IndexShardOperationPermits implements Closeable {
     }

     private void delayOperations() {
-        synchronized (this) {
-            if (delayed) {
-                throw new IllegalStateException("operations are already delayed");
-            } else {
-                assert delayedOperations.isEmpty();
-                delayed = true;
-            }
+        if (closed) {
+            throw new IndexShardClosedException(shardId);
+        }
+        synchronized (this) {
+            assert queuedBlockOperations > 0 || delayedOperations.isEmpty();
+            queuedBlockOperations++;
         }
     }
@@ -164,7 +161,7 @@ final class IndexShardOperationPermits implements Closeable {
         if (Assertions.ENABLED) {
             // since delayed is not volatile, we have to synchronize even here for visibility
             synchronized (this) {
-                assert delayed;
+                assert queuedBlockOperations > 0;
             }
         }
         if (semaphore.tryAcquire(TOTAL_PERMITS, timeout, timeUnit)) {
@@ -182,10 +179,14 @@ final class IndexShardOperationPermits implements Closeable {
     private void releaseDelayedOperations() {
         final List<DelayedOperation> queuedActions;
         synchronized (this) {
-            assert delayed;
+            assert queuedBlockOperations > 0;
+            queuedBlockOperations--;
+            if (queuedBlockOperations == 0) {
                 queuedActions = new ArrayList<>(delayedOperations);
                 delayedOperations.clear();
-            delayed = false;
+            } else {
+                queuedActions = Collections.emptyList();
+            }
         }
         if (!queuedActions.isEmpty()) {
             /*
@@ -242,7 +243,7 @@ final class IndexShardOperationPermits implements Closeable {
         final Releasable releasable;
         try {
             synchronized (this) {
-                if (delayed) {
+                if (queuedBlockOperations > 0) {
                     final Supplier<StoredContext> contextSupplier = threadPool.getThreadContext().newRestorableContext(false);
                     final ActionListener<Releasable> wrappedListener;
                     if (executorOnDelay != null) {
@@ -308,6 +309,11 @@ final class IndexShardOperationPermits implements Closeable {
         }
     }

+    synchronized boolean isBlocked() {
+        return queuedBlockOperations > 0;
+    }
+
     /**
      * @return a list of describing each permit that wasn't released yet. The description consist of the debugInfo supplied
      *         when the permit was acquired plus a stack traces that was captured when the permit was request.
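The IndexShardOperationPermits hunks above replace the boolean delayed with an integer queuedBlockOperations so several block requests can overlap; delayed operations are released only when the last outstanding block finishes. A simplified standalone sketch of the counting scheme:

    // Simplified sketch of the counting change above; not the real permit class.
    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;

    class BlockCounterSketch {
        private int queuedBlockOperations; // guarded by synchronized (this)
        private final List<Runnable> delayedOperations = new ArrayList<>();

        synchronized void delayOperations() {
            queuedBlockOperations++;
        }

        void releaseDelayedOperations() {
            final List<Runnable> queuedActions;
            synchronized (this) {
                queuedBlockOperations--;
                if (queuedBlockOperations == 0) {
                    queuedActions = new ArrayList<>(delayedOperations);
                    delayedOperations.clear();
                } else {
                    queuedActions = Collections.emptyList(); // another block is still active
                }
            }
            queuedActions.forEach(Runnable::run);
        }

        synchronized boolean isBlocked() {
            return queuedBlockOperations > 0;
        }
    }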
@@ -136,7 +136,7 @@ public class PrimaryReplicaSyncer extends AbstractComponent {
                 }
             };

-            resync(shardId, indexShard.routingEntry().allocationId().getId(), indexShard.getPrimaryTerm(), wrappedSnapshot,
+            resync(shardId, indexShard.routingEntry().allocationId().getId(), indexShard.getPendingPrimaryTerm(), wrappedSnapshot,
                 startingSeqNo, maxSeqNo, resyncListener);
         } catch (Exception e) {
             try {
@@ -394,7 +394,7 @@ final class StoreRecovery {
                     final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
                     final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO));
                     final String translogUUID = Translog.createEmptyTranslog(
-                        indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPrimaryTerm());
+                        indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPendingPrimaryTerm());
                     store.associateIndexWithNewTranslog(translogUUID);
                 } else if (indexShouldExists) {
                     // since we recover from local, just fill the files and size
@@ -409,11 +409,12 @@ final class StoreRecovery {
             } else {
                 store.createEmpty();
                 final String translogUUID = Translog.createEmptyTranslog(
-                    indexShard.shardPath().resolveTranslog(), SequenceNumbers.NO_OPS_PERFORMED, shardId, indexShard.getPrimaryTerm());
+                    indexShard.shardPath().resolveTranslog(), SequenceNumbers.NO_OPS_PERFORMED, shardId,
+                    indexShard.getPendingPrimaryTerm());
                 store.associateIndexWithNewTranslog(translogUUID);
             }
             indexShard.openEngineAndRecoverFromTranslog();
-            indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm());
+            indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm());
             indexShard.finalizeRecovery();
             indexShard.postRecovery("post recovery from shard_store");
         } catch (EngineException | IOException e) {
@@ -458,11 +459,11 @@ final class StoreRecovery {
             final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
             final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO));
             final String translogUUID = Translog.createEmptyTranslog(
-                indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPrimaryTerm());
+                indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPendingPrimaryTerm());
             store.associateIndexWithNewTranslog(translogUUID);
             assert indexShard.shardRouting.primary() : "only primary shards can recover from store";
             indexShard.openEngineAndRecoverFromTranslog();
-            indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm());
+            indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm());
             indexShard.finalizeRecovery();
             indexShard.postRecovery("restore done");
         } catch (Exception e) {
@@ -491,7 +491,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
         try (ReleasableLock ignored = readLock.acquire()) {
             ensureOpen();
             if (operation.primaryTerm() > current.getPrimaryTerm()) {
-                throw new IllegalArgumentException("Operation term is newer than the current term;"
+                assert false :
+                    "Operation term is newer than the current term; "
+                        + "current term[" + current.getPrimaryTerm() + "], operation term[" + operation + "]";
+                throw new IllegalArgumentException("Operation term is newer than the current term; "
                     + "current term[" + current.getPrimaryTerm() + "], operation term[" + operation + "]");
             }
             return current.add(bytes, operation.seqNo());
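The Translog hunk above puts an assert false in front of the exception, so test runs (where assertions are enabled with -ea) fail hard on a newer operation term while production code still throws the same IllegalArgumentException. A minimal sketch of the pattern:

    // Minimal sketch of the assert-then-throw pattern above.
    class AssertThenThrowSketch {
        void checkTerm(long operationTerm, long currentTerm) {
            if (operationTerm > currentTerm) {
                assert false : "Operation term is newer than the current term; "
                    + "current term[" + currentTerm + "], operation term[" + operationTerm + "]";
                throw new IllegalArgumentException("Operation term is newer than the current term; "
                    + "current term[" + currentTerm + "], operation term[" + operationTerm + "]");
            }
        }
    }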
@@ -250,7 +250,7 @@ public class RecoverySourceHandler {
             try (Releasable ignored = FutureUtils.get(permit)) {
                 // check that the IndexShard still has the primary authority. This needs to be checked under operation permit to prevent
                 // races, as IndexShard will switch its authority only when it holds all operation permits, see IndexShard.relocated()
-                if (primary.isPrimaryMode() == false) {
+                if (primary.isRelocatedPrimary()) {
                     throw new IndexShardRelocatedException(primary.shardId());
                 }
                 runnable.run();
@@ -443,7 +443,8 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
             }
             // TODO: Assign the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2
             final String translogUUID = Translog.createEmptyTranslog(
-                indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, indexShard.getPrimaryTerm());
+                indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId,
+                indexShard.getPendingPrimaryTerm());
             store.associateIndexWithNewTranslog(translogUUID);
         } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
             // this is a fatal exception at this stage.
@ -441,7 +441,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
|
||||||
BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest);
|
BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest);
|
||||||
|
|
||||||
Exception err = new ElasticsearchException("I'm dead <(x.x)>");
|
Exception err = new ElasticsearchException("I'm dead <(x.x)>");
|
||||||
Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0);
|
Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0, 0);
|
||||||
BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult,
|
BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult,
|
||||||
replicaRequest);
|
replicaRequest);
|
||||||
|
|
||||||
|
@@ -478,7 +478,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {

         Exception err = new VersionConflictEngineException(shardId, "_doc", "id",
             "I'm conflicted <(;_;)>");
-        Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0);
+        Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0, 0);
         BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult,
             replicaRequest);

@@ -516,7 +516,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {

         boolean created = randomBoolean();
         Translog.Location resultLocation = new Translog.Location(42, 42, 42);
-        Engine.IndexResult indexResult = new FakeResult(1, 1, created, resultLocation);
+        Engine.IndexResult indexResult = new FakeResult(1, 1, 1, created, resultLocation);
         DocWriteResponse indexResponse = new IndexResponse(shardId, "_doc", "id", 1, 17, 1, created);
         BulkItemResultHolder goodResults =
             new BulkItemResultHolder(indexResponse, indexResult, replicaRequest);
@@ -559,7 +559,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         Translog.Location newLocation = new Translog.Location(1, 1, 1);
         final long version = randomNonNegativeLong();
         final long seqNo = randomNonNegativeLong();
-        Engine.IndexResult indexResult = new IndexResultWithLocation(version, seqNo, created, newLocation);
+        Engine.IndexResult indexResult = new IndexResultWithLocation(version, 0L, seqNo, created, newLocation);
         results = new BulkItemResultHolder(indexResponse, indexResult, replicaRequest);
         assertThat(TransportShardBulkAction.calculateTranslogLocation(original, results),
             equalTo(newLocation));
@@ -629,8 +629,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {

     public class IndexResultWithLocation extends Engine.IndexResult {
         private final Translog.Location location;
-        public IndexResultWithLocation(long version, long seqNo, boolean created, Translog.Location newLocation) {
-            super(version, seqNo, created);
+        public IndexResultWithLocation(long version, long term, long seqNo, boolean created, Translog.Location newLocation) {
+            super(version, term, seqNo, created);
             this.location = newLocation;
         }

@@ -647,8 +647,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
         BulkItemRequest request = new BulkItemRequest(0, updateRequest);
         Exception err = new VersionConflictEngineException(shardId, "_doc", "id",
             "I'm conflicted <(;_;)>");
-        Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0);
-        Engine.DeleteResult deleteResult = new Engine.DeleteResult(1, 1, true);
+        Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0, 0);
+        Engine.DeleteResult deleteResult = new Engine.DeleteResult(1, 1, 1, true);
         DocWriteResponse.Result docWriteResult = DocWriteResponse.Result.CREATED;
         DocWriteResponse.Result deleteWriteResult = DocWriteResponse.Result.DELETED;
         IndexRequest indexRequest = new IndexRequest("index", "_doc", "id");
@@ -830,8 +830,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {

         private final Translog.Location location;

-        protected FakeResult(long version, long seqNo, boolean created, Translog.Location location) {
-            super(version, seqNo, created);
+        protected FakeResult(long version, long term, long seqNo, boolean created, Translog.Location location) {
+            super(version, term, seqNo, created);
             this.location = location;
         }

@@ -256,7 +256,6 @@ public class SearchAsyncActionTests extends ESTestCase {
         assertEquals(10, numRequests.get());
     }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29242")
    public void testFanOutAndCollect() throws InterruptedException {
        SearchRequest request = new SearchRequest();
        request.allowPartialSearchResults(true);
@@ -347,6 +346,9 @@
                     sendReleaseSearchContext(result.getRequestId(), new MockConnection(result.node), OriginalIndices.NONE);
                 }
                 responseListener.onResponse(response);
+                if (latch.getCount() == 0) {
+                    throw new AssertionError("Running a search phase after the latch has reached 0 !!!!");
+                }
                 latch.countDown();
             }
         };
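The removed `@AwaitsFix` in the previous hunk and this added guard belong together: the bug URL points at issue 29242 on `testFanOutAndCollect`, and the guard turns a late-running search phase into an immediate failure instead of a silent extra count-down. A minimal, hypothetical illustration of the race it catches (not code from the PR):

    CountDownLatch latch = new CountDownLatch(1);
    Runnable phase = () -> {
        if (latch.getCount() == 0) {
            throw new AssertionError("phase ran after the test was already released");
        }
        latch.countDown();
    };
    phase.run();   // fine: counts down to zero
    phase.run();   // now throws instead of silently doing nothing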
@@ -587,8 +587,6 @@ public class TransportReplicationActionTests extends ESTestCase {

     public void testPrimaryReference() throws Exception {
         final IndexShard shard = mock(IndexShard.class);
-        final long primaryTerm = 1 + randomInt(200);
-        when(shard.getPrimaryTerm()).thenReturn(primaryTerm);

         AtomicBoolean closed = new AtomicBoolean();
         Releasable releasable = () -> {
@@ -683,9 +681,9 @@


         final IndexShard shard = mock(IndexShard.class);
-        when(shard.getPrimaryTerm()).thenReturn(primaryTerm);
+        when(shard.getPendingPrimaryTerm()).thenReturn(primaryTerm);
         when(shard.routingEntry()).thenReturn(routingEntry);
-        when(shard.isPrimaryMode()).thenReturn(true);
+        when(shard.isRelocatedPrimary()).thenReturn(false);
         IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().shardRoutingTable(shardId);
         Set<String> inSyncIds = randomBoolean() ? Collections.singleton(routingEntry.allocationId().getId()) :
             clusterService.state().metaData().index(index).inSyncAllocationIds(0);
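The mock stubbing flips polarity along with the API: a shard that used to report `isPrimaryMode() == true` now reports `isRelocatedPrimary() == false`, and the dynamic variant in the next hunks inverts the `isRelocated` flag rather than negating it. Both lines are taken from the surrounding hunks:

    when(shard.isRelocatedPrimary()).thenReturn(false);                         // was: isPrimaryMode() -> true
    when(indexShard.isRelocatedPrimary()).thenAnswer(i -> isRelocated.get());   // was: isPrimaryMode() -> isRelocated.get() == false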
@@ -1201,7 +1199,7 @@
         doAnswer(invocation -> {
             long term = (Long)invocation.getArguments()[0];
             ActionListener<Releasable> callback = (ActionListener<Releasable>) invocation.getArguments()[2];
-            final long primaryTerm = indexShard.getPrimaryTerm();
+            final long primaryTerm = indexShard.getPendingPrimaryTerm();
             if (term < primaryTerm) {
                 throw new IllegalArgumentException(String.format(Locale.ROOT, "%s operation term [%d] is too old (current [%d])",
                     shardId, term, primaryTerm));
@@ -1219,9 +1217,9 @@
             }
             return routing;
         });
-        when(indexShard.isPrimaryMode()).thenAnswer(invocationOnMock -> isRelocated.get() == false);
+        when(indexShard.isRelocatedPrimary()).thenAnswer(invocationOnMock -> isRelocated.get());
         doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class));
-        when(indexShard.getPrimaryTerm()).thenAnswer(i ->
+        when(indexShard.getPendingPrimaryTerm()).thenAnswer(i ->
             clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id()));
         return indexShard;
     }
@@ -454,7 +454,7 @@ public class TransportWriteActionTests extends ESTestCase {
         doAnswer(invocation -> {
             long term = (Long)invocation.getArguments()[0];
             ActionListener<Releasable> callback = (ActionListener<Releasable>) invocation.getArguments()[1];
-            final long primaryTerm = indexShard.getPrimaryTerm();
+            final long primaryTerm = indexShard.getPendingPrimaryTerm();
             if (term < primaryTerm) {
                 throw new IllegalArgumentException(String.format(Locale.ROOT, "%s operation term [%d] is too old (current [%d])",
                     shardId, term, primaryTerm));
@@ -472,9 +472,9 @@
             }
             return routing;
         });
-        when(indexShard.isPrimaryMode()).thenAnswer(invocationOnMock -> isRelocated.get() == false);
+        when(indexShard.isRelocatedPrimary()).thenAnswer(invocationOnMock -> isRelocated.get());
         doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class));
-        when(indexShard.getPrimaryTerm()).thenAnswer(i ->
+        when(indexShard.getPendingPrimaryTerm()).thenAnswer(i ->
             clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id()));
         return indexShard;
     }
@@ -76,7 +76,7 @@ public class ShardStateIT extends ESIntegTestCase {
             if (indexService != null) {
                 for (IndexShard shard : indexService) {
                     assertThat("term mismatch for shard " + shard.shardId(),
-                        shard.getPrimaryTerm(), equalTo(metaData.primaryTerm(shard.shardId().id())));
+                        shard.getPendingPrimaryTerm(), equalTo(metaData.primaryTerm(shard.shardId().id())));
                 }
             }
         }
@@ -59,8 +59,10 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;

 import static org.elasticsearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder;
 import static org.hamcrest.Matchers.anyOf;
@@ -221,7 +223,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
             }

             logger.info("--> promoting replica to primary " + replica1.routingEntry());
-            shards.promoteReplicaToPrimary(replica1);
+            shards.promoteReplicaToPrimary(replica1).get();
             indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"2\"}", XContentType.JSON);
             shards.index(indexRequest);
             shards.refresh("test");
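`promoteReplicaToPrimary` now appears to run asynchronously: every promote-then-index test in this diff blocks on the returned future, and a new two-argument overload lets a test promote without running the usual primary-replica resync. Usage as it appears in these hunks (that the return type is a `Future` is inferred from the `.get()` calls):

    shards.promoteReplicaToPrimary(replica1).get();                      // wait for promotion (and resync) to finish
    shards.promoteReplicaToPrimary(replica1, (shard, listener) -> {});   // promote without the default resync action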
@@ -234,6 +236,102 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
         }
     }

+    public void testReplicaTermIncrementWithConcurrentPrimaryPromotion() throws Exception {
+        Map<String, String> mappings =
+            Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}");
+        try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(2, mappings))) {
+            shards.startAll();
+            long primaryPrimaryTerm = shards.getPrimary().getPendingPrimaryTerm();
+            List<IndexShard> replicas = shards.getReplicas();
+            IndexShard replica1 = replicas.get(0);
+            IndexShard replica2 = replicas.get(1);
+
+            shards.promoteReplicaToPrimary(replica1, (shard, listener) -> {});
+            long newReplica1Term = replica1.getPendingPrimaryTerm();
+            assertEquals(primaryPrimaryTerm + 1, newReplica1Term);
+
+            assertEquals(primaryPrimaryTerm, replica2.getPendingPrimaryTerm());
+
+            IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"1\"}", XContentType.JSON);
+            BulkShardRequest replicationRequest = indexOnPrimary(indexRequest, replica1);
+
+            CyclicBarrier barrier = new CyclicBarrier(2);
+            Thread t1 = new Thread(() -> {
+                try {
+                    barrier.await();
+                    indexOnReplica(replicationRequest, shards, replica2, newReplica1Term);
+                } catch (IllegalStateException ise) {
+                    assertThat(ise.getMessage(), containsString("is too old"));
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            });
+            Thread t2 = new Thread(() -> {
+                try {
+                    barrier.await();
+                    shards.promoteReplicaToPrimary(replica2).get();
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            });
+            t2.start();
+            t1.start();
+            t1.join();
+            t2.join();
+
+            assertEquals(newReplica1Term + 1, replica2.getPendingPrimaryTerm());
+        }
+    }
+
+    public void testReplicaOperationWithConcurrentPrimaryPromotion() throws Exception {
+        Map<String, String> mappings =
+            Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}");
+        try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(1, mappings))) {
+            shards.startAll();
+            long primaryPrimaryTerm = shards.getPrimary().getPendingPrimaryTerm();
+            IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"1\"}", XContentType.JSON);
+            BulkShardRequest replicationRequest = indexOnPrimary(indexRequest, shards.getPrimary());
+
+            List<IndexShard> replicas = shards.getReplicas();
+            IndexShard replica = replicas.get(0);
+
+            CyclicBarrier barrier = new CyclicBarrier(2);
+            AtomicBoolean successFullyIndexed = new AtomicBoolean();
+            Thread t1 = new Thread(() -> {
+                try {
+                    barrier.await();
+                    indexOnReplica(replicationRequest, shards, replica, primaryPrimaryTerm);
+                    successFullyIndexed.set(true);
+                } catch (IllegalStateException ise) {
+                    assertThat(ise.getMessage(), containsString("is too old"));
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            });
+            Thread t2 = new Thread(() -> {
+                try {
+                    barrier.await();
+                    shards.promoteReplicaToPrimary(replica).get();
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            });
+            t2.start();
+            t1.start();
+            t1.join();
+            t2.join();
+
+            assertEquals(primaryPrimaryTerm + 1, replica.getPendingPrimaryTerm());
+            if (successFullyIndexed.get()) {
+                try (Translog.Snapshot snapshot = getTranslog(replica).newSnapshot()) {
+                    assertThat(snapshot.totalOperations(), equalTo(1));
+                    Translog.Operation op = snapshot.next();
+                    assertThat(op.primaryTerm(), equalTo(primaryPrimaryTerm));
+                }
+            }
+        }
+    }
+
     /**
      * test document failures (failures after seq_no generation) are added as noop operation to the translog
      * for primary and replica shards
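Both new tests race a replica-side operation against a concurrent promotion through a shared CyclicBarrier, so each run lands on one side of the same invariant: either the operation is accepted and stays stamped with the old term, or it is rejected because the replica's term has already advanced. A hedged sketch of that invariant -- the real check lives in IndexShard's replica-permit acquisition, which is not shown here; only the "is too old" message is confirmed by the assertions above:

    // sketch: reject replica operations carrying a stale term
    if (opPrimaryTerm < pendingPrimaryTerm) {
        throw new IllegalStateException("operation primary term [" + opPrimaryTerm + "] is too old");
    }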
@@ -255,7 +353,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
                 .source("{}", XContentType.JSON)
             );
             assertTrue(response.isFailed());
-            assertNoOpTranslogOperationForDocumentFailure(shards, 1, shards.getPrimary().getPrimaryTerm(), failureMessage);
+            assertNoOpTranslogOperationForDocumentFailure(shards, 1, shards.getPrimary().getPendingPrimaryTerm(), failureMessage);
             shards.assertAllEqual(0);

             // add some replicas
@@ -269,7 +367,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
                 .source("{}", XContentType.JSON)
             );
             assertTrue(response.isFailed());
-            assertNoOpTranslogOperationForDocumentFailure(shards, 2, shards.getPrimary().getPrimaryTerm(), failureMessage);
+            assertNoOpTranslogOperationForDocumentFailure(shards, 2, shards.getPrimary().getPendingPrimaryTerm(), failureMessage);
             shards.assertAllEqual(0);
         }
     }
@@ -361,7 +459,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
             // Make sure that peer-recovery transfers all but non-overridden operations.
             IndexShard replica3 = shards.addReplica();
             logger.info("--> Promote replica2 as the primary");
-            shards.promoteReplicaToPrimary(replica2);
+            shards.promoteReplicaToPrimary(replica2).get();
             logger.info("--> Recover replica3 from replica2");
             recoverReplica(replica3, replica2, true);
             try (Translog.Snapshot snapshot = getTranslog(replica3).newSnapshot()) {
@@ -245,7 +245,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestCase
             }
         }

-        shards.promoteReplicaToPrimary(newPrimary);
+        shards.promoteReplicaToPrimary(newPrimary).get();

         // check that local checkpoint of new primary is properly tracked after primary promotion
         assertThat(newPrimary.getLocalCheckpoint(), equalTo(totalDocs - 1L));
@@ -432,7 +432,8 @@
             while ((next = snapshot.next()) != null) {
                 translogOperations++;
                 assertThat("unexpected op: " + next, (int)next.seqNo(), lessThan(initialDocs + extraDocs));
-                assertThat("unexpected primaryTerm: " + next.primaryTerm(), next.primaryTerm(), is(oldPrimary.getPrimaryTerm()));
+                assertThat("unexpected primaryTerm: " + next.primaryTerm(), next.primaryTerm(),
+                    is(oldPrimary.getPendingPrimaryTerm()));
                 final Translog.Source source = next.getSource();
                 assertThat(source.source.utf8ToString(), is("{ \"f\": \"normal\"}"));
             }
@@ -770,8 +770,10 @@ public class ReplicationTrackerTests extends ESTestCase {
         assertThat(newPrimary.routingTable, equalTo(oldPrimary.routingTable));
         assertThat(newPrimary.replicationGroup, equalTo(oldPrimary.replicationGroup));

+        assertFalse(oldPrimary.relocated);
         oldPrimary.completeRelocationHandoff();
         assertFalse(oldPrimary.primaryMode);
+        assertTrue(oldPrimary.relocated);
     }

     public void testIllegalStateExceptionIfUnknownAllocationId() {
@@ -71,7 +71,7 @@ public class IndexShardOperationPermitsTests extends ESTestCase {
     public static void setupThreadPool() {
         int writeThreadPoolSize = randomIntBetween(1, 2);
         int writeThreadPoolQueueSize = randomIntBetween(1, 2);
-        threadPool = new TestThreadPool("IndexShardOperationsLockTests",
+        threadPool = new TestThreadPool("IndexShardOperationPermitsTests",
             Settings.builder()
                 .put("thread_pool." + ThreadPool.Names.WRITE + ".size", writeThreadPoolSize)
                 .put("thread_pool." + ThreadPool.Names.WRITE + ".queue_size", writeThreadPoolQueueSize)
@@ -100,7 +100,7 @@ public class IndexShardOperationPermitsTests extends ESTestCase {
         assertThat(permits.getActiveOperationsCount(), equalTo(0));
     }

-    public void testAllOperationsInvoked() throws InterruptedException, TimeoutException, ExecutionException {
+    public void testAllOperationsInvoked() throws InterruptedException, TimeoutException {
         int numThreads = 10;

         class DummyException extends RuntimeException {}
@@ -187,7 +187,7 @@ public class IndexShardOperationPermitsTests extends ESTestCase {
         future.get().close();
     }

-    public void testOperationsIfClosed() throws ExecutionException, InterruptedException {
+    public void testOperationsIfClosed() {
         PlainActionFuture<Releasable> future = new PlainActionFuture<>();
         permits.close();
         permits.acquire(future, ThreadPool.Names.GENERIC, true, "");
@@ -195,10 +195,12 @@ public class IndexShardOperationPermitsTests extends ESTestCase {
         assertThat(exception.getCause(), instanceOf(IndexShardClosedException.class));
     }

-    public void testBlockIfClosed() throws ExecutionException, InterruptedException {
+    public void testBlockIfClosed() {
         permits.close();
         expectThrows(IndexShardClosedException.class, () -> permits.blockOperations(randomInt(10), TimeUnit.MINUTES,
             () -> { throw new IllegalArgumentException("fake error"); }));
+        expectThrows(IndexShardClosedException.class, () -> permits.asyncBlockOperations(randomInt(10), TimeUnit.MINUTES,
+            () -> { throw new IllegalArgumentException("fake error"); }, e -> { throw new AssertionError(e); }));
     }

     public void testOperationsDelayedIfBlock() throws ExecutionException, InterruptedException, TimeoutException {
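The test now also covers the asynchronous blocking variant. Its parameter shape, as inferred from this call site and the one in the next hunk, is (timeout, unit, onBlocked, onFailure):

    permits.asyncBlockOperations(randomInt(10), TimeUnit.MINUTES,
        () -> { /* runs once all permits have been drained */ },
        e -> { throw new AssertionError(e); });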
@@ -210,6 +212,36 @@ public class IndexShardOperationPermitsTests extends ESTestCase {
         future.get(1, TimeUnit.HOURS).close();
     }

+    public void testGetBlockWhenBlocked() throws ExecutionException, InterruptedException, TimeoutException {
+        PlainActionFuture<Releasable> future = new PlainActionFuture<>();
+        final CountDownLatch blockAcquired = new CountDownLatch(1);
+        final CountDownLatch releaseBlock = new CountDownLatch(1);
+        final AtomicBoolean blocked = new AtomicBoolean();
+        try (Releasable ignored = blockAndWait()) {
+            permits.acquire(future, ThreadPool.Names.GENERIC, true, "");
+
+            permits.asyncBlockOperations(
+                30,
+                TimeUnit.MINUTES,
+                () -> {
+                    blocked.set(true);
+                    blockAcquired.countDown();
+                    releaseBlock.await();
+                },
+                e -> {
+                    throw new RuntimeException(e);
+                });
+            assertFalse(blocked.get());
+            assertFalse(future.isDone());
+        }
+        blockAcquired.await();
+        assertTrue(blocked.get());
+        assertFalse(future.isDone());
+        releaseBlock.countDown();
+
+        future.get(1, TimeUnit.HOURS).close();
+    }
+
     /**
      * Tests that the ThreadContext is restored when a operation is executed after it has been delayed due to a block
      */
@@ -297,7 +297,7 @@ public class IndexShardTests extends IndexShardTestCase {
             // expected
         }
         try {
-            indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, null,
+            indexShard.acquireReplicaOperationPermit(indexShard.getPendingPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, null,
                 ThreadPool.Names.WRITE, "");
             fail("we should not be able to increment anymore");
         } catch (IndexShardClosedException e) {
@@ -308,7 +308,7 @@ public class IndexShardTests extends IndexShardTestCase {
     public void testRejectOperationPermitWithHigherTermWhenNotStarted() throws IOException {
         IndexShard indexShard = newShard(false);
         expectThrows(IndexShardNotStartedException.class, () ->
            indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm() + randomIntBetween(1, 100),
-            indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm() + randomIntBetween(1, 100),
+            indexShard.acquireReplicaOperationPermit(indexShard.getPendingPrimaryTerm() + randomIntBetween(1, 100),
                 SequenceNumbers.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.WRITE, ""));
         closeShards(indexShard);
     }
@@ -331,7 +331,7 @@ public class IndexShardTests extends IndexShardTestCase {
                 throw new RuntimeException(e);
             }
             indexShard.acquireReplicaOperationPermit(
-                indexShard.getPrimaryTerm(),
+                indexShard.getPendingPrimaryTerm(),
                 indexShard.getGlobalCheckpoint(),
                 new ActionListener<Releasable>() {
                     @Override
@@ -418,16 +418,13 @@ public class IndexShardTests extends IndexShardTestCase {
     }

     /**
-     * This test makes sure that people can use the shard routing entry to check whether a shard was already promoted to
-     * a primary. Concretely this means, that when we publish the routing entry via {@link IndexShard#routingEntry()} the following
-     * should have happened
-     * 1) Internal state (ala ReplicationTracker) have been updated
-     * 2) Primary term is set to the new term
+     * This test makes sure that people can use the shard routing entry + take an operation permit to check whether a shard was already
+     * promoted to a primary.
      */
     public void testPublishingOrderOnPromotion() throws IOException, InterruptedException, BrokenBarrierException {
         final IndexShard indexShard = newShard(false);
         recoveryEmptyReplica(indexShard, randomBoolean());
-        final long promotedTerm = indexShard.getPrimaryTerm() + 1;
+        final long promotedTerm = indexShard.getPendingPrimaryTerm() + 1;
         final CyclicBarrier barrier = new CyclicBarrier(2);
         final AtomicBoolean stop = new AtomicBoolean();
         final Thread thread = new Thread(() -> {
@@ -438,10 +435,14 @@ public class IndexShardTests extends IndexShardTestCase {
             }
             while(stop.get() == false) {
                 if (indexShard.routingEntry().primary()) {
-                    assertThat(indexShard.getPrimaryTerm(), equalTo(promotedTerm));
+                    assertThat(indexShard.getPendingPrimaryTerm(), equalTo(promotedTerm));
+                    final PlainActionFuture<Releasable> permitAcquiredFuture = new PlainActionFuture<>();
+                    indexShard.acquirePrimaryOperationPermit(permitAcquiredFuture, ThreadPool.Names.SAME, "bla");
+                    try (Releasable ignored = permitAcquiredFuture.actionGet()) {
                         assertThat(indexShard.getReplicationGroup(), notNullValue());
                     }
                 }
+            }
         });
         thread.start();

@@ -504,7 +505,7 @@ public class IndexShardTests extends IndexShardTestCase {

         // promote the replica
         final ShardRouting replicaRouting = indexShard.routingEntry();
-        final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 10000);
+        final long newPrimaryTerm = indexShard.getPendingPrimaryTerm() + between(1, 10000);
         final ShardRouting primaryRouting =
             newShardRouting(
                 replicaRouting.shardId(),
@@ -558,7 +559,7 @@ public class IndexShardTests extends IndexShardTestCase {
             ShardRouting replicaRouting = indexShard.routingEntry();
             ShardRouting primaryRouting = newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), null,
                 true, ShardRoutingState.STARTED, replicaRouting.allocationId());
-            final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 1000);
+            final long newPrimaryTerm = indexShard.getPendingPrimaryTerm() + between(1, 1000);
             indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> {
                 assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));
             }, 0L,
@@ -568,11 +569,14 @@ public class IndexShardTests extends IndexShardTestCase {
         } else {
             indexShard = newStartedShard(true);
         }
-        final long primaryTerm = indexShard.getPrimaryTerm();
+        final long primaryTerm = indexShard.getPendingPrimaryTerm();
         assertEquals(0, indexShard.getActiveOperationsCount());
         if (indexShard.routingEntry().isRelocationTarget() == false) {
             try {
-                indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), null, ThreadPool.Names.WRITE, "");
+                final PlainActionFuture<Releasable> permitAcquiredFuture = new PlainActionFuture<>();
+                indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), permitAcquiredFuture,
+                    ThreadPool.Names.WRITE, "");
+                permitAcquiredFuture.actionGet();
                 fail("shard shouldn't accept operations as replica");
             } catch (IllegalStateException ignored) {

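With the listener argument no longer passed as `null` (an inference from this rewrite; the old call site supplied `null` directly), the tests acquire permits synchronously by funnelling the callback through a PlainActionFuture. The pattern, assembled from the hunk above:

    final PlainActionFuture<Releasable> permitAcquiredFuture = new PlainActionFuture<>();
    indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), permitAcquiredFuture,
        ThreadPool.Names.WRITE, "");
    permitAcquiredFuture.actionGet();   // rethrows, e.g. the IllegalStateException the test expects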
@@ -650,7 +654,7 @@ public class IndexShardTests extends IndexShardTestCase {
             assertThat(e, hasToString(containsString("shard " + shardRouting + " is not a primary")));
         }

-        final long primaryTerm = indexShard.getPrimaryTerm();
+        final long primaryTerm = indexShard.getPendingPrimaryTerm();
         final long translogGen = engineClosed ? -1 : getTranslog(indexShard).getGeneration().translogFileGeneration;

         final Releasable operation1;
@@ -728,7 +732,7 @@ public class IndexShardTests extends IndexShardTestCase {
         ActionListener<Releasable> listener = new ActionListener<Releasable>() {
             @Override
             public void onResponse(Releasable releasable) {
-                assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm));
+                assertThat(indexShard.getPendingPrimaryTerm(), equalTo(newPrimaryTerm));
                 assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));
                 assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint));
                 assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint));
@@ -765,7 +769,7 @@ public class IndexShardTests extends IndexShardTestCase {
             barrier.await();
             if (indexShard.state() == IndexShardState.CREATED || indexShard.state() == IndexShardState.RECOVERING) {
                 barrier.await();
-                assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
+                assertThat(indexShard.getPendingPrimaryTerm(), equalTo(primaryTerm));
                 assertFalse(onResponse.get());
                 assertThat(onFailure.get(), instanceOf(IndexShardNotStartedException.class));
                 Releasables.close(operation1);
@@ -774,18 +778,19 @@ public class IndexShardTests extends IndexShardTestCase {
                 // our operation should be blocked until the previous operations complete
                 assertFalse(onResponse.get());
                 assertNull(onFailure.get());
-                assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
+                assertThat(indexShard.operationPrimaryTerm, equalTo(primaryTerm));
                 assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(primaryTerm));
                 Releasables.close(operation1);
                 // our operation should still be blocked
                 assertFalse(onResponse.get());
                 assertNull(onFailure.get());
-                assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
+                assertThat(indexShard.operationPrimaryTerm, equalTo(primaryTerm));
                 assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(primaryTerm));
                 Releasables.close(operation2);
                 barrier.await();
                 // now lock acquisition should have succeeded
-                assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm));
+                assertThat(indexShard.operationPrimaryTerm, equalTo(newPrimaryTerm));
+                assertThat(indexShard.getPendingPrimaryTerm(), equalTo(newPrimaryTerm));
                 assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));
                 if (engineClosed) {
                     assertFalse(onResponse.get());
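The reworked assertions separate the two terms: while the term bump is still waiting behind in-flight permits, `operationPrimaryTerm` stays at the old value (the pending term may already have moved); once the barrier releases, both agree. In sketch form -- an inferred invariant, consistent with every assertion in this hunk but not stated in the diff itself:

    // while blocked:  operationPrimaryTerm == oldTerm
    // after release:  operationPrimaryTerm == getPendingPrimaryTerm() == newTerm
    assert indexShard.operationPrimaryTerm <= indexShard.getPendingPrimaryTerm();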
@@ -884,7 +889,7 @@ public class IndexShardTests extends IndexShardTestCase {

         final CountDownLatch latch = new CountDownLatch(1);
         indexShard.acquireReplicaOperationPermit(
-            indexShard.getPrimaryTerm() + 1,
+            indexShard.getPendingPrimaryTerm() + 1,
             globalCheckpoint,
             new ActionListener<Releasable>() {
                 @Override
@@ -906,7 +911,7 @@ public class IndexShardTests extends IndexShardTestCase {
         final CountDownLatch resyncLatch = new CountDownLatch(1);
         indexShard.updateShardState(
             newRouting,
-            indexShard.getPrimaryTerm() + 1,
+            indexShard.getPendingPrimaryTerm() + 1,
             (s, r) -> resyncLatch.countDown(),
             1L,
             Collections.singleton(newRouting.allocationId().getId()),
@@ -938,7 +943,7 @@ public class IndexShardTests extends IndexShardTestCase {
             Math.toIntExact(indexShard.getLocalCheckpoint()));
         final CountDownLatch latch = new CountDownLatch(1);
         indexShard.acquireReplicaOperationPermit(
-            indexShard.primaryTerm + 1,
+            indexShard.pendingPrimaryTerm + 1,
             globalCheckpoint,
             new ActionListener<Releasable>() {
                 @Override
@@ -975,7 +980,7 @@ public class IndexShardTests extends IndexShardTestCase {
         final CyclicBarrier barrier = new CyclicBarrier(3);
         final CountDownLatch latch = new CountDownLatch(2);

-        final long primaryTerm = indexShard.getPrimaryTerm();
+        final long primaryTerm = indexShard.getPendingPrimaryTerm();
         final AtomicLong counter = new AtomicLong();
         final AtomicReference<Exception> onFailure = new AtomicReference<>();

@@ -993,7 +998,7 @@ public class IndexShardTests extends IndexShardTestCase {
             @Override
             public void onResponse(Releasable releasable) {
                 counter.incrementAndGet();
-                assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm + increment));
+                assertThat(indexShard.getPendingPrimaryTerm(), equalTo(primaryTerm + increment));
                 latch.countDown();
                 releasable.close();
             }
@@ -1037,7 +1042,7 @@ public class IndexShardTests extends IndexShardTestCase {
             assertThat(counter.get(), equalTo(2L));
         }

-        assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm + Math.max(firstIncrement, secondIncrement)));
+        assertThat(indexShard.getPendingPrimaryTerm(), equalTo(primaryTerm + Math.max(firstIncrement, secondIncrement)));

         closeShards(indexShard);
     }
@@ -1416,14 +1421,14 @@ public class IndexShardTests extends IndexShardTestCase {
         recoveryThread.start();
         latch.await();
             // recovery can only be finalized after we release the current primaryOperationLock
-            assertTrue(shard.isPrimaryMode());
+            assertFalse(shard.isRelocatedPrimary());
         }
         // recovery can be now finalized
         recoveryThread.join();
-        assertFalse(shard.isPrimaryMode());
+        assertTrue(shard.isRelocatedPrimary());
         try (Releasable ignored = acquirePrimaryOperationPermitBlockingly(shard)) {
             // lock can again be acquired
-            assertFalse(shard.isPrimaryMode());
+            assertTrue(shard.isRelocatedPrimary());
         }

         closeShards(shard);
@@ -1465,7 +1470,7 @@ public class IndexShardTests extends IndexShardTestCase {

     public void testStressRelocated() throws Exception {
         final IndexShard shard = newStartedShard(true);
-        assertTrue(shard.isPrimaryMode());
+        assertFalse(shard.isRelocatedPrimary());
         IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node"));
         final int numThreads = randomIntBetween(2, 4);
         Thread[] indexThreads = new Thread[numThreads];
@@ -1501,14 +1506,14 @@ public class IndexShardTests extends IndexShardTestCase {
         assertThat(relocated.get(), equalTo(false));
         assertThat(shard.getActiveOperationsCount(), greaterThan(0));
         // ensure we only transition after pending operations completed
-        assertTrue(shard.isPrimaryMode());
+        assertFalse(shard.isRelocatedPrimary());
         // complete pending operations
         barrier.await();
         // complete recovery/relocation
         recoveryThread.join();
         // ensure relocated successfully once pending operations are done
         assertThat(relocated.get(), equalTo(true));
-        assertFalse(shard.isPrimaryMode());
+        assertTrue(shard.isRelocatedPrimary());
         assertThat(shard.getActiveOperationsCount(), equalTo(0));

         for (Thread indexThread : indexThreads) {
@@ -1572,7 +1577,7 @@ public class IndexShardTests extends IndexShardTestCase {
         cyclicBarrier.await();
         relocationThread.join();
         cancellingThread.join();
-        if (shard.isPrimaryMode() == false) {
+        if (shard.isRelocatedPrimary()) {
             logger.debug("shard was relocated successfully");
             assertThat(cancellingException.get(), instanceOf(IllegalIndexShardStateException.class));
             assertThat("current routing:" + shard.routingEntry(), shard.routingEntry().relocating(), equalTo(true));
@@ -1719,7 +1724,7 @@ public class IndexShardTests extends IndexShardTestCase {
             while ((operation = snapshot.next()) != null) {
                 if (operation.opType() == Translog.Operation.Type.NO_OP) {
                     numNoops++;
-                    assertEquals(newShard.getPrimaryTerm(), operation.primaryTerm());
+                    assertEquals(newShard.getPendingPrimaryTerm(), operation.primaryTerm());
                     assertEquals(0, operation.seqNo());
                 }
             }
@@ -1826,7 +1831,7 @@ public class IndexShardTests extends IndexShardTestCase {
         flushShard(shard);
         assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1"));
         // Simulate resync (without rollback): Noop #1, index #2
-        acquireReplicaOperationPermitBlockingly(shard, shard.primaryTerm + 1);
+        acquireReplicaOperationPermitBlockingly(shard, shard.pendingPrimaryTerm + 1);
         shard.markSeqNoAsNoop(1, "test");
         shard.applyIndexOperationOnReplica(2, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
             SourceToParse.source(indexName, "_doc", "doc-2", new BytesArray("{}"), XContentType.JSON));
@@ -1837,7 +1842,8 @@ public class IndexShardTests extends IndexShardTestCase {
         IndexShard newShard = reinitShard(shard,
             newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), true, ShardRoutingState.INITIALIZING,
                 RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE));
-        newShard.primaryTerm++;
+        newShard.pendingPrimaryTerm++;
+        newShard.operationPrimaryTerm++;
         DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
         newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
         assertTrue(newShard.recoverFromStore());
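Because this test fakes a promotion by poking the (apparently package-private) term fields directly rather than going through the permit machinery, both counters have to advance in lockstep; bumping only `pendingPrimaryTerm` would leave the shard looking like a term bump is still in flight. Both lines are from the hunk above:

    newShard.pendingPrimaryTerm++;
    newShard.operationPrimaryTerm++;   // keep the two terms consistent for the recovery that follows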
@@ -1852,7 +1858,7 @@ public class IndexShardTests extends IndexShardTestCase {
         ShardRouting inRecoveryRouting = ShardRoutingHelper.relocate(origRouting, "some_node");
         IndexShardTestCase.updateRoutingEntry(shard, inRecoveryRouting);
         shard.relocated(primaryContext -> {});
-        assertFalse(shard.isPrimaryMode());
+        assertTrue(shard.isRelocatedPrimary());
         try {
             IndexShardTestCase.updateRoutingEntry(shard, origRouting);
             fail("Expected IndexShardRelocatedException");
@@ -2160,11 +2166,11 @@ public class IndexShardTests extends IndexShardTestCase {
         int numCorruptEntries = 0;
         for (int i = 0; i < numTotalEntries; i++) {
             if (randomBoolean()) {
-                operations.add(new Translog.Index("_doc", "1", 0, primary.getPrimaryTerm(), 1,
+                operations.add(new Translog.Index("_doc", "1", 0, primary.getPendingPrimaryTerm(), 1,
                     "{\"foo\" : \"bar\"}".getBytes(Charset.forName("UTF-8")), null, -1));
             } else {
                 // corrupt entry
-                operations.add(new Translog.Index("_doc", "2", 1, primary.getPrimaryTerm(), 1,
+                operations.add(new Translog.Index("_doc", "2", 1, primary.getPendingPrimaryTerm(), 1,
                     "{\"foo\" : \"bar}".getBytes(Charset.forName("UTF-8")), null, -1));
                 numCorruptEntries++;
             }
@@ -148,7 +148,7 @@ public class IndexingOperationListenerTests extends ESTestCase {
         ParsedDocument doc = InternalEngineTests.createParsedDoc("1", null);
         Engine.Delete delete = new Engine.Delete("test", "1", new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong());
         Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong(), doc);
-        compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbers.UNASSIGNED_SEQ_NO, true));
+        compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, 0, SequenceNumbers.UNASSIGNED_SEQ_NO, true));
         assertEquals(0, preIndex.get());
         assertEquals(0, postIndex.get());
         assertEquals(0, postIndexException.get());
@@ -172,7 +172,7 @@ public class IndexingOperationListenerTests extends ESTestCase {
         assertEquals(2, postDelete.get());
         assertEquals(2, postDeleteException.get());

-        compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, SequenceNumbers.UNASSIGNED_SEQ_NO, false));
+        compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, 0, SequenceNumbers.UNASSIGNED_SEQ_NO, false));
         assertEquals(0, preIndex.get());
         assertEquals(2, postIndex.get());
         assertEquals(0, postIndexException.get());
@@ -83,7 +83,7 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase {
         boolean syncNeeded = numDocs > 0;

         String allocationId = shard.routingEntry().allocationId().getId();
-        shard.updateShardState(shard.routingEntry(), shard.getPrimaryTerm(), null, 1000L, Collections.singleton(allocationId),
+        shard.updateShardState(shard.routingEntry(), shard.getPendingPrimaryTerm(), null, 1000L, Collections.singleton(allocationId),
             new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), Collections.emptySet());
         shard.updateLocalCheckpointForShard(allocationId, globalCheckPoint);
         assertEquals(globalCheckPoint, shard.getGlobalCheckpoint());
@ -142,7 +142,7 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase {
|
||||||
}
|
}
|
||||||
|
|
||||||
String allocationId = shard.routingEntry().allocationId().getId();
|
String allocationId = shard.routingEntry().allocationId().getId();
|
||||||
shard.updateShardState(shard.routingEntry(), shard.getPrimaryTerm(), null, 1000L, Collections.singleton(allocationId),
|
shard.updateShardState(shard.routingEntry(), shard.getPendingPrimaryTerm(), null, 1000L, Collections.singleton(allocationId),
|
||||||
new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), Collections.emptySet());
|
new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), Collections.emptySet());
|
||||||
|
|
||||||
CountDownLatch syncCalledLatch = new CountDownLatch(1);
|
CountDownLatch syncCalledLatch = new CountDownLatch(1);
|
||||||
|
|
|
@@ -2669,7 +2669,7 @@ public class TranslogTests extends ESTestCase {

        Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm,
            1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false);
-        Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true);
+        Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomPrimaryTerm, randomSeqNum, true);
        Translog.Index index = new Translog.Index(eIndex, eIndexResult);

        BytesStreamOutput out = new BytesStreamOutput();

@@ -2680,7 +2680,7 @@ public class TranslogTests extends ESTestCase {

        Engine.Delete eDelete = new Engine.Delete(doc.type(), doc.id(), newUid(doc), randomSeqNum, randomPrimaryTerm,
            2, VersionType.INTERNAL, Origin.PRIMARY, 0);
-        Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true);
+        Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomPrimaryTerm, randomSeqNum, true);
        Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult);

        out = new BytesStreamOutput();

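Note: Engine.IndexResult and Engine.DeleteResult now take the operation's primary term as their second constructor argument, between the version and the sequence number. A minimal sketch, assuming long variables `primaryTerm` and `seqNo` are in scope:

    // version, primary term, seq no, created/found flag
    Engine.IndexResult indexResult = new Engine.IndexResult(1L, primaryTerm, seqNo, true);
    Engine.DeleteResult deleteResult = new Engine.DeleteResult(2L, primaryTerm, seqNo, true);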
@@ -179,12 +179,12 @@ public class RecoverySourceHandlerTests extends ESTestCase {
        final int initialNumberOfDocs = randomIntBetween(16, 64);
        for (int i = 0; i < initialNumberOfDocs; i++) {
            final Engine.Index index = getIndex(Integer.toString(i));
-            operations.add(new Translog.Index(index, new Engine.IndexResult(1, SequenceNumbers.UNASSIGNED_SEQ_NO, true)));
+            operations.add(new Translog.Index(index, new Engine.IndexResult(1, 1, SequenceNumbers.UNASSIGNED_SEQ_NO, true)));
        }
        final int numberOfDocsWithValidSequenceNumbers = randomIntBetween(16, 64);
        for (int i = initialNumberOfDocs; i < initialNumberOfDocs + numberOfDocsWithValidSequenceNumbers; i++) {
            final Engine.Index index = getIndex(Integer.toString(i));
-            operations.add(new Translog.Index(index, new Engine.IndexResult(1, i - initialNumberOfDocs, true)));
+            operations.add(new Translog.Index(index, new Engine.IndexResult(1, 1, i - initialNumberOfDocs, true)));
        }
        operations.add(null);
        final long startingSeqNo = randomIntBetween(0, numberOfDocsWithValidSequenceNumbers - 1);

@@ -395,7 +395,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
        final IndexShard shard = mock(IndexShard.class);
        when(shard.seqNoStats()).thenReturn(mock(SeqNoStats.class));
        when(shard.segmentStats(anyBoolean())).thenReturn(mock(SegmentsStats.class));
-        when(shard.isPrimaryMode()).thenReturn(false);
+        when(shard.isRelocatedPrimary()).thenReturn(true);
        when(shard.acquireSafeIndexCommit()).thenReturn(mock(Engine.IndexCommitRef.class));
        doAnswer(invocation -> {
            ((ActionListener<Releasable>)invocation.getArguments()[0]).onResponse(() -> {});

@@ -444,7 +444,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
        final CancellableThreads cancellableThreads = new CancellableThreads();
        final IndexShard shard = mock(IndexShard.class);
        final AtomicBoolean freed = new AtomicBoolean(true);
-        when(shard.isPrimaryMode()).thenReturn(true);
+        when(shard.isRelocatedPrimary()).thenReturn(false);
        doAnswer(invocation -> {
            freed.set(false);
            ((ActionListener<Releasable>)invocation.getArguments()[0]).onResponse(() -> freed.set(true));

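Note: the recovery-source mocks above replace IndexShard#isPrimaryMode() with IndexShard#isRelocatedPrimary(); the stubbed boolean flips because the new method asks the opposite question. A Mockito sketch:

    IndexShard shard = mock(IndexShard.class);
    // "not relocated" is the new way of saying "still the active primary"
    when(shard.isRelocatedPrimary()).thenReturn(false);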
@@ -201,7 +201,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
        if (randomBoolean()) {
            // create a new translog
            translogUUIDtoUse = Translog.createEmptyTranslog(replica.shardPath().resolveTranslog(), flushedDocs,
-                replica.shardId(), replica.getPrimaryTerm());
+                replica.shardId(), replica.getPendingPrimaryTerm());
            translogGenToUse = 1;
        } else {
            translogUUIDtoUse = translogGeneration.translogUUID;

@@ -35,8 +35,10 @@ import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.ESIntegTestCase;

 import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;

 import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder;
 import static org.hamcrest.Matchers.equalTo;

@@ -105,7 +107,6 @@ public class QueryProfilerIT extends ESIntegTestCase {
     * search for each query. It then does some basic sanity checking of score and hits
     * to make sure the profiling doesn't interfere with the hits being returned
     */
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32492")
    public void testProfileMatchesRegular() throws Exception {
        createIndex("test");
        ensureGreen();

@@ -150,6 +151,10 @@ public class QueryProfilerIT extends ESIntegTestCase {
        SearchResponse vanillaResponse = responses[0].getResponse();
        SearchResponse profileResponse = responses[1].getResponse();

+        assertThat(vanillaResponse.getFailedShards(), equalTo(0));
+        assertThat(profileResponse.getFailedShards(), equalTo(0));
+        assertThat(vanillaResponse.getSuccessfulShards(), equalTo(profileResponse.getSuccessfulShards()));
+
        float vanillaMaxScore = vanillaResponse.getHits().getMaxScore();
        float profileMaxScore = profileResponse.getHits().getMaxScore();
        if (Float.isNaN(vanillaMaxScore)) {

@@ -160,10 +165,19 @@ public class QueryProfilerIT extends ESIntegTestCase {
            vanillaMaxScore, profileMaxScore, 0.001);
        }

-        assertThat(
-            "Profile totalHits of [" + profileResponse.getHits().getTotalHits() + "] is not close to Vanilla totalHits ["
-                + vanillaResponse.getHits().getTotalHits() + "]",
-            vanillaResponse.getHits().getTotalHits(), equalTo(profileResponse.getHits().getTotalHits()));
+        if (vanillaResponse.getHits().totalHits != profileResponse.getHits().totalHits) {
+            Set<SearchHit> vanillaSet = new HashSet<>(Arrays.asList(vanillaResponse.getHits().getHits()));
+            Set<SearchHit> profileSet = new HashSet<>(Arrays.asList(profileResponse.getHits().getHits()));
+            if (vanillaResponse.getHits().totalHits > profileResponse.getHits().totalHits) {
+                vanillaSet.removeAll(profileSet);
+                fail("Vanilla hits were larger than profile hits. Non-overlapping elements were: "
+                    + vanillaSet.toString());
+            } else {
+                profileSet.removeAll(vanillaSet);
+                fail("Profile hits were larger than vanilla hits. Non-overlapping elements were: "
+                    + profileSet.toString());
+            }
+        }

        SearchHit[] vanillaHits = vanillaResponse.getHits().getHits();
        SearchHit[] profileHits = profileResponse.getHits().getHits();

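Note: instead of a bare totalHits equality assertion, QueryProfilerIT now reports which documents differ when the profiled and unprofiled searches disagree. The underlying pattern is a one-sided set difference; a minimal sketch using the same imports the hunk adds:

    Set<SearchHit> vanillaSet = new HashSet<>(Arrays.asList(vanillaResponse.getHits().getHits()));
    Set<SearchHit> profileSet = new HashSet<>(Arrays.asList(profileResponse.getHits().getHits()));
    vanillaSet.removeAll(profileSet); // hits the vanilla search returned that the profiled one did not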
@@ -84,6 +84,7 @@ import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.Future;
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BiConsumer;
 import java.util.function.BiFunction;
 import java.util.function.Consumer;
 import java.util.function.Function;

@@ -233,7 +234,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
            activeIds.add(primary.routingEntry().allocationId().getId());
            ShardRouting startedRoutingEntry = ShardRoutingHelper.moveToStarted(primary.routingEntry());
            IndexShardRoutingTable routingTable = routingTable(shr -> shr == primary.routingEntry() ? startedRoutingEntry : shr);
-            primary.updateShardState(startedRoutingEntry, primary.getPrimaryTerm(), null,
+            primary.updateShardState(startedRoutingEntry, primary.getPendingPrimaryTerm(), null,
                currentClusterStateVersion.incrementAndGet(), activeIds, routingTable, Collections.emptySet());
            for (final IndexShard replica : replicas) {
                recoverReplica(replica);

@@ -279,20 +280,10 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
        /**
         * promotes the specific replica as the new primary
         */
-        public synchronized Future<PrimaryReplicaSyncer.ResyncTask> promoteReplicaToPrimary(IndexShard replica) throws IOException {
-            final long newTerm = indexMetaData.primaryTerm(shardId.id()) + 1;
-            IndexMetaData.Builder newMetaData = IndexMetaData.builder(indexMetaData).primaryTerm(shardId.id(), newTerm);
-            indexMetaData = newMetaData.build();
-            assertTrue(replicas.remove(replica));
-            closeShards(primary);
-            primary = replica;
-            assert primary.routingEntry().active() : "only active replicas can be promoted to primary: " + primary.routingEntry();
+        public Future<PrimaryReplicaSyncer.ResyncTask> promoteReplicaToPrimary(IndexShard replica) throws IOException {
            PlainActionFuture<PrimaryReplicaSyncer.ResyncTask> fut = new PlainActionFuture<>();
-            ShardRouting primaryRouting = replica.routingEntry().moveActiveReplicaToPrimary();
-            IndexShardRoutingTable routingTable = routingTable(shr -> shr == replica.routingEntry() ? primaryRouting : shr);
-
-            primary.updateShardState(primaryRouting,
-                newTerm, (shard, listener) -> primaryReplicaSyncer.resync(shard,
+            promoteReplicaToPrimary(replica,
+                (shard, listener) -> primaryReplicaSyncer.resync(shard,
                    new ActionListener<PrimaryReplicaSyncer.ResyncTask>() {
                        @Override
                        public void onResponse(PrimaryReplicaSyncer.ResyncTask resyncTask) {

@@ -305,11 +296,27 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
                            listener.onFailure(e);
                            fut.onFailure(e);
                        }
-                    }), currentClusterStateVersion.incrementAndGet(), activeIds(), routingTable, Collections.emptySet());
+                    }));
            return fut;
        }

+        public synchronized void promoteReplicaToPrimary(IndexShard replica,
+                BiConsumer<IndexShard, ActionListener<PrimaryReplicaSyncer.ResyncTask>> primaryReplicaSyncer)
+                throws IOException {
+            final long newTerm = indexMetaData.primaryTerm(shardId.id()) + 1;
+            IndexMetaData.Builder newMetaData = IndexMetaData.builder(indexMetaData).primaryTerm(shardId.id(), newTerm);
+            indexMetaData = newMetaData.build();
+            assertTrue(replicas.remove(replica));
+            closeShards(primary);
+            primary = replica;
+            assert primary.routingEntry().active() : "only active replicas can be promoted to primary: " + primary.routingEntry();
+            ShardRouting primaryRouting = replica.routingEntry().moveActiveReplicaToPrimary();
+            IndexShardRoutingTable routingTable = routingTable(shr -> shr == replica.routingEntry() ? primaryRouting : shr);
+
+            primary.updateShardState(primaryRouting, newTerm, primaryReplicaSyncer, currentClusterStateVersion.incrementAndGet(),
+                activeIds(), routingTable, Collections.emptySet());
+        }
+
        private synchronized Set<String> activeIds() {
            return shardRoutings().stream()
                .filter(ShardRouting::active).map(ShardRouting::allocationId).map(AllocationId::getId).collect(Collectors.toSet());

@@ -425,7 +432,8 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase

        private void updateAllocationIDsOnPrimary() throws IOException {

-            primary.updateShardState(primary.routingEntry(), primary.getPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(),
+            primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null,
+                currentClusterStateVersion.incrementAndGet(),
                activeIds(), routingTable(Function.identity()), Collections.emptySet());
        }
    }

@@ -527,7 +535,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
            IndexShard replica = replicationGroup.replicas.stream()
                .filter(s -> replicaRouting.isSameAllocation(s.routingEntry())).findFirst().get();
            replica.acquireReplicaOperationPermit(
-                replicationGroup.primary.getPrimaryTerm(),
+                replicationGroup.primary.getPendingPrimaryTerm(),
                globalCheckpoint,
                new ActionListener<Releasable>() {
                    @Override

@@ -605,7 +613,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase

        @Override
        protected void performOnReplica(BulkShardRequest request, IndexShard replica) throws Exception {
-            executeShardBulkOnReplica(request, replica, getPrimaryShard().getPrimaryTerm(), getPrimaryShard().getGlobalCheckpoint());
+            executeShardBulkOnReplica(request, replica, getPrimaryShard().getPendingPrimaryTerm(), getPrimaryShard().getGlobalCheckpoint());
        }
    }

@@ -664,14 +672,18 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
     * indexes the given requests on the supplied replica shard
     */
    void indexOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica) throws Exception {
-        executeShardBulkOnReplica(request, replica, group.primary.getPrimaryTerm(), group.primary.getGlobalCheckpoint());
+        indexOnReplica(request, group, replica, group.primary.getPendingPrimaryTerm());
+    }
+
+    void indexOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica, long term) throws Exception {
+        executeShardBulkOnReplica(request, replica, term, group.primary.getGlobalCheckpoint());
    }

    /**
     * Executes the delete request on the given replica shard.
     */
    void deleteOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica) throws Exception {
-        executeShardBulkOnReplica(request, replica, group.primary.getPrimaryTerm(), group.primary.getGlobalCheckpoint());
+        executeShardBulkOnReplica(request, replica, group.primary.getPendingPrimaryTerm(), group.primary.getGlobalCheckpoint());
    }

    class GlobalCheckpointSync extends ReplicationAction<

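Note: promoteReplicaToPrimary is split in two: the one-argument variant keeps its old behaviour by delegating with a PrimaryReplicaSyncer-backed resync action, while the new two-argument overload lets a test supply its own BiConsumer. A sketch of calling the overload with a no-op resync, assuming a ReplicationGroup reference named `shards` (a hypothetical name):

    shards.promoteReplicaToPrimary(replica, (shard, listener) ->
        // complete the resync immediately instead of replaying operations
        listener.onResponse(new PrimaryReplicaSyncer.ResyncTask(1, "type", "action", "desc", null, Collections.emptyMap())));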
@@ -425,7 +425,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
        IndexShardRoutingTable newRoutingTable = new IndexShardRoutingTable.Builder(shardRouting.shardId())
            .addShard(shardRouting)
            .build();
-        shard.updateShardState(shardRouting, shard.getPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(),
+        shard.updateShardState(shardRouting, shard.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(),
            inSyncIds, newRoutingTable, Collections.emptySet());
    }

@@ -514,8 +514,8 @@ public abstract class IndexShardTestCase extends ESTestCase {
            request,
            (int) ByteSizeUnit.MB.toBytes(1),
            Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), pNode.getName()).build());
-        primary.updateShardState(primary.routingEntry(), primary.getPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(),
-            inSyncIds, routingTable, Collections.emptySet());
+        primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null,
+            currentClusterStateVersion.incrementAndGet(), inSyncIds, routingTable, Collections.emptySet());
        recovery.recoverToTarget();
        recoveryTarget.markAsDone();
    }

@@ -536,9 +536,9 @@ public abstract class IndexShardTestCase extends ESTestCase {
        Set<String> inSyncIdsWithReplica = new HashSet<>(inSyncIds);
        inSyncIdsWithReplica.add(replica.routingEntry().allocationId().getId());
        // update both primary and replica shard state
-        primary.updateShardState(primary.routingEntry(), primary.getPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(),
-            inSyncIdsWithReplica, newRoutingTable, Collections.emptySet());
-        replica.updateShardState(replica.routingEntry().moveToStarted(), replica.getPrimaryTerm(), null,
+        primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null,
+            currentClusterStateVersion.incrementAndGet(), inSyncIdsWithReplica, newRoutingTable, Collections.emptySet());
+        replica.updateShardState(replica.routingEntry().moveToStarted(), replica.getPendingPrimaryTerm(), null,
            currentClusterStateVersion.get(), inSyncIdsWithReplica, newRoutingTable, Collections.emptySet());
    }

@@ -560,7 +560,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
            .removeShard(replica.routingEntry())
            .addShard(routingEntry)
            .build();
-        replica.updateShardState(routingEntry, replica.getPrimaryTerm() + 1,
+        replica.updateShardState(routingEntry, replica.getPendingPrimaryTerm() + 1,
            (is, listener) ->
                listener.onResponse(new PrimaryReplicaSyncer.ResyncTask(1, "type", "action", "desc", null, Collections.emptyMap())),
            currentClusterStateVersion.incrementAndGet(),

@@ -12,9 +12,10 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.xcontent.ObjectParser;
-import org.elasticsearch.common.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;

@@ -32,6 +33,8 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.stream.Collectors;

+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
 /**
  * The configuration object for the metrics portion of a rollup job config
  *

@@ -48,14 +51,7 @@ import java.util.stream.Collectors;
  * ]
  * }
  */
-public class MetricConfig implements Writeable, ToXContentFragment {
-    private static final String NAME = "metric_config";
-
-    private String field;
-    private List<String> metrics;
-
-    private static final ParseField FIELD = new ParseField("field");
-    private static final ParseField METRICS = new ParseField("metrics");
+public class MetricConfig implements Writeable, ToXContentObject {

    // TODO: replace these with an enum
    private static final ParseField MIN = new ParseField("min");

@@ -64,27 +60,54 @@ public class MetricConfig implements Writeable, ToXContentObject {
    private static final ParseField AVG = new ParseField("avg");
    private static final ParseField VALUE_COUNT = new ParseField("value_count");

-    public static final ObjectParser<MetricConfig.Builder, Void> PARSER = new ObjectParser<>(NAME, MetricConfig.Builder::new);
+    private static final String NAME = "metrics";
+    private static final String FIELD = "field";
+    private static final String METRICS = "metrics";
+    private static final ConstructingObjectParser<MetricConfig, Void> PARSER;
    static {
-        PARSER.declareString(MetricConfig.Builder::setField, FIELD);
-        PARSER.declareStringArray(MetricConfig.Builder::setMetrics, METRICS);
+        PARSER = new ConstructingObjectParser<>(NAME, args -> {
+            @SuppressWarnings("unchecked") List<String> metrics = (List<String>) args[1];
+            return new MetricConfig((String) args[0], metrics);
+        });
+        PARSER.declareString(constructorArg(), new ParseField(FIELD));
+        PARSER.declareStringArray(constructorArg(), new ParseField(METRICS));
    }

-    MetricConfig(String name, List<String> metrics) {
-        this.field = name;
+    private final String field;
+    private final List<String> metrics;
+
+    public MetricConfig(final String field, final List<String> metrics) {
+        if (field == null || field.isEmpty()) {
+            throw new IllegalArgumentException("Field must be a non-null, non-empty string");
+        }
+        if (metrics == null || metrics.isEmpty()) {
+            throw new IllegalArgumentException("Metrics must be a non-null, non-empty array of strings");
+        }
+        metrics.forEach(m -> {
+            if (RollupField.SUPPORTED_METRICS.contains(m) == false) {
+                throw new IllegalArgumentException("Unsupported metric [" + m + "]. " +
+                    "Supported metrics include: " + RollupField.SUPPORTED_METRICS);
+            }
+        });
+        this.field = field;
        this.metrics = metrics;
    }

-    MetricConfig(StreamInput in) throws IOException {
+    MetricConfig(final StreamInput in) throws IOException {
        field = in.readString();
        metrics = in.readList(StreamInput::readString);
    }

+    /**
+     * @return the name of the field used in the metric configuration. Never {@code null}.
+     */
    public String getField() {
        return field;
    }

+    /**
+     * @return the names of the metrics used in the metric configuration. Never {@code null}.
+     */
    public List<String> getMetrics() {
        return metrics;
    }

@@ -159,10 +182,13 @@ public class MetricConfig implements Writeable, ToXContentObject {
    }

    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.field(FIELD.getPreferredName(), field);
-        builder.field(METRICS.getPreferredName(), metrics);
-        return builder;
+    public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
+        builder.startObject();
+        {
+            builder.field(FIELD, field);
+            builder.field(METRICS, metrics);
+        }
+        return builder.endObject();
    }

    @Override

@@ -172,19 +198,16 @@ public class MetricConfig implements Writeable, ToXContentObject {
    }

    @Override
-    public boolean equals(Object other) {
+    public boolean equals(final Object other) {
        if (this == other) {
            return true;
        }

        if (other == null || getClass() != other.getClass()) {
            return false;
        }

-        MetricConfig that = (MetricConfig) other;
-
-        return Objects.equals(this.field, that.field)
-            && Objects.equals(this.metrics, that.metrics);
+        final MetricConfig that = (MetricConfig) other;
+        return Objects.equals(field, that.field) && Objects.equals(metrics, that.metrics);
    }

    @Override

@@ -197,52 +220,7 @@ public class MetricConfig implements Writeable, ToXContentObject {
        return Strings.toString(this, true, true);
    }

-    public static class Builder {
-        private String field;
-        private List<String> metrics;
-
-        public Builder() {
-        }
-
-        public Builder(MetricConfig config) {
-            this.field = config.getField();
-            this.metrics = config.getMetrics();
-        }
-
-        public String getField() {
-            return field;
-        }
-
-        public MetricConfig.Builder setField(String field) {
-            this.field = field;
-            return this;
-        }
-
-        public List<String> getMetrics() {
-            return metrics;
-        }
-
-        public MetricConfig.Builder setMetrics(List<String> metrics) {
-            this.metrics = metrics;
-            return this;
-        }
-
-        public MetricConfig build() {
-            if (Strings.isNullOrEmpty(field) == true) {
-                throw new IllegalArgumentException("Parameter [" + FIELD.getPreferredName() + "] must be a non-null, non-empty string.");
-            }
-            if (metrics == null || metrics.isEmpty()) {
-                throw new IllegalArgumentException("Parameter [" + METRICS.getPreferredName()
-                    + "] must be a non-null, non-empty array of strings.");
-            }
-            metrics.forEach(m -> {
-                if (RollupField.SUPPORTED_METRICS.contains(m) == false) {
-                    throw new IllegalArgumentException("Unsupported metric [" + m + "]. " +
-                        "Supported metrics include: " + RollupField.SUPPORTED_METRICS);
-                }
-            });
-            return new MetricConfig(field, metrics);
-        }
+    public static MetricConfig fromXContent(final XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
    }
 }

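Note: MetricConfig is now an immutable ToXContentObject parsed in one shot by a ConstructingObjectParser, with the validation that previously lived in Builder#build() moved into the public constructor. A usage sketch (the field and metric names are illustrative):

    MetricConfig config = new MetricConfig("temperature", Arrays.asList("min", "max", "avg"));
    // toXContent now emits a self-contained object:
    //   {"field":"temperature","metrics":["min","max","avg"]}
    // and the inverse direction goes through the static factory:
    //   MetricConfig parsed = MetricConfig.fromXContent(parser);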
@@ -63,7 +63,7 @@ public class RollupJobConfig implements NamedWriteable, ToXContentObject {
    static {
        PARSER.declareString(RollupJobConfig.Builder::setId, RollupField.ID);
        PARSER.declareObject(RollupJobConfig.Builder::setGroupConfig, (p, c) -> GroupConfig.PARSER.apply(p,c).build(), GROUPS);
-        PARSER.declareObjectArray(RollupJobConfig.Builder::setMetricsConfig, (p, c) -> MetricConfig.PARSER.apply(p, c).build(), METRICS);
+        PARSER.declareObjectArray(RollupJobConfig.Builder::setMetricsConfig, (p, c) -> MetricConfig.fromXContent(p), METRICS);
        PARSER.declareString((params, val) ->
            params.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT);
        PARSER.declareString(RollupJobConfig.Builder::setIndexPattern, INDEX_PATTERN);

@@ -160,10 +160,8 @@ public class RollupJobConfig implements NamedWriteable, ToXContentObject {
        }
        if (metricsConfig != null) {
            builder.startArray(METRICS.getPreferredName());
-            for (MetricConfig config : metricsConfig) {
-                builder.startObject();
-                config.toXContent(builder, params);
-                builder.endObject();
+            for (MetricConfig metric : metricsConfig) {
+                metric.toXContent(builder, params);
            }
            builder.endArray();
        }

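Note: because each MetricConfig now opens and closes its own object, the enclosing job config no longer wraps nested entries itself. A sketch of the resulting serialization pattern, using the names from the hunk above:

    builder.startArray(METRICS.getPreferredName());
    for (MetricConfig metric : metricsConfig) {
        metric.toXContent(builder, params); // each entry emits its own {...}
    }
    builder.endArray();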
@@ -13,7 +13,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;

 import java.io.IOException;
 import java.util.Objects;

@@ -10,7 +10,7 @@ import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;

 import java.io.IOException;

@@ -8,7 +8,7 @@ package org.elasticsearch.xpack.core.security.action.user;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;

 import java.io.IOException;

@@ -18,7 +18,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.xpack.core.security.authc.support.Hasher;
 import org.elasticsearch.xpack.core.security.support.Validation;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.xcontent.XContentUtils;

 import java.io.IOException;

@@ -8,7 +8,7 @@ package org.elasticsearch.xpack.core.security.action.user;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;

 import java.io.IOException;
 import java.util.Collection;

@@ -20,7 +20,7 @@ import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.xpack.core.security.authc.support.Hasher;
 import org.elasticsearch.xpack.core.security.support.Validation;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.xcontent.XContentUtils;

 import java.io.IOException;

@@ -12,7 +12,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.xpack.core.security.user.InternalUserSerializationHelper;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;

 import java.io.IOException;
 import java.util.Base64;

@@ -6,7 +6,7 @@
 package org.elasticsearch.xpack.core.security.authc;

 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;

 import java.util.Objects;

@@ -9,7 +9,7 @@ import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.xpack.core.XPackField;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;

 import java.util.Collections;
 import java.util.HashMap;

@@ -26,7 +26,6 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivilege;
 import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges;
-import org.elasticsearch.xpack.core.security.support.MetadataUtils;
 import org.elasticsearch.xpack.core.security.support.Validation;
 import org.elasticsearch.xpack.core.security.xcontent.XContentUtils;

@@ -163,7 +162,7 @@ public class RoleDescriptor implements ToXContentObject {
        }
        sb.append("], runAs=[").append(Strings.arrayToCommaDelimitedString(runAs));
        sb.append("], metadata=[");
-        MetadataUtils.writeValue(sb, metadata);
+        sb.append(metadata);
        sb.append("]]");
        return sb.toString();
    }

@@ -62,7 +62,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader;
 import org.elasticsearch.xpack.core.security.support.Exceptions;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;

 import java.io.IOException;
 import java.util.ArrayList;

@@ -5,8 +5,6 @@
  */
 package org.elasticsearch.xpack.core.security.support;

-import java.lang.reflect.Array;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.Map;

@@ -19,50 +17,6 @@ public class MetadataUtils {
    private MetadataUtils() {
    }

-    public static void writeValue(StringBuilder sb, Object object) {
-        if (object == null) {
-            sb.append(object);
-        } else if (object instanceof Map) {
-            sb.append("{");
-            for (Map.Entry<String, Object> entry : ((Map<String, Object>) object).entrySet()) {
-                sb.append(entry.getKey()).append("=");
-                writeValue(sb, entry.getValue());
-            }
-            sb.append("}");
-
-        } else if (object instanceof Collection) {
-            sb.append("[");
-            boolean first = true;
-            for (Object item : (Collection) object) {
-                if (!first) {
-                    sb.append(",");
-                }
-                writeValue(sb, item);
-                first = false;
-            }
-            sb.append("]");
-        } else if (object.getClass().isArray()) {
-            sb.append("[");
-            for (int i = 0; i < Array.getLength(object); i++) {
-                if (i != 0) {
-                    sb.append(",");
-                }
-                writeValue(sb, Array.get(object, i));
-            }
-            sb.append("]");
-        } else {
-            sb.append(object);
-        }
-    }
-
-    public static void verifyNoReservedMetadata(Map<String, Object> metadata) {
-        for (String key : metadata.keySet()) {
-            if (key.startsWith(RESERVED_PREFIX)) {
-                throw new IllegalArgumentException("invalid user metadata. [" + key + "] is a reserved for internal use");
-            }
-        }
-    }
-
    public static boolean containsReservedMetadata(Map<String, Object> metadata) {
        for (String key : metadata.keySet()) {
            if (key.startsWith(RESERVED_PREFIX)) {

@@ -9,6 +9,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.support.MetadataUtils;

 import java.util.Collections;

@@ -6,6 +6,7 @@
 package org.elasticsearch.xpack.core.security.user;

 import org.elasticsearch.Version;
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.support.MetadataUtils;

 /**

@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.core.security.user;

+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.support.MetadataUtils;


@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.security.user;

 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.protocol.xpack.security.User;

 import java.io.IOException;

@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.core.security.user;

+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.support.MetadataUtils;

 /**

@@ -6,6 +6,7 @@
 package org.elasticsearch.xpack.core.security.user;

 import org.elasticsearch.Version;
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.support.MetadataUtils;

 /**

@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.core.security.user;

+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.authz.privilege.SystemPrivilege;

 import java.util.function.Predicate;

@@ -5,6 +5,8 @@
  */
 package org.elasticsearch.xpack.core.security.user;

+import org.elasticsearch.protocol.xpack.security.User;
+
 /**
  * internal user that manages xpack security. Has all cluster/indices permissions.
  */

@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.core.security.user;

+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
 import org.elasticsearch.xpack.core.security.authz.permission.Role;
 import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField;

@ -17,6 +17,7 @@ import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
|
||||||
import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig;
|
import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig;
|
||||||
|
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
|
import java.util.Collections;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Random;
|
import java.util.Random;
|
||||||
import java.util.stream.Collectors;
|
import java.util.stream.Collectors;
|
||||||
|
@ -38,11 +39,7 @@ public class ConfigTestHelpers {
|
||||||
builder.setGroupConfig(ConfigTestHelpers.getGroupConfig().build());
|
builder.setGroupConfig(ConfigTestHelpers.getGroupConfig().build());
|
||||||
         builder.setPageSize(ESTestCase.randomIntBetween(1,10));
         if (ESTestCase.randomBoolean()) {
-            List<MetricConfig> metrics = IntStream.range(1, ESTestCase.randomIntBetween(1,10))
-                    .mapToObj(n -> ConfigTestHelpers.getMetricConfig().build())
-                    .collect(Collectors.toList());
-            builder.setMetricsConfig(metrics);
+            builder.setMetricsConfig(randomMetricsConfigs(ESTestCase.random()));
         }
         return builder;
     }
@@ -59,32 +56,6 @@ public class ConfigTestHelpers {
         return groupBuilder;
     }

-    public static MetricConfig.Builder getMetricConfig() {
-        MetricConfig.Builder builder = new MetricConfig.Builder();
-        builder.setField(ESTestCase.randomAlphaOfLength(15)); // large names so we don't accidentally collide
-        List<String> metrics = new ArrayList<>();
-        if (ESTestCase.randomBoolean()) {
-            metrics.add("min");
-        }
-        if (ESTestCase.randomBoolean()) {
-            metrics.add("max");
-        }
-        if (ESTestCase.randomBoolean()) {
-            metrics.add("sum");
-        }
-        if (ESTestCase.randomBoolean()) {
-            metrics.add("avg");
-        }
-        if (ESTestCase.randomBoolean()) {
-            metrics.add("value_count");
-        }
-        if (metrics.size() == 0) {
-            metrics.add("min");
-        }
-        builder.setMetrics(metrics);
-        return builder;
-    }
-
     private static final String[] TIME_SUFFIXES = new String[]{"d", "h", "ms", "s", "m"};
     public static String randomPositiveTimeValue() {
         return ESTestCase.randomIntBetween(1, 1000) + ESTestCase.randomFrom(TIME_SUFFIXES);
@@ -123,6 +94,39 @@ public class ConfigTestHelpers {
         return new HistogramGroupConfig(randomInterval(random), randomFields(random));
     }

+    public static List<MetricConfig> randomMetricsConfigs(final Random random) {
+        final int numMetrics = randomIntBetween(random, 1, 10);
+        final List<MetricConfig> metrics = new ArrayList<>(numMetrics);
+        for (int i = 0; i < numMetrics; i++) {
+            metrics.add(randomMetricConfig(random));
+        }
+        return Collections.unmodifiableList(metrics);
+    }
+
+    public static MetricConfig randomMetricConfig(final Random random) {
+        final String field = randomAsciiAlphanumOfLengthBetween(random, 15, 25); // large names so we don't accidentally collide
+        final List<String> metrics = new ArrayList<>();
+        if (random.nextBoolean()) {
+            metrics.add("min");
+        }
+        if (random.nextBoolean()) {
+            metrics.add("max");
+        }
+        if (random.nextBoolean()) {
+            metrics.add("sum");
+        }
+        if (random.nextBoolean()) {
+            metrics.add("avg");
+        }
+        if (random.nextBoolean()) {
+            metrics.add("value_count");
+        }
+        if (metrics.size() == 0) {
+            metrics.add("min");
+        }
+        return new MetricConfig(field, Collections.unmodifiableList(metrics));
+    }
+
     public static TermsGroupConfig randomTermsGroupConfig(final Random random) {
         return new TermsGroupConfig(randomFields(random));
     }

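NOTE (not part of the diff): the replacement helpers above keep the old selection behavior — each of the five supported metrics is included on a coin flip, with "min" as a fallback so the list is never empty — but return an immutable MetricConfig instead of a builder. A minimal standalone sketch of just that selection logic, runnable without any Elasticsearch types; the class name RandomMetricsSketch is hypothetical:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;

public class RandomMetricsSketch {
    private static final String[] SUPPORTED_METRICS = {"min", "max", "sum", "avg", "value_count"};

    // Each supported metric is kept with probability 1/2; "min" is the
    // fallback so the returned list is never empty, mirroring
    // randomMetricConfig in the hunk above.
    static List<String> randomMetrics(Random random) {
        List<String> metrics = new ArrayList<>();
        for (String metric : SUPPORTED_METRICS) {
            if (random.nextBoolean()) {
                metrics.add(metric);
            }
        }
        if (metrics.isEmpty()) {
            metrics.add("min");
        }
        return Collections.unmodifiableList(metrics);
    }

    public static void main(String[] args) {
        // Prints a random non-empty subset of SUPPORTED_METRICS.
        System.out.println(randomMetrics(new Random(42L)));
    }
}
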
@@ -17,14 +17,16 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;

+import static java.util.Collections.singletonList;
 import static org.hamcrest.Matchers.equalTo;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;

-public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
+public class MetricConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {

     @Override
-    protected MetricConfig doParseInstance(XContentParser parser) throws IOException {
-        return MetricConfig.PARSER.apply(parser, null).build();
+    protected MetricConfig doParseInstance(final XContentParser parser) throws IOException {
+        return MetricConfig.fromXContent(parser);
     }

     @Override
@@ -34,24 +36,20 @@ public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {

     @Override
     protected MetricConfig createTestInstance() {
-        return ConfigTestHelpers.getMetricConfig().build();
+        return ConfigTestHelpers.randomMetricConfig(random());
     }

-    public void testValidateNoMapping() throws IOException {
+    public void testValidateNoMapping() {
         ActionRequestValidationException e = new ActionRequestValidationException();
         Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();

-        MetricConfig config = new MetricConfig.Builder()
-                .setField("my_field")
-                .setMetrics(Collections.singletonList("max"))
-                .build();
+        MetricConfig config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().get(0), equalTo("Could not find a [numeric] field with name [my_field] in any of the " +
                 "indices matching the index pattern."));
     }

-    public void testValidateNomatchingField() throws IOException {
+    public void testValidateNomatchingField() {

         ActionRequestValidationException e = new ActionRequestValidationException();
         Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();

@@ -59,17 +57,13 @@ public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
         FieldCapabilities fieldCaps = mock(FieldCapabilities.class);
         responseMap.put("some_other_field", Collections.singletonMap("date", fieldCaps));

-        MetricConfig config = new MetricConfig.Builder()
-                .setField("my_field")
-                .setMetrics(Collections.singletonList("max"))
-                .build();
+        MetricConfig config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().get(0), equalTo("Could not find a [numeric] field with name [my_field] in any of the " +
                 "indices matching the index pattern."));
     }

-    public void testValidateFieldWrongType() throws IOException {
+    public void testValidateFieldWrongType() {

         ActionRequestValidationException e = new ActionRequestValidationException();
         Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();

@@ -77,17 +71,13 @@ public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
         FieldCapabilities fieldCaps = mock(FieldCapabilities.class);
         responseMap.put("my_field", Collections.singletonMap("keyword", fieldCaps));

-        MetricConfig config = new MetricConfig.Builder()
-                .setField("my_field")
-                .setMetrics(Collections.singletonList("max"))
-                .build();
+        MetricConfig config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().get(0), equalTo("The field referenced by a metric group must be a [numeric] type, " +
                 "but found [keyword] for field [my_field]"));
     }

-    public void testValidateFieldMatchingNotAggregatable() throws IOException {
+    public void testValidateFieldMatchingNotAggregatable() {

         ActionRequestValidationException e = new ActionRequestValidationException();
         Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();

@@ -96,15 +86,12 @@ public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
         when(fieldCaps.isAggregatable()).thenReturn(false);
         responseMap.put("my_field", Collections.singletonMap("long", fieldCaps));

-        MetricConfig config = new MetricConfig.Builder()
-                .setField("my_field")
-                .setMetrics(Collections.singletonList("max"))
-                .build();
+        MetricConfig config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().get(0), equalTo("The field [my_field] must be aggregatable across all indices, but is not."));
     }

-    public void testValidateMatchingField() throws IOException {
+    public void testValidateMatchingField() {
         ActionRequestValidationException e = new ActionRequestValidationException();
         Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();

@@ -113,10 +100,7 @@ public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("long", fieldCaps));

-        MetricConfig config = new MetricConfig.Builder()
-                .setField("my_field")
-                .setMetrics(Collections.singletonList("max"))
-                .build();
+        MetricConfig config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));

@@ -124,70 +108,49 @@ public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("double", fieldCaps));
-        config = new MetricConfig.Builder()
-                .setField("my_field")
-                .setMetrics(Collections.singletonList("max"))
-                .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));

         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("float", fieldCaps));
-        config = new MetricConfig.Builder()
-                .setField("my_field")
-                .setMetrics(Collections.singletonList("max"))
-                .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));

         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("short", fieldCaps));
-        config = new MetricConfig.Builder()
-                .setField("my_field")
-                .setMetrics(Collections.singletonList("max"))
-                .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));

         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("byte", fieldCaps));
-        config = new MetricConfig.Builder()
-                .setField("my_field")
-                .setMetrics(Collections.singletonList("max"))
-                .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));

         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("half_float", fieldCaps));
-        config = new MetricConfig.Builder()
-                .setField("my_field")
-                .setMetrics(Collections.singletonList("max"))
-                .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));

         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("scaled_float", fieldCaps));
-        config = new MetricConfig.Builder()
-                .setField("my_field")
-                .setMetrics(Collections.singletonList("max"))
-                .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));

         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("integer", fieldCaps));
-        config = new MetricConfig.Builder()
-                .setField("my_field")
-                .setMetrics(Collections.singletonList("max"))
-                .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));
     }

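NOTE (not part of the diff): every test change in this file is the same mechanical migration from the removed builder to the two-argument constructor this branch introduces. A hedged before/after sketch, assuming the MetricConfig class shown in this diff and a static import of java.util.Collections.singletonList:

// Before (builder API, removed on this branch):
// MetricConfig config = new MetricConfig.Builder()
//         .setField("my_field")
//         .setMetrics(Collections.singletonList("max"))
//         .build();

// After: an immutable value, constructed in one expression.
MetricConfig config = new MetricConfig("my_field", singletonList("max"));

The constructor form fixes the config at construction time, which is presumably what lets ConfigTestHelpers hand out unmodifiable lists in the hunks further up.
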
@@ -76,7 +76,7 @@ import org.elasticsearch.test.IndexSettingsModule;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 import org.junit.After;
 import org.junit.Before;
 import org.mockito.ArgumentCaptor;

@@ -302,6 +302,7 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<Cl
         when(mockNodeResponse.shardsStats()).thenReturn(new ShardStats[]{mockShardStats});

         final ClusterStatsResponse clusterStats = new ClusterStatsResponse(1451606400000L,
+                "_cluster",
                 clusterName,
                 singletonList(mockNodeResponse),
                 emptyList());
@@ -353,6 +354,7 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<Cl
                 + (needToEnableTLS ? ",\"cluster_needs_tls\":true" : "")
                 + "},"
                 + "\"cluster_stats\":{"
+                + "\"cluster_uuid\":\"_cluster\","
                 + "\"timestamp\":1451606400000,"
                 + "\"status\":\"red\","
                 + "\"indices\":{"

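NOTE (not part of the diff): read together, the two hunks above thread a cluster UUID through the monitoring pipeline — the response constructor gains a second argument and the rendered document gains a matching "cluster_uuid" key. A hedged sketch of the call exactly as this diff shows it; the parameter-name comments are assumptions, not confirmed signatures:

final ClusterStatsResponse clusterStats = new ClusterStatsResponse(
        1451606400000L,                   // timestamp (2016-01-01T00:00:00Z in epoch millis)
        "_cluster",                       // cluster UUID, the new argument
        clusterName,
        singletonList(mockNodeResponse),
        emptyList());
// The serialized document then leads with the UUID:
// "cluster_stats":{"cluster_uuid":"_cluster","timestamp":1451606400000,...
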
@@ -26,10 +26,10 @@ import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig;
 import org.joda.time.DateTimeZone;

 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;

+import static java.util.Collections.singletonList;
 import static org.hamcrest.Matchers.equalTo;

 public class RollupJobIdentifierUtilTests extends ESTestCase {
@@ -103,10 +103,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
         GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
         group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
         job.setGroupConfig(group.build());
-        job.setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
-                .setField("bar")
-                .setMetrics(Collections.singletonList("max"))
-                .build()));
+        job.setMetricsConfig(singletonList(new MetricConfig("bar", singletonList("max"))));
         RollupJobCaps cap = new RollupJobCaps(job.build());
         Set<RollupJobCaps> caps = singletonSet(cap);

@@ -168,10 +165,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
         GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
         group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
         job.setGroupConfig(group.build());
-        job.setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
-                .setField("bar")
-                .setMetrics(Collections.singletonList("max"))
-                .build()));
+        job.setMetricsConfig(singletonList(new MetricConfig("bar", singletonList("max"))));
         RollupJobCaps cap = new RollupJobCaps(job.build());
         Set<RollupJobCaps> caps = new HashSet<>(2);
         caps.add(cap);

@@ -180,10 +174,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
         GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
         group2.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
         job2.setGroupConfig(group.build());
-        job.setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
-                .setField("bar")
-                .setMetrics(Collections.singletonList("min"))
-                .build()));
+        job.setMetricsConfig(singletonList(new MetricConfig("bar", singletonList("min"))));
         RollupJobCaps cap2 = new RollupJobCaps(job2.build());
         caps.add(cap2);

@@ -331,12 +322,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
                         .build())
                 .setHisto(new HistogramGroupConfig(1L, "baz")) // <-- NOTE right type but wrong name
                 .build())
-            .setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
-                    .setField("max_field")
-                    .setMetrics(Collections.singletonList("max")).build(),
-                new MetricConfig.Builder()
-                    .setField("avg_field")
-                    .setMetrics(Collections.singletonList("avg")).build()))
+            .setMetricsConfig(
+                Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))))
             .build();
         Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));

@@ -360,12 +347,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
                         .setTimeZone(DateTimeZone.UTC)
                         .build())
                 .build())
-            .setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
-                    .setField("max_field")
-                    .setMetrics(Collections.singletonList("max")).build(),
-                new MetricConfig.Builder()
-                    .setField("avg_field")
-                    .setMetrics(Collections.singletonList("avg")).build()))
+            .setMetricsConfig(
+                Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))))
             .build();
         Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));

@@ -412,12 +395,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
                         .setTimeZone(DateTimeZone.UTC)
                         .build())
                 .build())
-            .setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
-                    .setField("max_field")
-                    .setMetrics(Collections.singletonList("max")).build(),
-                new MetricConfig.Builder()
-                    .setField("avg_field")
-                    .setMetrics(Collections.singletonList("avg")).build()))
+            .setMetricsConfig(
+                Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))))
             .build();
         Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));

@@ -442,12 +421,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
                         .build())
                 .setHisto(new HistogramGroupConfig(1L, "baz")) // <-- NOTE right type but wrong name
                 .build())
-            .setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
-                    .setField("max_field")
-                    .setMetrics(Collections.singletonList("max")).build(),
-                new MetricConfig.Builder()
-                    .setField("avg_field")
-                    .setMetrics(Collections.singletonList("avg")).build()))
+            .setMetricsConfig(
+                Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))))
             .build();
         Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));

@@ -485,9 +460,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
         int i = ESTestCase.randomIntBetween(0, 3);

         Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(ConfigTestHelpers
-            .getRollupJob("foo").setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
-                .setField("foo")
-                .setMetrics(Arrays.asList("avg", "max", "min", "sum")).build()))
+            .getRollupJob("foo")
+            .setMetricsConfig(singletonList(new MetricConfig("foo", Arrays.asList("avg", "max", "min", "sum"))))
             .build()));

         String aggType;

@@ -45,6 +45,7 @@ import java.util.function.Function;
 import java.util.stream.Collectors;

 import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
 import static org.elasticsearch.xpack.rollup.RollupRequestTranslator.translateAggregation;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.core.IsInstanceOf.instanceOf;
@@ -153,9 +154,8 @@ public class RollupRequestTranslationTests extends ESTestCase {

     public void testUnsupportedMetric() {
         Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(ConfigTestHelpers
-            .getRollupJob("foo").setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
-                .setField("foo")
-                .setMetrics(Arrays.asList("avg", "max", "min", "sum")).build()))
+            .getRollupJob("foo")
+            .setMetricsConfig(singletonList(new MetricConfig("foo", Arrays.asList("avg", "max", "min", "sum"))))
             .build()));

         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,

@@ -454,7 +454,7 @@ public class SearchActionTests extends ESTestCase {
         job2.setGroupConfig(group.build());

         // so that the jobs aren't exactly equal
-        job2.setMetricsConfig(Collections.singletonList(ConfigTestHelpers.getMetricConfig().build()));
+        job2.setMetricsConfig(ConfigTestHelpers.randomMetricsConfigs(random()));
         RollupJobCaps cap2 = new RollupJobCaps(job2.build());

         Set<RollupJobCaps> caps = new HashSet<>(2);