Merge branch 'master' into index-lifecycle
client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java
client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java
client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java
client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
client/rest-high-level/src/test/java/org/elasticsearch/client/WatcherIT.java
client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/LicensingDocumentationIT.java
client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/WatcherDocumentationIT.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/Def.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/FunctionRef.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessExplainError.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/ScriptClassInfo.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/antlr/EnhancedPainlessLexer.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/lookup/PainlessLookup.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EExplicit.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EFunctionRef.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EInstanceof.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ELambda.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EListInit.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EMapInit.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewArray.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/ENewObj.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/EStatic.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PCallInvoke.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PField.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubListShortcut.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/PSubMapShortcut.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SCatch.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SDeclaration.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SEach.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SFunction.java
modules/lang-painless/src/main/java/org/elasticsearch/painless/node/SSubEachIterable.java
modules/lang-painless/src/test/java/org/elasticsearch/painless/OverloadTests.java
modules/lang-painless/src/test/java/org/elasticsearch/painless/PainlessDocGenerator.java
modules/lang-painless/src/test/java/org/elasticsearch/painless/RegexTests.java
modules/lang-painless/src/test/java/org/elasticsearch/painless/node/NodeToStringTests.java
rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml
server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java
server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java
server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java
server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java
server/src/main/java/org/elasticsearch/index/engine/Engine.java
server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java
server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
server/src/main/java/org/elasticsearch/index/shard/IndexShardOperationPermits.java
server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java
server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
server/src/main/java/org/elasticsearch/index/translog/Translog.java
server/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java
server/src/test/java/org/elasticsearch/action/search/SearchAsyncActionTests.java
server/src/test/java/org/elasticsearch/action/support/replication/TransportReplicationActionTests.java
server/src/test/java/org/elasticsearch/action/support/replication/TransportWriteActionTests.java
server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardStateIT.java
server/src/test/java/org/elasticsearch/index/replication/IndexLevelReplicationTests.java
server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java
server/src/test/java/org/elasticsearch/index/shard/IndexShardOperationPermitsTests.java
server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java
server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java
server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java
server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java
server/src/test/java/org/elasticsearch/search/profile/query/QueryProfilerIT.java
test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/SecurityContext.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/UserSettings.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ChangePasswordRequestBuilder.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/PutUserRequestBuilder.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/AuthenticationResult.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Realm.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapper.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MetadataUtils.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/AnonymousUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/BeatsSystemUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/ElasticUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUserSerializationHelper.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/KibanaUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/LogstashSystemUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/SystemUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackSecurityUser.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/XPackUser.java
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/MetricsConfigSerializingTests.java -> x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/MetricConfigSerializingTests.java
x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/accesscontrol/SecurityIndexSearcherWrapperUnitTests.java
x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupRequestTranslationTests.java
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/IndexerUtilsTests.java
x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/AuthenticationService.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/UserAndPassword.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStore.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/pki/PkiRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealm.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessor.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/RestAuthenticateAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestChangePasswordAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUsersAction.java
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/ServerTransportFilter.java
x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRealmsCacheTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/filter/SecurityActionFilterTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/IndicesAliasesRequestInterceptorTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/interceptor/ResizeRequestInterceptorTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportAuthenticateActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportChangePasswordActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportDeleteUserActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportHasPrivilegesActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportSetEnabledActionTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/AuditTrailServiceTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailMutedTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrailTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailFilterTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/RealmsTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/UserTokenTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/file/FileUserPasswdStoreTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmAuthenticateFailedTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmCacheTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/CancellableLdapRunnableTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/CachingUsernamePasswordRealmTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationUtilsTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/SecuritySearchOperationListenerTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/ingest/SetSecurityUserProcessorTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/ServerTransportFilterTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/AnonymousUserTests.java
x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserTests.java -> x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/UserSerializationTests.java
x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/function/scalar/string/StringFunctionUtils.java
x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/ExecutionServiceTests.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DefaultDetectorDescription.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRule.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Detector.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorFunction.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRef.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilter.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/Operator.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleAction.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleCondition.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScope.java
x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/ml/package-info.java
x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java -> x-pack/protocol/src/main/java/org/elasticsearch/protocol/xpack/security/User.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectionRuleTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/DetectorTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/FilterRefTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/MlFilterTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleConditionTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/ml/job/config/RuleScopeTests.java
x-pack/protocol/src/test/java/org/elasticsearch/protocol/xpack/security/UserTests.java
x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/WatcherRestartIT.java
x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/60_watcher.yml
x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/60_watcher.yml
x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/60_watcher.yml
x-pack/qa/security-example-spi-extension/src/main/java/org/elasticsearch/example/realm/CustomRealm.java
x-pack/qa/security-example-spi-extension/src/test/java/org/elasticsearch/example/realm/CustomRealmTests.java
x-pack/qa/security-migrate-tests/src/test/java/org/elasticsearch/xpack/security/MigrateToolIT.java
commit 20915a9baf
@@ -205,6 +205,9 @@ public class RestHighLevelClient implements Closeable {
     private final SnapshotClient snapshotClient = new SnapshotClient(this);
     private final TasksClient tasksClient = new TasksClient(this);
     private final XPackClient xPackClient = new XPackClient(this);
+    private final WatcherClient watcherClient = new WatcherClient(this);
+    private final LicenseClient licenseClient = new LicenseClient(this);
+    private final IndexLifecycleClient ilmClient = new IndexLifecycleClient(this);

     /**
      * Creates a {@link RestHighLevelClient} given the low level {@link RestClientBuilder} that allows to build the
@@ -296,18 +299,47 @@ public class RestHighLevelClient implements Closeable {
     }

     /**
-     * A wrapper for the {@link RestHighLevelClient} that provides methods for
-     * accessing the Elastic Licensed X-Pack APIs that are shipped with the
-     * default distribution of Elasticsearch. All of these APIs will 404 if run
-     * against the OSS distribution of Elasticsearch.
+     * Provides methods for accessing the Elastic Licensed X-Pack Info
+     * and Usage APIs that are shipped with the default distribution of
+     * Elasticsearch. All of these APIs will 404 if run against the OSS
+     * distribution of Elasticsearch.
      * <p>
-     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/xpack-api.html">
-     * X-Pack APIs on elastic.co</a> for more information.
+     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/info-api.html">
+     * Info APIs on elastic.co</a> for more information.
      */
     public final XPackClient xpack() {
         return xPackClient;
     }

+    /**
+     * Provides methods for accessing the Elastic Licensed Watcher APIs that
+     * are shipped with the default distribution of Elasticsearch. All of
+     * these APIs will 404 if run against the OSS distribution of Elasticsearch.
+     * <p>
+     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api.html">
+     * Watcher APIs on elastic.co</a> for more information.
+     */
+    public WatcherClient watcher() { return watcherClient; }
+
+    /**
+     * Provides methods for accessing the Elastic Licensed Licensing APIs that
+     * are shipped with the default distribution of Elasticsearch. All of
+     * these APIs will 404 if run against the OSS distribution of Elasticsearch.
+     * <p>
+     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/licensing-apis.html">
+     * Licensing APIs on elastic.co</a> for more information.
+     */
+    public LicenseClient license() { return licenseClient; }
+
+    /**
+     * A wrapper for the {@link RestHighLevelClient} that provides methods for
+     * accessing the Elastic Index Lifecycle APIs.
+     * <p>
+     * See the <a href="http://FILL-ME-IN-WE-HAVE-NO-DOCS-YET.com"> X-Pack APIs
+     * on elastic.co</a> for more information.
+     */
+    public IndexLifecycleClient indexLifecycle() { return ilmClient; }
+
     /**
      * Executes a bulk request using the Bulk API.
      * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html">Bulk API on elastic.co</a>
@@ -41,19 +41,9 @@ import static java.util.Collections.emptySet;
 public final class XPackClient {

     private final RestHighLevelClient restHighLevelClient;
-    private final WatcherClient watcherClient;
-    private final LicenseClient licenseClient;
-    private final IndexLifecycleClient indexLifecycleClient;

     XPackClient(RestHighLevelClient restHighLevelClient) {
         this.restHighLevelClient = restHighLevelClient;
-        this.watcherClient = new WatcherClient(restHighLevelClient);
-        this.licenseClient = new LicenseClient(restHighLevelClient);
-        this.indexLifecycleClient = new IndexLifecycleClient(restHighLevelClient);
     }
-
-    public WatcherClient watcher() {
-        return watcherClient;
-    }

     /**
@@ -104,26 +94,4 @@ public final class XPackClient {
         restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::xpackUsage, options,
             XPackUsageResponse::fromXContent, listener, emptySet());
     }
-
-    /**
-     * A wrapper for the {@link RestHighLevelClient} that provides methods for
-     * accessing the Elastic Licensing APIs.
-     * <p>
-     * See the <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/licensing-apis.html">
-     * X-Pack APIs on elastic.co</a> for more information.
-     */
-    public LicenseClient license() {
-        return licenseClient;
-    }
-
-    /**
-     * A wrapper for the {@link RestHighLevelClient} that provides methods for
-     * accessing the Elastic Index Lifecycle APIs.
-     * <p>
-     * See the <a href="http://FILL-ME-IN-WE-HAVE-NO-DOCS-YET.com">
-     * X-Pack APIs on elastic.co</a> for more information.
-     */
-    public IndexLifecycleClient indexLifecycle() {
-        return this.indexLifecycleClient;
-    }
 }
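The net effect of the two client files above: the Watcher, Licensing and Index Lifecycle clients move from XPackClient onto RestHighLevelClient itself, while xpack() shrinks to the Info and Usage APIs. A minimal sketch of the call-site change, assuming an already-built RestHighLevelClient named client; the request objects (putWatchRequest, putLicenseRequest, setPolicyRequest) are placeholders standing in for the ones built in the tests below:

// Before this commit: licensed APIs were reached through the xpack() wrapper.
// PutWatchResponse putWatchResponse =
//         client.xpack().watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);

// After this commit: each licensed API group has its own top-level accessor.
PutWatchResponse putWatchResponse = client.watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);
PutLicenseResponse putLicenseResponse = client.license().putLicense(putLicenseRequest, RequestOptions.DEFAULT);
SetIndexLifecyclePolicyResponse ilmResponse =
        client.indexLifecycle().setIndexLifecyclePolicy(setPolicyRequest, RequestOptions.DEFAULT);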
@@ -90,8 +90,8 @@ public class IndexLifecycleIT extends ESRestHighLevelClientTestCase {
         createIndex("foo", Settings.builder().put("index.lifecycle.name", "bar").build());
         createIndex("baz", Settings.builder().put("index.lifecycle.name", "eggplant").build());
         SetIndexLifecyclePolicyRequest req = new SetIndexLifecyclePolicyRequest(policy, "foo", "baz");
-        SetIndexLifecyclePolicyResponse response = execute(req, highLevelClient().xpack().indexLifecycle()::setIndexLifecyclePolicy,
-            highLevelClient().xpack().indexLifecycle()::setIndexLifecyclePolicyAsync);
+        SetIndexLifecyclePolicyResponse response = execute(req, highLevelClient().indexLifecycle()::setIndexLifecyclePolicy,
+            highLevelClient().indexLifecycle()::setIndexLifecyclePolicyAsync);
         assertThat(response.hasFailures(), is(false));
         assertThat(response.getFailedIndexes().isEmpty(), is(true));

@@ -755,7 +755,9 @@ public class RestHighLevelClientTests extends ESTestCase {
                     method.isAnnotationPresent(Deprecated.class));
             } else {
                 //TODO xpack api are currently ignored, we need to load xpack yaml spec too
-                if (apiName.startsWith("xpack.") == false) {
+                if (apiName.startsWith("xpack.") == false &&
+                    apiName.startsWith("license.") == false &&
+                    apiName.startsWith("watcher.") == false) {
                     apiNotFound.add(apiName);
                 }
             }
@@ -46,7 +46,7 @@ public class WatcherIT extends ESRestHighLevelClientTestCase {
             "}";
         BytesReference bytesReference = new BytesArray(json);
         PutWatchRequest putWatchRequest = new PutWatchRequest(watchId, bytesReference, XContentType.JSON);
-        return highLevelClient().xpack().watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);
+        return highLevelClient().watcher().putWatch(putWatchRequest, RequestOptions.DEFAULT);
     }

     public void testDeleteWatch() throws Exception {
@@ -54,7 +54,7 @@ public class WatcherIT extends ESRestHighLevelClientTestCase {
         {
             String watchId = randomAlphaOfLength(10);
             createWatch(watchId);
-            DeleteWatchResponse deleteWatchResponse = highLevelClient().xpack().watcher().deleteWatch(new DeleteWatchRequest(watchId),
+            DeleteWatchResponse deleteWatchResponse = highLevelClient().watcher().deleteWatch(new DeleteWatchRequest(watchId),
                 RequestOptions.DEFAULT);
             assertThat(deleteWatchResponse.getId(), is(watchId));
             assertThat(deleteWatchResponse.getVersion(), is(2L));
@@ -64,7 +64,7 @@ public class WatcherIT extends ESRestHighLevelClientTestCase {
         // delete watch that does not exist
         {
             String watchId = randomAlphaOfLength(10);
-            DeleteWatchResponse deleteWatchResponse = highLevelClient().xpack().watcher().deleteWatch(new DeleteWatchRequest(watchId),
+            DeleteWatchResponse deleteWatchResponse = highLevelClient().watcher().deleteWatch(new DeleteWatchRequest(watchId),
                 RequestOptions.DEFAULT);
             assertThat(deleteWatchResponse.getId(), is(watchId));
             assertThat(deleteWatchResponse.getVersion(), is(1L));
@@ -62,7 +62,7 @@ public class LicensingDocumentationIT extends ESRestHighLevelClientTestCase {
            request.setLicenseDefinition(license); // <1>
            request.setAcknowledge(false); // <2>

-           PutLicenseResponse response = client.xpack().license().putLicense(request, RequestOptions.DEFAULT);
+           PutLicenseResponse response = client.license().putLicense(request, RequestOptions.DEFAULT);
            //end::put-license-execute

            //tag::put-license-response
@@ -98,7 +98,7 @@ public class LicensingDocumentationIT extends ESRestHighLevelClientTestCase {
            listener = new LatchedActionListener<>(listener, latch);

            // tag::put-license-execute-async
-           client.xpack().license().putLicenseAsync(
+           client.license().putLicenseAsync(
                    request, RequestOptions.DEFAULT, listener); // <1>
            // end::put-license-execute-async

@@ -49,7 +49,7 @@ public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
                "}");
            PutWatchRequest request = new PutWatchRequest("my_watch_id", watch, XContentType.JSON);
            request.setActive(false); // <1>
-           PutWatchResponse response = client.xpack().watcher().putWatch(request, RequestOptions.DEFAULT);
+           PutWatchResponse response = client.watcher().putWatch(request, RequestOptions.DEFAULT);
            //end::x-pack-put-watch-execute

            //tag::x-pack-put-watch-response
@@ -85,7 +85,7 @@ public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
            listener = new LatchedActionListener<>(listener, latch);

            // tag::x-pack-put-watch-execute-async
-           client.xpack().watcher().putWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
+           client.watcher().putWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::x-pack-put-watch-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
@@ -94,7 +94,7 @@ public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
        {
            //tag::x-pack-delete-watch-execute
            DeleteWatchRequest request = new DeleteWatchRequest("my_watch_id");
-           DeleteWatchResponse response = client.xpack().watcher().deleteWatch(request, RequestOptions.DEFAULT);
+           DeleteWatchResponse response = client.watcher().deleteWatch(request, RequestOptions.DEFAULT);
            //end::x-pack-delete-watch-execute

            //tag::x-pack-delete-watch-response
@@ -125,7 +125,7 @@ public class WatcherDocumentationIT extends ESRestHighLevelClientTestCase {
            listener = new LatchedActionListener<>(listener, latch);

            // tag::x-pack-delete-watch-execute-async
-           client.xpack().watcher().deleteWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
+           client.watcher().deleteWatchAsync(request, RequestOptions.DEFAULT, listener); // <1>
            // end::x-pack-delete-watch-execute-async

            assertTrue(latch.await(30L, TimeUnit.SECONDS));
@@ -22,6 +22,7 @@ Will return, for example:
       "successful" : 1,
       "failed" : 0
     },
+    "cluster_uuid": "YjAvIhsCQ9CbjWZb2qJw3Q",
     "cluster_name": "elasticsearch",
     "timestamp": 1459427693515,
     "status": "green",
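The docs hunk above records that the cluster stats response now carries cluster_uuid at the top level, next to cluster_name. A minimal sketch of fetching it, assuming a configured low-level RestClient named restClient (the Request/Response API of the 6.x low-level client) and Apache's EntityUtils:

// GET /_cluster/stats, the endpoint documented above.
Request request = new Request("GET", "/_cluster/stats");
Response response = restClient.performRequest(request);
// The JSON body now includes "cluster_uuid" alongside "cluster_name", "timestamp" and "status".
System.out.println(EntityUtils.toString(response.getEntity()));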
@@ -96,7 +96,7 @@ final class Compiler {
             if (statefulFactoryClass != null && statefulFactoryClass.getName().equals(name)) {
                 return statefulFactoryClass;
             }
-            Class<?> found = painlessLookup.getClassFromBinaryName(name);
+            Class<?> found = painlessLookup.canonicalTypeNameToType(name.replace('$', '.'));

             return found != null ? found : super.findClass(name);
         }
@@ -187,7 +187,7 @@ public final class Def {
         String key = PainlessLookupUtility.buildPainlessMethodKey(name, arity);
         // check whitelist for matching method
         for (Class<?> clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) {
-            PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(clazz);
+            PainlessClass struct = painlessLookup.lookupPainlessClass(clazz);

             if (struct != null) {
                 PainlessMethod method = struct.methods.get(key);
@@ -197,7 +197,7 @@ public final class Def {
             }

             for (Class<?> iface : clazz.getInterfaces()) {
-                struct = painlessLookup.getPainlessStructFromJavaClass(iface);
+                struct = painlessLookup.lookupPainlessClass(iface);

                 if (struct != null) {
                     PainlessMethod method = struct.methods.get(key);
@@ -326,8 +326,8 @@ public final class Def {
      */
     static MethodHandle lookupReference(PainlessLookup painlessLookup, MethodHandles.Lookup methodHandlesLookup, String interfaceClass,
                                         Class<?> receiverClass, String name) throws Throwable {
-        Class<?> interfaceType = painlessLookup.getJavaClassFromPainlessType(interfaceClass);
-        PainlessMethod interfaceMethod = painlessLookup.getPainlessStructFromJavaClass(interfaceType).functionalMethod;
+        Class<?> interfaceType = painlessLookup.canonicalTypeNameToType(interfaceClass);
+        PainlessMethod interfaceMethod = painlessLookup.lookupPainlessClass(interfaceType).functionalMethod;
         if (interfaceMethod == null) {
             throw new IllegalArgumentException("Class [" + interfaceClass + "] is not a functional interface");
         }
@@ -345,7 +345,7 @@ public final class Def {
         final FunctionRef ref;
         if ("this".equals(type)) {
             // user written method
-            PainlessMethod interfaceMethod = painlessLookup.getPainlessStructFromJavaClass(clazz).functionalMethod;
+            PainlessMethod interfaceMethod = painlessLookup.lookupPainlessClass(clazz).functionalMethod;
             if (interfaceMethod == null) {
                 throw new IllegalArgumentException("Cannot convert function reference [" + type + "::" + call + "] " +
                                                    "to [" + PainlessLookupUtility.typeToCanonicalTypeName(clazz) + "], not a functional interface");
@@ -419,7 +419,7 @@ public final class Def {
     static MethodHandle lookupGetter(PainlessLookup painlessLookup, Class<?> receiverClass, String name) {
         // first try whitelist
         for (Class<?> clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) {
-            PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(clazz);
+            PainlessClass struct = painlessLookup.lookupPainlessClass(clazz);

             if (struct != null) {
                 MethodHandle handle = struct.getterMethodHandles.get(name);
@@ -429,7 +429,7 @@ public final class Def {
             }

             for (final Class<?> iface : clazz.getInterfaces()) {
-                struct = painlessLookup.getPainlessStructFromJavaClass(iface);
+                struct = painlessLookup.lookupPainlessClass(iface);

                 if (struct != null) {
                     MethodHandle handle = struct.getterMethodHandles.get(name);
@@ -490,7 +490,7 @@ public final class Def {
     static MethodHandle lookupSetter(PainlessLookup painlessLookup, Class<?> receiverClass, String name) {
         // first try whitelist
         for (Class<?> clazz = receiverClass; clazz != null; clazz = clazz.getSuperclass()) {
-            PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(clazz);
+            PainlessClass struct = painlessLookup.lookupPainlessClass(clazz);

             if (struct != null) {
                 MethodHandle handle = struct.setterMethodHandles.get(name);
@@ -500,7 +500,7 @@ public final class Def {
             }

             for (final Class<?> iface : clazz.getInterfaces()) {
-                struct = painlessLookup.getPainlessStructFromJavaClass(iface);
+                struct = painlessLookup.lookupPainlessClass(iface);

                 if (struct != null) {
                     MethodHandle handle = struct.setterMethodHandles.get(name);
@@ -90,10 +90,10 @@ public class FunctionRef {
                                              PainlessLookup painlessLookup, Class<?> expected, String type, String call, int numCaptures) {

         if ("new".equals(call)) {
-            return new FunctionRef(expected, painlessLookup.getPainlessStructFromJavaClass(expected).functionalMethod,
+            return new FunctionRef(expected, painlessLookup.lookupPainlessClass(expected).functionalMethod,
                     lookup(painlessLookup, expected, type), numCaptures);
         } else {
-            return new FunctionRef(expected, painlessLookup.getPainlessStructFromJavaClass(expected).functionalMethod,
+            return new FunctionRef(expected, painlessLookup.lookupPainlessClass(expected).functionalMethod,
                     lookup(painlessLookup, expected, type, call, numCaptures > 0), numCaptures);
         }
     }
@@ -230,14 +230,14 @@ public class FunctionRef {
    private static PainlessConstructor lookup(PainlessLookup painlessLookup, Class<?> expected, String type) {
        // check its really a functional interface
        // for e.g. Comparable
-       PainlessMethod method = painlessLookup.getPainlessStructFromJavaClass(expected).functionalMethod;
+       PainlessMethod method = painlessLookup.lookupPainlessClass(expected).functionalMethod;
        if (method == null) {
            throw new IllegalArgumentException("Cannot convert function reference [" + type + "::new] " +
                    "to [" + PainlessLookupUtility.typeToCanonicalTypeName(expected) + "], not a functional interface");
        }

        // lookup requested constructor
-       PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(painlessLookup.getJavaClassFromPainlessType(type));
+       PainlessClass struct = painlessLookup.lookupPainlessClass(painlessLookup.canonicalTypeNameToType(type));
        PainlessConstructor impl = struct.constructors.get(PainlessLookupUtility.buildPainlessConstructorKey(method.typeParameters.size()));

        if (impl == null) {
@@ -254,14 +254,14 @@ public class FunctionRef {
                                         String type, String call, boolean receiverCaptured) {
        // check its really a functional interface
        // for e.g. Comparable
-       PainlessMethod method = painlessLookup.getPainlessStructFromJavaClass(expected).functionalMethod;
+       PainlessMethod method = painlessLookup.lookupPainlessClass(expected).functionalMethod;
        if (method == null) {
            throw new IllegalArgumentException("Cannot convert function reference [" + type + "::" + call + "] " +
                    "to [" + PainlessLookupUtility.typeToCanonicalTypeName(expected) + "], not a functional interface");
        }

        // lookup requested method
-       PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(painlessLookup.getJavaClassFromPainlessType(type));
+       PainlessClass struct = painlessLookup.lookupPainlessClass(painlessLookup.canonicalTypeNameToType(type));
        final PainlessMethod impl;
        // look for a static impl first
        PainlessMethod staticImpl =
@@ -57,7 +57,7 @@ public class PainlessExplainError extends Error {
         if (objectToExplain != null) {
             toString = objectToExplain.toString();
             javaClassName = objectToExplain.getClass().getName();
-            PainlessClass struct = painlessLookup.getPainlessStructFromJavaClass(objectToExplain.getClass());
+            PainlessClass struct = painlessLookup.lookupPainlessClass(objectToExplain.getClass());
             if (struct != null) {
                 painlessClassName = PainlessLookupUtility.typeToCanonicalTypeName(objectToExplain.getClass());
             }
@@ -190,7 +190,7 @@ public class ScriptClassInfo {
             componentType = componentType.getComponentType();
         }

-        if (painlessLookup.getPainlessStructFromJavaClass(componentType) == null) {
+        if (painlessLookup.lookupPainlessClass(componentType) == null) {
             throw new IllegalArgumentException(unknownErrorMessageSource.apply(componentType));
         }

@@ -75,7 +75,7 @@ final class EnhancedPainlessLexer extends PainlessLexer {

     @Override
     protected boolean isType(String name) {
-        return painlessLookup.isSimplePainlessType(name);
+        return painlessLookup.isValidCanonicalClassName(name);
     }

     @Override
@@ -19,41 +19,119 @@

 package org.elasticsearch.painless.lookup;

-import java.util.Collection;
 import java.util.Collections;
 import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+
+import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessConstructorKey;
+import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessFieldKey;
+import static org.elasticsearch.painless.lookup.PainlessLookupUtility.buildPainlessMethodKey;
+import static org.elasticsearch.painless.lookup.PainlessLookupUtility.typeToCanonicalTypeName;

 /**
  * The entire API for Painless. Also used as a whitelist for checking for legal
  * methods and fields during at both compile-time and runtime.
  */
 public final class PainlessLookup {

-    public Collection<Class<?>> getStructs() {
-        return classesToPainlessClasses.keySet();
-    }
-
     private final Map<String, Class<?>> canonicalClassNamesToClasses;
     private final Map<Class<?>, PainlessClass> classesToPainlessClasses;

     PainlessLookup(Map<String, Class<?>> canonicalClassNamesToClasses, Map<Class<?>, PainlessClass> classesToPainlessClasses) {
+        Objects.requireNonNull(canonicalClassNamesToClasses);
+        Objects.requireNonNull(classesToPainlessClasses);
+
         this.canonicalClassNamesToClasses = Collections.unmodifiableMap(canonicalClassNamesToClasses);
         this.classesToPainlessClasses = Collections.unmodifiableMap(classesToPainlessClasses);
     }

-    public Class<?> getClassFromBinaryName(String painlessType) {
-        return canonicalClassNamesToClasses.get(painlessType.replace('$', '.'));
+    public boolean isValidCanonicalClassName(String canonicalClassName) {
+        Objects.requireNonNull(canonicalClassName);
+
+        return canonicalClassNamesToClasses.containsKey(canonicalClassName);
     }

-    public boolean isSimplePainlessType(String painlessType) {
-        return canonicalClassNamesToClasses.containsKey(painlessType);
-    }
+    public Class<?> canonicalTypeNameToType(String painlessType) {
+        Objects.requireNonNull(painlessType);

-    public PainlessClass getPainlessStructFromJavaClass(Class<?> clazz) {
-        return classesToPainlessClasses.get(clazz);
-    }
-
-    public Class<?> getJavaClassFromPainlessType(String painlessType) {
         return PainlessLookupUtility.canonicalTypeNameToType(painlessType, canonicalClassNamesToClasses);
     }

+    public Set<Class<?>> getClasses() {
+        return classesToPainlessClasses.keySet();
+    }
+
+    public PainlessClass lookupPainlessClass(Class<?> targetClass) {
+        return classesToPainlessClasses.get(targetClass);
+    }
+
+    public PainlessConstructor lookupPainlessConstructor(Class<?> targetClass, int constructorArity) {
+        Objects.requireNonNull(targetClass);
+
+        PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass);
+        String painlessConstructorKey = buildPainlessConstructorKey(constructorArity);
+
+        if (targetPainlessClass == null) {
+            throw new IllegalArgumentException("target class [" + typeToCanonicalTypeName(targetClass) + "] " +
+                    "not found for constructor [" + painlessConstructorKey + "]");
+        }
+
+        PainlessConstructor painlessConstructor = targetPainlessClass.constructors.get(painlessConstructorKey);
+
+        if (painlessConstructor == null) {
+            throw new IllegalArgumentException(
+                    "constructor [" + typeToCanonicalTypeName(targetClass) + ", " + painlessConstructorKey + "] not found");
+        }
+
+        return painlessConstructor;
+    }
+
+    public PainlessMethod lookupPainlessMethod(Class<?> targetClass, boolean isStatic, String methodName, int methodArity) {
+        Objects.requireNonNull(targetClass);
+        Objects.requireNonNull(methodName);
+
+        if (targetClass.isPrimitive()) {
+            targetClass = PainlessLookupUtility.typeToBoxedType(targetClass);
+        }
+
+        PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass);
+        String painlessMethodKey = buildPainlessMethodKey(methodName, methodArity);
+
+        if (targetPainlessClass == null) {
+            throw new IllegalArgumentException(
+                    "target class [" + typeToCanonicalTypeName(targetClass) + "] not found for method [" + painlessMethodKey + "]");
+        }
+
+        PainlessMethod painlessMethod = isStatic ?
+                targetPainlessClass.staticMethods.get(painlessMethodKey) :
+                targetPainlessClass.methods.get(painlessMethodKey);
+
+        if (painlessMethod == null) {
+            throw new IllegalArgumentException(
+                    "method [" + typeToCanonicalTypeName(targetClass) + ", " + painlessMethodKey + "] not found");
+        }
+
+        return painlessMethod;
+    }
+
+    public PainlessField lookupPainlessField(Class<?> targetClass, boolean isStatic, String fieldName) {
+        Objects.requireNonNull(targetClass);
+        Objects.requireNonNull(fieldName);
+
+        PainlessClass targetPainlessClass = classesToPainlessClasses.get(targetClass);
+        String painlessFieldKey = buildPainlessFieldKey(fieldName);
+
+        if (targetPainlessClass == null) {
+            throw new IllegalArgumentException(
+                    "target class [" + typeToCanonicalTypeName(targetClass) + "] not found for field [" + painlessFieldKey + "]");
+        }
+
+        PainlessField painlessField = isStatic ?
+                targetPainlessClass.staticFields.get(painlessFieldKey) :
+                targetPainlessClass.fields.get(painlessFieldKey);
+
+        if (painlessField == null) {
+            throw new IllegalArgumentException(
+                    "field [" + typeToCanonicalTypeName(targetClass) + ", " + painlessFieldKey + "] not found");
+        }
+
+        return painlessField;
+    }
 }
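The rewritten PainlessLookup above replaces the null-returning getters with lookup* methods that throw IllegalArgumentException when a class, constructor, method or field is not whitelisted, and the AST node classes in the hunks below switch from null checks to try/catch accordingly. A minimal sketch of the new caller pattern; painlessLookup and createError are stand-ins for the surrounding compiler context, and the ArrayList example mirrors EListInit below:

PainlessMethod addMethod;
try {
    // Throws IllegalArgumentException if ArrayList.add(Object) is not whitelisted,
    // where the old getPainlessStructFromJavaClass(...).methods.get(key) returned null.
    addMethod = painlessLookup.lookupPainlessMethod(ArrayList.class, false, "add", 1);
} catch (IllegalArgumentException iae) {
    throw createError(iae); // the node classes wrap the lookup error into a compile-time error
}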
|
@@ -50,7 +50,7 @@ public final class EExplicit extends AExpression {
     @Override
     void analyze(Locals locals) {
         try {
-            actual = locals.getPainlessLookup().getJavaClassFromPainlessType(type);
+            actual = locals.getPainlessLookup().canonicalTypeNameToType(type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + type + "]."));
         }
@@ -66,7 +66,7 @@ public final class EFunctionRef extends AExpression implements ILambda {
         try {
             if ("this".equals(type)) {
                 // user's own function
-                PainlessMethod interfaceMethod = locals.getPainlessLookup().getPainlessStructFromJavaClass(expected).functionalMethod;
+                PainlessMethod interfaceMethod = locals.getPainlessLookup().lookupPainlessClass(expected).functionalMethod;
                 if (interfaceMethod == null) {
                     throw new IllegalArgumentException("Cannot convert function reference [" + type + "::" + call + "] " +
                             "to [" + PainlessLookupUtility.typeToCanonicalTypeName(expected) + "], not a functional interface");
@@ -58,7 +58,7 @@ public final class EInstanceof extends AExpression {

         // ensure the specified type is part of the definition
         try {
-            clazz = locals.getPainlessLookup().getJavaClassFromPainlessType(this.type);
+            clazz = locals.getPainlessLookup().canonicalTypeNameToType(this.type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + this.type + "]."));
         }
@@ -121,7 +121,7 @@ public final class ELambda extends AExpression implements ILambda {
             }
         } else {
             // we know the method statically, infer return type and any unknown/def types
-            interfaceMethod = locals.getPainlessLookup().getPainlessStructFromJavaClass(expected).functionalMethod;
+            interfaceMethod = locals.getPainlessLookup().lookupPainlessClass(expected).functionalMethod;
             if (interfaceMethod == null) {
                 throw createError(new IllegalArgumentException("Cannot pass lambda to " +
                         "[" + PainlessLookupUtility.typeToCanonicalTypeName(expected) + "], not a functional interface"));
@@ -24,7 +24,6 @@ import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
 import org.elasticsearch.painless.lookup.PainlessConstructor;
-import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 import org.elasticsearch.painless.lookup.def;
 import org.objectweb.asm.Type;
@@ -64,18 +63,16 @@ public final class EListInit extends AExpression {

         actual = ArrayList.class;

-        constructor = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual).constructors.get(
-                PainlessLookupUtility.buildPainlessConstructorKey(0));
-
-        if (constructor == null) {
-            throw createError(new IllegalStateException("Illegal tree structure."));
+        try {
+            constructor = locals.getPainlessLookup().lookupPainlessConstructor(actual, 0);
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
         }

-        method = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual).methods
-                .get(PainlessLookupUtility.buildPainlessMethodKey("add", 1));
-
-        if (method == null) {
-            throw createError(new IllegalStateException("Illegal tree structure."));
+        try {
+            method = locals.getPainlessLookup().lookupPainlessMethod(actual, false, "add", 1);
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
         }

         for (int index = 0; index < values.size(); ++index) {
@@ -24,7 +24,6 @@ import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
 import org.elasticsearch.painless.lookup.PainlessConstructor;
-import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 import org.elasticsearch.painless.lookup.def;
 import org.objectweb.asm.Type;
@@ -70,18 +69,16 @@ public final class EMapInit extends AExpression {

         actual = HashMap.class;

-        constructor = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual).constructors.get(
-                PainlessLookupUtility.buildPainlessConstructorKey(0));
-
-        if (constructor == null) {
-            throw createError(new IllegalStateException("Illegal tree structure."));
+        try {
+            constructor = locals.getPainlessLookup().lookupPainlessConstructor(actual, 0);
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
         }

-        method = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual).methods
-                .get(PainlessLookupUtility.buildPainlessMethodKey("put", 2));
-
-        if (method == null) {
-            throw createError(new IllegalStateException("Illegal tree structure."));
+        try {
+            method = locals.getPainlessLookup().lookupPainlessMethod(actual, false, "put", 2);
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
         }

         if (keys.size() != values.size()) {
@@ -61,7 +61,7 @@ public final class ENewArray extends AExpression {
         Class<?> clazz;

         try {
-            clazz = locals.getPainlessLookup().getJavaClassFromPainlessType(this.type);
+            clazz = locals.getPainlessLookup().canonicalTypeNameToType(this.type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + this.type + "]."));
         }
@@ -23,7 +23,6 @@ import org.elasticsearch.painless.Globals;
 import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
-import org.elasticsearch.painless.lookup.PainlessClass;
 import org.elasticsearch.painless.lookup.PainlessConstructor;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.objectweb.asm.Type;
@@ -60,38 +59,36 @@ public final class ENewObj extends AExpression {
     @Override
     void analyze(Locals locals) {
         try {
-            actual = locals.getPainlessLookup().getJavaClassFromPainlessType(this.type);
+            actual = locals.getPainlessLookup().canonicalTypeNameToType(this.type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + this.type + "]."));
         }

-        PainlessClass struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(actual);
-        constructor = struct.constructors.get(PainlessLookupUtility.buildPainlessConstructorKey(arguments.size()));
-
-        if (constructor != null) {
-            Class<?>[] types = new Class<?>[constructor.typeParameters.size()];
-            constructor.typeParameters.toArray(types);
-
-            if (constructor.typeParameters.size() != arguments.size()) {
-                throw createError(new IllegalArgumentException(
-                        "When calling constructor on type [" + PainlessLookupUtility.typeToCanonicalTypeName(actual) + "] " +
-                        "expected [" + constructor.typeParameters.size() + "] arguments, but found [" + arguments.size() + "]."));
-            }
-
-            for (int argument = 0; argument < arguments.size(); ++argument) {
-                AExpression expression = arguments.get(argument);
-
-                expression.expected = types[argument];
-                expression.internal = true;
-                expression.analyze(locals);
-                arguments.set(argument, expression.cast(locals));
-            }
-
-            statement = true;
-        } else {
-            throw createError(new IllegalArgumentException(
-                    "Unknown new call on type [" + PainlessLookupUtility.typeToCanonicalTypeName(actual) + "]."));
+        try {
+            constructor = locals.getPainlessLookup().lookupPainlessConstructor(actual, arguments.size());
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
         }
+
+        Class<?>[] types = new Class<?>[constructor.typeParameters.size()];
+        constructor.typeParameters.toArray(types);
+
+        if (constructor.typeParameters.size() != arguments.size()) {
+            throw createError(new IllegalArgumentException(
+                    "When calling constructor on type [" + PainlessLookupUtility.typeToCanonicalTypeName(actual) + "] " +
+                    "expected [" + constructor.typeParameters.size() + "] arguments, but found [" + arguments.size() + "]."));
+        }
+
+        for (int argument = 0; argument < arguments.size(); ++argument) {
+            AExpression expression = arguments.get(argument);
+
+            expression.expected = types[argument];
+            expression.internal = true;
+            expression.analyze(locals);
+            arguments.set(argument, expression.cast(locals));
+        }
+
+        statement = true;
     }

     @Override
@@ -48,7 +48,7 @@ public final class EStatic extends AExpression {
     @Override
     void analyze(Locals locals) {
         try {
-            actual = locals.getPainlessLookup().getJavaClassFromPainlessType(type);
+            actual = locals.getPainlessLookup().canonicalTypeNameToType(type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + type + "]."));
         }
@@ -23,8 +23,6 @@ import org.elasticsearch.painless.Globals;
 import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
-import org.elasticsearch.painless.lookup.PainlessClass;
-import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 import org.elasticsearch.painless.lookup.def;

@@ -66,26 +64,16 @@ public final class PCallInvoke extends AExpression {
         prefix.expected = prefix.actual;
         prefix = prefix.cast(locals);
 
         if (prefix.actual.isArray()) {
             throw createError(new IllegalArgumentException("Illegal call [" + name + "] on array type."));
         }
 
-        PainlessClass struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(prefix.actual);
-
-        if (prefix.actual.isPrimitive()) {
-            struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(PainlessLookupUtility.typeToBoxedType(prefix.actual));
-        }
-
-        String methodKey = PainlessLookupUtility.buildPainlessMethodKey(name, arguments.size());
-        PainlessMethod method = prefix instanceof EStatic ? struct.staticMethods.get(methodKey) : struct.methods.get(methodKey);
-
-        if (method != null) {
-            sub = new PSubCallInvoke(location, method, prefix.actual, arguments);
-        } else if (prefix.actual == def.class) {
+        if (prefix.actual == def.class) {
             sub = new PSubDefCall(location, name, arguments);
         } else {
-            throw createError(new IllegalArgumentException("Unknown call [" + name + "] with [" + arguments.size() + "] arguments " +
-                "on type [" + PainlessLookupUtility.typeToCanonicalTypeName(prefix.actual) + "]."));
+            try {
+                PainlessMethod method =
+                        locals.getPainlessLookup().lookupPainlessMethod(prefix.actual, prefix instanceof EStatic, name, arguments.size());
+                sub = new PSubCallInvoke(location, method, prefix.actual, arguments);
+            } catch (IllegalArgumentException iae) {
+                throw createError(iae);
+            }
         }
 
         if (nullSafe) {
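
PCallInvoke now defers boxing of primitive receivers and error reporting to the lookup as well. A hedged sketch of the call-site pattern (lookup and receiver are illustrative names):

    // Sketch: one lookup call replaces the struct fetch, the primitive-boxing
    // branch, and the hand-built method key; a miss surfaces as an
    // IllegalArgumentException keyed like [java.lang.String, indexOf/1].
    PainlessMethod method = lookup.lookupPainlessMethod(receiver, /*isStatic=*/ false, "indexOf", 1);
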
@@ -23,8 +23,6 @@ import org.elasticsearch.painless.Globals;
 import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
-import org.elasticsearch.painless.lookup.PainlessClass;
-import org.elasticsearch.painless.lookup.PainlessField;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;
 import org.elasticsearch.painless.lookup.def;

@@ -67,26 +65,34 @@ public final class PField extends AStoreable {
         } else if (prefix.actual == def.class) {
             sub = new PSubDefField(location, value);
         } else {
-            PainlessClass struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(prefix.actual);
-            PainlessField field = prefix instanceof EStatic ? struct.staticFields.get(value) : struct.fields.get(value);
-
-            if (field != null) {
-                sub = new PSubField(location, field);
-            } else {
-                PainlessMethod getter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey(
-                    "get" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 0));
-
-                if (getter == null) {
-                    getter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey(
-                        "is" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 0));
+            try {
+                sub = new PSubField(location,
+                        locals.getPainlessLookup().lookupPainlessField(prefix.actual, prefix instanceof EStatic, value));
+            } catch (IllegalArgumentException fieldIAE) {
+                PainlessMethod getter;
+                PainlessMethod setter;
+
+                try {
+                    getter = locals.getPainlessLookup().lookupPainlessMethod(prefix.actual, false,
+                            "get" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 0);
+                } catch (IllegalArgumentException getIAE) {
+                    try {
+                        getter = locals.getPainlessLookup().lookupPainlessMethod(prefix.actual, false,
+                                "is" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 0);
+                    } catch (IllegalArgumentException isIAE) {
+                        getter = null;
+                    }
                 }
 
-                PainlessMethod setter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey(
-                    "set" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 1));
+                try {
+                    setter = locals.getPainlessLookup().lookupPainlessMethod(prefix.actual, false,
+                            "set" + Character.toUpperCase(value.charAt(0)) + value.substring(1), 0);
+                } catch (IllegalArgumentException setIAE) {
+                    setter = null;
+                }
 
                 if (getter != null || setter != null) {
-                    sub = new PSubShortcut(
-                        location, value, PainlessLookupUtility.typeToCanonicalTypeName(prefix.actual), getter, setter);
+                    sub = new PSubShortcut(location, value, PainlessLookupUtility.typeToCanonicalTypeName(prefix.actual), getter, setter);
                 } else {
                     EConstant index = new EConstant(location, value);
                     index.analyze(locals);

@@ -99,12 +105,11 @@ public final class PField extends AStoreable {
                         sub = new PSubListShortcut(location, prefix.actual, index);
                     }
                 }
-            }
-        }
 
-        if (sub == null) {
-            throw createError(new IllegalArgumentException(
-                "Unknown field [" + value + "] for type [" + PainlessLookupUtility.typeToCanonicalTypeName(prefix.actual) + "]."));
+                if (sub == null) {
+                    throw createError(fieldIAE);
+                }
+            }
         }
 
         if (nullSafe) {
@@ -24,7 +24,6 @@ import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
 import org.elasticsearch.painless.WriterConstants;
-import org.elasticsearch.painless.lookup.PainlessClass;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;

@@ -56,11 +55,14 @@ final class PSubListShortcut extends AStoreable {
 
     @Override
     void analyze(Locals locals) {
-        PainlessClass struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(targetClass);
         String canonicalClassName = PainlessLookupUtility.typeToCanonicalTypeName(targetClass);
 
-        getter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey("get", 1));
-        setter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey("set", 2));
+        try {
+            getter = locals.getPainlessLookup().lookupPainlessMethod(targetClass, false, "get", 1);
+            setter = locals.getPainlessLookup().lookupPainlessMethod(targetClass, false, "set", 2);
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
+        }
 
         if (getter != null && (getter.returnType == void.class || getter.typeParameters.size() != 1 ||
             getter.typeParameters.get(0) != int.class)) {
@@ -23,7 +23,6 @@ import org.elasticsearch.painless.Globals;
 import org.elasticsearch.painless.Locals;
 import org.elasticsearch.painless.Location;
 import org.elasticsearch.painless.MethodWriter;
-import org.elasticsearch.painless.lookup.PainlessClass;
 import org.elasticsearch.painless.lookup.PainlessLookupUtility;
 import org.elasticsearch.painless.lookup.PainlessMethod;

@@ -55,11 +54,14 @@ final class PSubMapShortcut extends AStoreable {
 
     @Override
     void analyze(Locals locals) {
-        PainlessClass struct = locals.getPainlessLookup().getPainlessStructFromJavaClass(targetClass);
         String canonicalClassName = PainlessLookupUtility.typeToCanonicalTypeName(targetClass);
 
-        getter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey("get", 1));
-        setter = struct.methods.get(PainlessLookupUtility.buildPainlessMethodKey("put", 2));
+        try {
+            getter = locals.getPainlessLookup().lookupPainlessMethod(targetClass, false, "get", 1);
+            setter = locals.getPainlessLookup().lookupPainlessMethod(targetClass, false, "put", 2);
+        } catch (IllegalArgumentException iae) {
+            throw createError(iae);
+        }
 
         if (getter != null && (getter.returnType == void.class || getter.typeParameters.size() != 1)) {
             throw createError(new IllegalArgumentException("Illegal map get shortcut for type [" + canonicalClassName + "]."));
@@ -67,7 +67,7 @@ public final class SCatch extends AStatement {
         Class<?> clazz;
 
         try {
-            clazz = locals.getPainlessLookup().getJavaClassFromPainlessType(this.type);
+            clazz = locals.getPainlessLookup().canonicalTypeNameToType(this.type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + this.type + "]."));
         }

@@ -62,7 +62,7 @@ public final class SDeclaration extends AStatement {
         Class<?> clazz;
 
         try {
-            clazz = locals.getPainlessLookup().getJavaClassFromPainlessType(this.type);
+            clazz = locals.getPainlessLookup().canonicalTypeNameToType(this.type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + this.type + "]."));
         }

@@ -71,7 +71,7 @@ public class SEach extends AStatement {
         Class<?> clazz;
 
         try {
-            clazz = locals.getPainlessLookup().getJavaClassFromPainlessType(this.type);
+            clazz = locals.getPainlessLookup().canonicalTypeNameToType(this.type);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Not a type [" + this.type + "]."));
         }
@@ -121,7 +121,7 @@ public final class SFunction extends AStatement {
 
     void generateSignature(PainlessLookup painlessLookup) {
         try {
-            returnType = painlessLookup.getJavaClassFromPainlessType(rtnTypeStr);
+            returnType = painlessLookup.canonicalTypeNameToType(rtnTypeStr);
         } catch (IllegalArgumentException exception) {
             throw createError(new IllegalArgumentException("Illegal return type [" + rtnTypeStr + "] for function [" + name + "]."));
         }

@@ -135,7 +135,7 @@ public final class SFunction extends AStatement {
 
         for (int param = 0; param < this.paramTypeStrs.size(); ++param) {
             try {
-                Class<?> paramType = painlessLookup.getJavaClassFromPainlessType(this.paramTypeStrs.get(param));
+                Class<?> paramType = painlessLookup.canonicalTypeNameToType(this.paramTypeStrs.get(param));
 
                 paramClasses[param] = PainlessLookupUtility.typeToJavaType(paramType);
                 paramTypes.add(paramType);
@@ -76,12 +76,10 @@ final class SSubEachIterable extends AStatement {
         if (expression.actual == def.class) {
             method = null;
         } else {
-            method = locals.getPainlessLookup().getPainlessStructFromJavaClass(expression.actual).methods
-                .get(PainlessLookupUtility.buildPainlessMethodKey("iterator", 0));
-
-            if (method == null) {
-                throw createError(new IllegalArgumentException("Unable to create iterator for the type " +
-                    "[" + PainlessLookupUtility.typeToCanonicalTypeName(expression.actual) + "]."));
+            try {
+                method = locals.getPainlessLookup().lookupPainlessMethod(expression.actual, false, "iterator", 0);
+            } catch (IllegalArgumentException iae) {
+                throw createError(iae);
             }
         }
 
@@ -23,12 +23,12 @@ package org.elasticsearch.painless;
 public class OverloadTests extends ScriptTestCase {
 
     public void testMethod() {
-        assertEquals(2, exec("return 'abc123abc'.indexOf('c');"));
-        assertEquals(8, exec("return 'abc123abc'.indexOf('c', 3);"));
+        //assertEquals(2, exec("return 'abc123abc'.indexOf('c');"));
+        //assertEquals(8, exec("return 'abc123abc'.indexOf('c', 3);"));
         IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> {
             exec("return 'abc123abc'.indexOf('c', 3, 'bogus');");
         });
-        assertTrue(expected.getMessage().contains("[indexOf] with [3] arguments"));
+        assertTrue(expected.getMessage().contains("[java.lang.String, indexOf/3]"));
     }
 
     public void testMethodDynamic() {
@@ -45,9 +45,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.function.Consumer;
+import java.util.stream.Collectors;
 
-import static java.util.Comparator.comparing;
-import static java.util.stream.Collectors.toList;
-
 /**
  * Generates an API reference from the method and type whitelists in {@link PainlessLookup}.

@@ -74,9 +74,10 @@ public class PainlessDocGenerator {
                 Files.newOutputStream(indexPath, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE),
                 false, StandardCharsets.UTF_8.name())) {
             emitGeneratedWarning(indexStream);
-            List<Class<?>> classes = PAINLESS_LOOKUP.getStructs().stream().sorted(comparing(Class::getCanonicalName)).collect(toList());
+            List<Class<?>> classes = PAINLESS_LOOKUP.getClasses().stream().sorted(
+                    Comparator.comparing(Class::getCanonicalName)).collect(Collectors.toList());
             for (Class<?> clazz : classes) {
-                PainlessClass struct = PAINLESS_LOOKUP.getPainlessStructFromJavaClass(clazz);
+                PainlessClass struct = PAINLESS_LOOKUP.lookupPainlessClass(clazz);
                 String canonicalClassName = PainlessLookupUtility.typeToCanonicalTypeName(clazz);
 
                 if (clazz.isPrimitive()) {
@@ -252,7 +252,7 @@ public class RegexTests extends ScriptTestCase {
         IllegalArgumentException e = expectScriptThrows(IllegalArgumentException.class, () -> {
             exec("Pattern.compile('aa')");
         });
-        assertEquals("Unknown call [compile] with [1] arguments on type [java.util.regex.Pattern].", e.getMessage());
+        assertTrue(e.getMessage().contains("[java.util.regex.Pattern, compile/1]"));
     }
 
     public void testBadRegexPattern() {
@@ -404,7 +404,7 @@ public class NodeToStringTests extends ESTestCase {
 
     public void testPSubCallInvoke() {
         Location l = new Location(getTestName(), 0);
-        PainlessClass c = painlessLookup.getPainlessStructFromJavaClass(Integer.class);
+        PainlessClass c = painlessLookup.lookupPainlessClass(Integer.class);
         PainlessMethod m = c.methods.get(PainlessLookupUtility.buildPainlessMethodKey("toString", 0));
         PSubCallInvoke node = new PSubCallInvoke(l, m, null, emptyList());
         node.prefix = new EVariable(l, "a");

@@ -459,7 +459,7 @@ public class NodeToStringTests extends ESTestCase {
 
     public void testPSubField() {
         Location l = new Location(getTestName(), 0);
-        PainlessClass s = painlessLookup.getPainlessStructFromJavaClass(Boolean.class);
+        PainlessClass s = painlessLookup.lookupPainlessClass(Boolean.class);
         PainlessField f = s.staticFields.get("TRUE");
         PSubField node = new PSubField(l, f);
         node.prefix = new EStatic(l, "Boolean");

@@ -497,7 +497,7 @@ public class NodeToStringTests extends ESTestCase {
 
     public void testPSubShortcut() {
         Location l = new Location(getTestName(), 0);
-        PainlessClass s = painlessLookup.getPainlessStructFromJavaClass(FeatureTest.class);
+        PainlessClass s = painlessLookup.lookupPainlessClass(FeatureTest.class);
         PainlessMethod getter = s.methods.get(PainlessLookupUtility.buildPainlessMethodKey("getX", 0));
         PainlessMethod setter = s.methods.get(PainlessLookupUtility.buildPainlessMethodKey("setX", 1));
         PSubShortcut node = new PSubShortcut(l, "x", FeatureTest.class.getName(), getter, setter);
@@ -29,3 +29,40 @@
   - is_true: nodes.fs
   - is_true: nodes.plugins
   - is_true: nodes.network_types
+
+---
+"get cluster stats returns cluster_uuid at the top level":
+  - skip:
+      version: " - 6.99.99"
+      reason: "cluster stats including cluster_uuid at the top level is new in v6.5.0 and higher"
+
+  - do:
+      cluster.stats: {}
+
+  - is_true: cluster_uuid
+  - is_true: timestamp
+  - is_true: cluster_name
+  - match: {status: green}
+  - gte: { indices.count: 0}
+  - is_true: indices.docs
+  - is_true: indices.store
+  - is_true: indices.fielddata
+  - is_true: indices.query_cache
+  - is_true: indices.completion
+  - is_true: indices.segments
+  - gte: { nodes.count.total: 1}
+  - gte: { nodes.count.master: 1}
+  - gte: { nodes.count.data: 1}
+  - gte: { nodes.count.ingest: 0}
+  - gte: { nodes.count.coordinating_only: 0}
+  - is_true: nodes.os
+  - is_true: nodes.os.mem.total_in_bytes
+  - is_true: nodes.os.mem.free_in_bytes
+  - is_true: nodes.os.mem.used_in_bytes
+  - gte: { nodes.os.mem.free_percent: 0 }
+  - gte: { nodes.os.mem.used_percent: 0 }
+  - is_true: nodes.process
+  - is_true: nodes.jvm
+  - is_true: nodes.fs
+  - is_true: nodes.plugins
+  - is_true: nodes.network_types
@@ -40,15 +40,18 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResp
     ClusterStatsIndices indicesStats;
     ClusterHealthStatus status;
     long timestamp;
+    String clusterUUID;
 
     ClusterStatsResponse() {
     }
 
     public ClusterStatsResponse(long timestamp,
+                                String clusterUUID,
                                 ClusterName clusterName,
                                 List<ClusterStatsNodeResponse> nodes,
                                 List<FailedNodeException> failures) {
         super(clusterName, nodes, failures);
+        this.clusterUUID = clusterUUID;
         this.timestamp = timestamp;
         nodesStats = new ClusterStatsNodes(nodes);
         indicesStats = new ClusterStatsIndices(nodes);

@@ -61,6 +64,10 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResp
         }
     }
 
+    public String getClusterUUID() {
+        return this.clusterUUID;
+    }
+
     public long getTimestamp() {
         return this.timestamp;
     }
@@ -111,6 +118,7 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResp
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.field("cluster_uuid", getClusterUUID());
         builder.field("timestamp", getTimestamp());
         if (status != null) {
             builder.field("status", status.name().toLowerCase(Locale.ROOT));
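
Taken together, the ClusterStatsResponse hunks thread the cluster UUID from the constructor to the top level of the XContent output, which is what the new REST test above asserts. A small sketch of the widened constructor (argument values are illustrative, not from the diff):

    // Sketch: the response now carries the cluster UUID next to the timestamp
    // and serializes it as "cluster_uuid" before "timestamp" in toXContent.
    ClusterStatsResponse response = new ClusterStatsResponse(
        System.currentTimeMillis(),   // timestamp
        "some-cluster-uuid",          // illustrative UUID value
        clusterName, nodeResponses, failures);
    assert "some-cluster-uuid".equals(response.getClusterUUID());
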
@@ -74,6 +74,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
             List<ClusterStatsNodeResponse> responses, List<FailedNodeException> failures) {
         return new ClusterStatsResponse(
             System.currentTimeMillis(),
+            clusterService.state().metaData().clusterUUID(),
             clusterService.getClusterName(),
             responses,
             failures);
@@ -144,7 +144,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
         switch (indexResult.getResultType()) {
             case SUCCESS:
                 IndexResponse response = new IndexResponse(primary.shardId(), indexRequest.type(), indexRequest.id(),
-                    indexResult.getSeqNo(), primary.getPrimaryTerm(), indexResult.getVersion(), indexResult.isCreated());
+                    indexResult.getSeqNo(), indexResult.getTerm(), indexResult.getVersion(), indexResult.isCreated());
                 return new BulkItemResultHolder(response, indexResult, bulkItemRequest);
             case FAILURE:
                 return new BulkItemResultHolder(null, indexResult, bulkItemRequest);

@@ -161,7 +161,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
         switch (deleteResult.getResultType()) {
             case SUCCESS:
                 DeleteResponse response = new DeleteResponse(primary.shardId(), deleteRequest.type(), deleteRequest.id(),
-                    deleteResult.getSeqNo(), primary.getPrimaryTerm(), deleteResult.getVersion(), deleteResult.isFound());
+                    deleteResult.getSeqNo(), deleteResult.getTerm(), deleteResult.getVersion(), deleteResult.isFound());
                 return new BulkItemResultHolder(response, deleteResult, bulkItemRequest);
             case FAILURE:
                 return new BulkItemResultHolder(null, deleteResult, bulkItemRequest);

@@ -300,7 +300,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
             assert result instanceof Engine.IndexResult : result.getClass();
             final IndexRequest updateIndexRequest = translate.action();
             final IndexResponse indexResponse = new IndexResponse(primary.shardId(), updateIndexRequest.type(), updateIndexRequest.id(),
-                result.getSeqNo(), primary.getPrimaryTerm(), result.getVersion(), ((Engine.IndexResult) result).isCreated());
+                result.getSeqNo(), result.getTerm(), result.getVersion(), ((Engine.IndexResult) result).isCreated());
             updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(),
                 indexResponse.getId(), indexResponse.getSeqNo(), indexResponse.getPrimaryTerm(), indexResponse.getVersion(),
                 indexResponse.getResult());

@@ -320,7 +320,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
             final DeleteRequest updateDeleteRequest = translate.action();
 
             final DeleteResponse deleteResponse = new DeleteResponse(primary.shardId(), updateDeleteRequest.type(), updateDeleteRequest.id(),
-                result.getSeqNo(), primary.getPrimaryTerm(), result.getVersion(), ((Engine.DeleteResult) result).isFound());
+                result.getSeqNo(), result.getTerm(), result.getVersion(), ((Engine.DeleteResult) result).isFound());
 
             updateResponse = new UpdateResponse(deleteResponse.getShardInfo(), deleteResponse.getShardId(),
                 deleteResponse.getType(), deleteResponse.getId(), deleteResponse.getSeqNo(), deleteResponse.getPrimaryTerm(),

@@ -356,7 +356,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
         } catch (Exception failure) {
             // we may fail translating a update to index or delete operation
             // we use index result to communicate failure while translating update request
-            final Engine.Result result = new Engine.IndexResult(failure, updateRequest.version(), SequenceNumbers.UNASSIGNED_SEQ_NO);
+            final Engine.Result result = primary.getFailedIndexResult(failure, updateRequest.version());
             return new BulkItemResultHolder(null, result, primaryItemRequest);
         }
 

@@ -559,7 +559,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
             () ->
                 primary.applyIndexOperationOnPrimary(request.version(), request.versionType(), sourceToParse,
                     request.getAutoGeneratedTimestamp(), request.isRetry()),
-            e -> new Engine.IndexResult(e, request.version()),
+            e -> primary.getFailedIndexResult(e, request.version()),
             mappingUpdater);
     }
 

@@ -567,7 +567,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
             MappingUpdatePerformer mappingUpdater) throws Exception {
         return executeOnPrimaryWhileHandlingMappingUpdates(primary.shardId(), request.type(),
             () -> primary.applyDeleteOperationOnPrimary(request.version(), request.type(), request.id(), request.versionType()),
-            e -> new Engine.DeleteResult(e, request.version()),
+            e -> primary.getFailedDeleteResult(e, request.version()),
             mappingUpdater);
     }
 
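
The bulk-action hunks follow one rule: the primary term attached to a response or failure now comes from the engine result (or from the shard's operation term via the new getFailedIndexResult/getFailedDeleteResult helpers) rather than from primary.getPrimaryTerm(), whose value may already reflect a pending term bump. A reduced sketch of the before/after, with names abbreviated for illustration:

    // Sketch: take the term recorded when the operation actually executed.
    long termForResponse = indexResult.getTerm();      // new: term captured by the engine
    // long termForResponse = primary.getPrimaryTerm(); // old: racy across term transitions
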
@@ -929,7 +929,7 @@ public abstract class TransportReplicationAction<
         if (actualAllocationId.equals(allocationId) == false) {
             throw new ShardNotFoundException(shardId, "expected aID [{}] but found [{}]", allocationId, actualAllocationId);
         }
-        final long actualTerm = indexShard.getPrimaryTerm();
+        final long actualTerm = indexShard.getPendingPrimaryTerm();
         if (actualTerm != primaryTerm) {
             throw new ShardNotFoundException(shardId, "expected aID [{}] with term [{}] but found [{}]", allocationId,
                 primaryTerm, actualTerm);

@@ -983,7 +983,7 @@ public abstract class TransportReplicationAction<
         }
 
         public boolean isRelocated() {
-            return indexShard.isPrimaryMode() == false;
+            return indexShard.isRelocatedPrimary();
         }
 
         @Override
@@ -304,6 +304,7 @@ public abstract class Engine implements Closeable {
         private final Operation.TYPE operationType;
         private final Result.Type resultType;
         private final long version;
+        private final long term;
         private final long seqNo;
         private final Exception failure;
         private final SetOnce<Boolean> freeze = new SetOnce<>();

@@ -311,19 +312,21 @@ public abstract class Engine implements Closeable {
         private Translog.Location translogLocation;
         private long took;
 
-        protected Result(Operation.TYPE operationType, Exception failure, long version, long seqNo) {
+        protected Result(Operation.TYPE operationType, Exception failure, long version, long term, long seqNo) {
             this.operationType = operationType;
             this.failure = Objects.requireNonNull(failure);
             this.version = version;
+            this.term = term;
             this.seqNo = seqNo;
             this.requiredMappingUpdate = null;
             this.resultType = Type.FAILURE;
         }
 
-        protected Result(Operation.TYPE operationType, long version, long seqNo) {
+        protected Result(Operation.TYPE operationType, long version, long term, long seqNo) {
             this.operationType = operationType;
             this.version = version;
             this.seqNo = seqNo;
+            this.term = term;
             this.failure = null;
             this.requiredMappingUpdate = null;
             this.resultType = Type.SUCCESS;

@@ -333,6 +336,7 @@ public abstract class Engine implements Closeable {
             this.operationType = operationType;
             this.version = Versions.NOT_FOUND;
             this.seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
+            this.term = 0L;
             this.failure = null;
             this.requiredMappingUpdate = requiredMappingUpdate;
             this.resultType = Type.MAPPING_UPDATE_REQUIRED;

@@ -357,6 +361,10 @@ public abstract class Engine implements Closeable {
             return seqNo;
         }
 
+        public long getTerm() {
+            return term;
+        }
+
         /**
          * If the operation was aborted due to missing mappings, this method will return the mappings
          * that are required to complete the operation.

@@ -415,20 +423,20 @@ public abstract class Engine implements Closeable {
 
         private final boolean created;
 
-        public IndexResult(long version, long seqNo, boolean created) {
-            super(Operation.TYPE.INDEX, version, seqNo);
+        public IndexResult(long version, long term, long seqNo, boolean created) {
+            super(Operation.TYPE.INDEX, version, term, seqNo);
             this.created = created;
         }
 
         /**
          * use in case of the index operation failed before getting to internal engine
          **/
-        public IndexResult(Exception failure, long version) {
-            this(failure, version, SequenceNumbers.UNASSIGNED_SEQ_NO);
+        public IndexResult(Exception failure, long version, long term) {
+            this(failure, version, term, SequenceNumbers.UNASSIGNED_SEQ_NO);
         }
 
-        public IndexResult(Exception failure, long version, long seqNo) {
-            super(Operation.TYPE.INDEX, failure, version, seqNo);
+        public IndexResult(Exception failure, long version, long term, long seqNo) {
+            super(Operation.TYPE.INDEX, failure, version, term, seqNo);
             this.created = false;
         }
 

@@ -447,20 +455,20 @@ public abstract class Engine implements Closeable {
 
         private final boolean found;
 
-        public DeleteResult(long version, long seqNo, boolean found) {
-            super(Operation.TYPE.DELETE, version, seqNo);
+        public DeleteResult(long version, long term, long seqNo, boolean found) {
+            super(Operation.TYPE.DELETE, version, term, seqNo);
             this.found = found;
         }
 
         /**
          * use in case of the delete operation failed before getting to internal engine
          **/
-        public DeleteResult(Exception failure, long version) {
-            this(failure, version, SequenceNumbers.UNASSIGNED_SEQ_NO, false);
+        public DeleteResult(Exception failure, long version, long term) {
+            this(failure, version, term, SequenceNumbers.UNASSIGNED_SEQ_NO, false);
         }
 
-        public DeleteResult(Exception failure, long version, long seqNo, boolean found) {
-            super(Operation.TYPE.DELETE, failure, version, seqNo);
+        public DeleteResult(Exception failure, long version, long term, long seqNo, boolean found) {
+            super(Operation.TYPE.DELETE, failure, version, term, seqNo);
             this.found = found;
         }
 

@@ -477,12 +485,12 @@ public abstract class Engine implements Closeable {
 
     public static class NoOpResult extends Result {
 
-        NoOpResult(long seqNo) {
-            super(Operation.TYPE.NO_OP, 0, seqNo);
+        NoOpResult(long term, long seqNo) {
+            super(Operation.TYPE.NO_OP, term, 0, seqNo);
         }
 
-        NoOpResult(long seqNo, Exception failure) {
-            super(Operation.TYPE.NO_OP, failure, 0, seqNo);
+        NoOpResult(long term, long seqNo, Exception failure) {
+            super(Operation.TYPE.NO_OP, failure, term, 0, seqNo);
         }
 
     }
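
Engine.Result and its subclasses now record the primary term alongside version and seqNo; getTerm() is the accessor the bulk hunks above rely on. A minimal sketch of the extended surface (argument values are illustrative):

    // Sketch: every engine result now carries the term of the operation.
    Engine.IndexResult created = new Engine.IndexResult(1L /*version*/, 2L /*term*/, 0L /*seqNo*/, true /*created*/);
    assert created.getTerm() == 2L;
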
@@ -736,6 +736,10 @@ public class InternalEngine extends Engine {
         return localCheckpointTracker.generateSeqNo();
     }
 
+    private long getPrimaryTerm() {
+        return engineConfig.getPrimaryTermSupplier().getAsLong();
+    }
+
     @Override
     public IndexResult index(Index index) throws IOException {
         assert Objects.equals(index.uid().field(), IdFieldMapper.NAME) : index.uid().field();

@@ -788,7 +792,7 @@ public class InternalEngine extends Engine {
                     indexResult = indexIntoLucene(index, plan);
                 } else {
                     indexResult = new IndexResult(
-                        plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
+                        plan.versionForIndexing, getPrimaryTerm(), plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
                 }
                 if (index.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
                     final Translog.Location location;

@@ -900,7 +904,7 @@ public class InternalEngine extends Engine {
                 currentVersion, index.version(), currentNotFoundOrDeleted)) {
                 final VersionConflictEngineException e =
                     new VersionConflictEngineException(shardId, index, currentVersion, currentNotFoundOrDeleted);
-                plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion);
+                plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm());
             } else {
                 plan = IndexingStrategy.processNormally(currentNotFoundOrDeleted,
                     generateSeqNoForOperation(index),

@@ -930,7 +934,7 @@ public class InternalEngine extends Engine {
                 assert assertDocDoesNotExist(index, canOptimizeAddDocument(index) == false);
                 addDocs(index.docs(), indexWriter);
             }
-            return new IndexResult(plan.versionForIndexing, plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
+            return new IndexResult(plan.versionForIndexing, getPrimaryTerm(), plan.seqNoForIndexing, plan.currentNotFoundOrDeleted);
         } catch (Exception ex) {
             if (indexWriter.getTragicException() == null) {
                 /* There is no tragic event recorded so this must be a document failure.

@@ -946,7 +950,7 @@ public class InternalEngine extends Engine {
                  * we return a `MATCH_ANY` version to indicate no document was index. The value is
                  * not used anyway
                  */
-                return new IndexResult(ex, Versions.MATCH_ANY, plan.seqNoForIndexing);
+                return new IndexResult(ex, Versions.MATCH_ANY, getPrimaryTerm(), plan.seqNoForIndexing);
             } else {
                 throw ex;
             }

@@ -1019,8 +1023,8 @@ public class InternalEngine extends Engine {
         }
 
         static IndexingStrategy skipDueToVersionConflict(
-                VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion) {
-            final IndexResult result = new IndexResult(e, currentVersion);
+                VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion, long term) {
+            final IndexResult result = new IndexResult(e, currentVersion, term);
             return new IndexingStrategy(
                 currentNotFoundOrDeleted, false, false, SequenceNumbers.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result);
         }

@@ -1097,7 +1101,7 @@ public class InternalEngine extends Engine {
                     deleteResult = deleteInLucene(delete, plan);
                 } else {
                     deleteResult = new DeleteResult(
-                        plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false);
+                        plan.versionOfDeletion, getPrimaryTerm(), plan.seqNoOfDeletion, plan.currentlyDeleted == false);
                 }
                 if (delete.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
                     final Translog.Location location;

@@ -1178,7 +1182,7 @@ public class InternalEngine extends Engine {
         final DeletionStrategy plan;
         if (delete.versionType().isVersionConflictForWrites(currentVersion, delete.version(), currentlyDeleted)) {
             final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete, currentVersion, currentlyDeleted);
-            plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, currentlyDeleted);
+            plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted);
         } else {
             plan = DeletionStrategy.processNormally(
                 currentlyDeleted,

@@ -1201,12 +1205,12 @@ public class InternalEngine extends Engine {
                 new DeleteVersionValue(plan.versionOfDeletion, plan.seqNoOfDeletion, delete.primaryTerm(),
                     engineConfig.getThreadPool().relativeTimeInMillis()));
             return new DeleteResult(
-                plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false);
+                plan.versionOfDeletion, getPrimaryTerm(), plan.seqNoOfDeletion, plan.currentlyDeleted == false);
         } catch (Exception ex) {
             if (indexWriter.getTragicException() == null) {
                 // there is no tragic event and such it must be a document level failure
                 return new DeleteResult(
-                    ex, plan.versionOfDeletion, plan.seqNoOfDeletion, plan.currentlyDeleted == false);
+                    ex, plan.versionOfDeletion, getPrimaryTerm(), plan.seqNoOfDeletion, plan.currentlyDeleted == false);
             } else {
                 throw ex;
             }

@@ -1237,9 +1241,9 @@ public class InternalEngine extends Engine {
         }
 
         static DeletionStrategy skipDueToVersionConflict(
-                VersionConflictEngineException e, long currentVersion, boolean currentlyDeleted) {
+                VersionConflictEngineException e, long currentVersion, long term, boolean currentlyDeleted) {
             final long unassignedSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO;
-            final DeleteResult deleteResult = new DeleteResult(e, currentVersion, unassignedSeqNo, currentlyDeleted == false);
+            final DeleteResult deleteResult = new DeleteResult(e, currentVersion, term, unassignedSeqNo, currentlyDeleted == false);
             return new DeletionStrategy(false, currentlyDeleted, unassignedSeqNo, Versions.NOT_FOUND, deleteResult);
         }
 

@@ -1268,7 +1272,7 @@ public class InternalEngine extends Engine {
         try (ReleasableLock ignored = readLock.acquire()) {
             noOpResult = innerNoOp(noOp);
         } catch (final Exception e) {
-            noOpResult = new NoOpResult(noOp.seqNo(), e);
+            noOpResult = new NoOpResult(getPrimaryTerm(), noOp.seqNo(), e);
         }
         return noOpResult;
     }

@@ -1278,7 +1282,7 @@ public class InternalEngine extends Engine {
         assert noOp.seqNo() > SequenceNumbers.NO_OPS_PERFORMED;
         final long seqNo = noOp.seqNo();
         try {
-            final NoOpResult noOpResult = new NoOpResult(noOp.seqNo());
+            final NoOpResult noOpResult = new NoOpResult(getPrimaryTerm(), noOp.seqNo());
             if (noOp.origin() != Operation.Origin.LOCAL_TRANSLOG_RECOVERY) {
                 final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason()));
                 noOpResult.setTranslogLocation(location);
@@ -85,6 +85,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
      * computation from that point on.
      */
     volatile boolean primaryMode;
+
     /**
      * Boolean flag that indicates if a relocation handoff is in progress. A handoff is started by calling {@link #startRelocationHandoff}
      * and is finished by either calling {@link #completeRelocationHandoff} or {@link #abortRelocationHandoff}, depending on whether the

@@ -102,6 +103,11 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
      */
     boolean handoffInProgress;
 
+    /**
+     * Boolean flag that indicates whether a relocation handoff completed (see {@link #completeRelocationHandoff}).
+     */
+    volatile boolean relocated;
+
     /**
      * The global checkpoint tracker relies on the property that cluster state updates are applied in-order. After transferring a primary
      * context from the primary relocation source to the target and initializing the target, it is possible for the target to apply a

@@ -260,6 +266,13 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
         return primaryMode;
     }
 
+    /**
+     * Returns whether the replication tracker has relocated away to another shard copy.
+     */
+    public boolean isRelocated() {
+        return relocated;
+    }
+
     /**
      * Class invariant that should hold before and after every invocation of public methods on this class. As Java lacks implication
      * as a logical operator, many of the invariants are written under the form (!A || B), they should be read as (A implies B) however.

@@ -287,6 +300,9 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
         // relocation handoff can only occur in primary mode
         assert !handoffInProgress || primaryMode;
 
+        // a relocated copy is not in primary mode
+        assert !relocated || !primaryMode;
+
         // the current shard is marked as in-sync when the global checkpoint tracker operates in primary mode
         assert !primaryMode || checkpoints.get(shardAllocationId).inSync;
 

@@ -766,8 +782,10 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
         assert invariant();
         assert primaryMode;
         assert handoffInProgress;
+        assert relocated == false;
         primaryMode = false;
         handoffInProgress = false;
+        relocated = true;
         // forget all checkpoint information except for global checkpoint of current shard
         checkpoints.entrySet().stream().forEach(e -> {
             final CheckpointState cps = e.getValue();
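
With the new relocated flag, "handed off" is no longer inferred from primaryMode alone. A stripped-down sketch of the completeRelocationHandoff transition and the invariant it must preserve (fields as in the hunks above; this is an illustration, not the full class):

    // Sketch: handoff completion flips all three flags under the mutex;
    // a relocated copy can never re-enter primary mode.
    primaryMode = false;
    handoffInProgress = false;
    relocated = true;
    assert !relocated || !primaryMode; // class invariant from the hunk above
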
@@ -52,6 +52,7 @@ import org.elasticsearch.cluster.routing.RecoverySource;
 import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Booleans;
+import org.elasticsearch.common.CheckedRunnable;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -192,7 +193,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
 
     protected volatile ShardRouting shardRouting;
     protected volatile IndexShardState state;
-    protected volatile long primaryTerm;
+    protected volatile long pendingPrimaryTerm; // see JavaDocs for getPendingPrimaryTerm
+    protected volatile long operationPrimaryTerm;
     protected final AtomicReference<Engine> currentEngineReference = new AtomicReference<>();
     final EngineFactory engineFactory;
 

@@ -315,7 +317,8 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         }
         indexShardOperationPermits = new IndexShardOperationPermits(shardId, threadPool);
         searcherWrapper = indexSearcherWrapper;
-        primaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id());
+        pendingPrimaryTerm = indexSettings.getIndexMetaData().primaryTerm(shardId.id());
+        operationPrimaryTerm = pendingPrimaryTerm;
         refreshListeners = buildRefreshListeners();
         lastSearcherAccess.set(threadPool.relativeTimeInMillis());
         persistMetadata(path, indexSettings, shardRouting, null, logger);
@@ -365,10 +368,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }
 
     /**
-     * Returns the primary term the index shard is on. See {@link org.elasticsearch.cluster.metadata.IndexMetaData#primaryTerm(int)}
+     * USE THIS METHOD WITH CARE!
+     * Returns the primary term the index shard is supposed to be on. In case of primary promotion or when a replica learns about
+     * a new term due to a new primary, the term that's exposed here will not be the term that the shard internally uses to assign
+     * to operations. The shard will auto-correct its internal operation term, but this might take time.
+     * See {@link org.elasticsearch.cluster.metadata.IndexMetaData#primaryTerm(int)}
      */
-    public long getPrimaryTerm() {
-        return this.primaryTerm;
+    public long getPendingPrimaryTerm() {
+        return this.pendingPrimaryTerm;
     }
 
     /**
@@ -418,7 +425,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                 "a primary relocation is completed by the master, but primary mode is not active " + currentRouting;
 
             changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
-        } else if (currentRouting.primary() && currentRouting.relocating() && replicationTracker.isPrimaryMode() == false &&
+        } else if (currentRouting.primary() && currentRouting.relocating() && replicationTracker.isRelocated() &&
             (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
             // if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard routing occur (e.g. due to recovery
             // failure / cancellation). The reason is that at the moment we cannot safely reactivate primary mode without risking two

@@ -431,7 +438,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         final CountDownLatch shardStateUpdated = new CountDownLatch(1);
 
         if (newRouting.primary()) {
-            if (newPrimaryTerm == primaryTerm) {
+            if (newPrimaryTerm == pendingPrimaryTerm) {
                 if (currentRouting.initializing() && currentRouting.isRelocationTarget() == false && newRouting.active()) {
                     // the master started a recovering primary, activate primary mode.
                     replicationTracker.activatePrimaryMode(getLocalCheckpoint());
@@ -454,10 +461,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                 assert newRouting.initializing() == false :
                     "a started primary shard should never update its term; "
                         + "shard " + newRouting + ", "
-                        + "current term [" + primaryTerm + "], "
+                        + "current term [" + pendingPrimaryTerm + "], "
                         + "new term [" + newPrimaryTerm + "]";
-                assert newPrimaryTerm > primaryTerm :
-                    "primary terms can only go up; current term [" + primaryTerm + "], new term [" + newPrimaryTerm + "]";
+                assert newPrimaryTerm > pendingPrimaryTerm :
+                    "primary terms can only go up; current term [" + pendingPrimaryTerm + "], new term [" + newPrimaryTerm + "]";
                 /*
                  * Before this call returns, we are guaranteed that all future operations are delayed and so this happens before we
                  * increment the primary term. The latch is needed to ensure that we do not unblock operations before the primary term is
@@ -468,12 +475,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                             if (resyncStarted == false) {
                                 throw new IllegalStateException("cannot start resync while it's already in progress");
                             }
-                indexShardOperationPermits.asyncBlockOperations(
-                    30,
-                    TimeUnit.MINUTES,
+                bumpPrimaryTerm(newPrimaryTerm,
                     () -> {
                         shardStateUpdated.await();
+                        assert pendingPrimaryTerm == newPrimaryTerm :
+                            "shard term changed on primary. expected [" + newPrimaryTerm + "] but was [" + pendingPrimaryTerm + "]" +
+                            ", current routing: " + currentRouting + ", new routing: " + newRouting;
+                        assert operationPrimaryTerm == newPrimaryTerm;
                         try {
                             replicationTracker.activatePrimaryMode(getLocalCheckpoint());
                             /*
                              * If this shard was serving as a replica shard when another shard was promoted to primary then the state of
                              * its local checkpoint tracker was reset during the primary term transition. In particular, the local
@@ -517,10 +527,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                         } catch (final AlreadyClosedException e) {
                             // okay, the index was deleted
                         }
-                    },
-                    e -> failShard("exception during primary term transition", e));
-                replicationTracker.activatePrimaryMode(getLocalCheckpoint());
-                primaryTerm = newPrimaryTerm;
+                    });
             }
         }
         // set this last, once we finished updating all internal state.

@@ -528,8 +535,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
 
         assert this.shardRouting.primary() == false ||
             this.shardRouting.started() == false || // note that we use started and not active to avoid relocating shards
+            this.indexShardOperationPermits.isBlocked() || // if permits are blocked, we are still transitioning
             this.replicationTracker.isPrimaryMode()
-            : "an started primary must be in primary mode " + this.shardRouting;
+            : "a started primary with non-pending operation term must be in primary mode " + this.shardRouting;
         shardStateUpdated.countDown();
     }
     if (currentRouting != null && currentRouting.active() == false && newRouting.active()) {
@@ -590,7 +598,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                 consumer.accept(primaryContext);
                 synchronized (mutex) {
                     verifyRelocatingState();
-                    replicationTracker.completeRelocationHandoff(); // make changes to primaryMode flag only under mutex
+                    replicationTracker.completeRelocationHandoff(); // make changes to primaryMode and relocated flag only under mutex
                 }
             } catch (final Exception e) {
                 try {
@@ -655,21 +663,22 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     public Engine.IndexResult applyIndexOperationOnPrimary(long version, VersionType versionType, SourceToParse sourceToParse,
                                                            long autoGeneratedTimestamp, boolean isRetry) throws IOException {
         assert versionType.validateVersionForWrites(version);
-        return applyIndexOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, versionType, autoGeneratedTimestamp,
+        return applyIndexOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, operationPrimaryTerm, version, versionType, autoGeneratedTimestamp,
             isRetry, Engine.Operation.Origin.PRIMARY, sourceToParse);
     }
 
     public Engine.IndexResult applyIndexOperationOnReplica(long seqNo, long version, long autoGeneratedTimeStamp,
                                                            boolean isRetry, SourceToParse sourceToParse)
         throws IOException {
-        return applyIndexOperation(seqNo, primaryTerm, version, null, autoGeneratedTimeStamp, isRetry,
+        return applyIndexOperation(seqNo, operationPrimaryTerm, version, null, autoGeneratedTimeStamp, isRetry,
             Engine.Operation.Origin.REPLICA, sourceToParse);
     }
 
     private Engine.IndexResult applyIndexOperation(long seqNo, long opPrimaryTerm, long version, @Nullable VersionType versionType,
                                                    long autoGeneratedTimeStamp, boolean isRetry, Engine.Operation.Origin origin,
                                                    SourceToParse sourceToParse) throws IOException {
-        assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]";
+        assert opPrimaryTerm <= this.operationPrimaryTerm: "op term [ " + opPrimaryTerm + " ] > shard term [" + this.operationPrimaryTerm
+            + "]";
         ensureWriteAllowed(origin);
         Engine.Index operation;
         try {
@@ -686,7 +695,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
             // can not raise an exception that may block any replication of previous operations to the
             // replicas
             verifyNotClosed(e);
-            return new Engine.IndexResult(e, version, seqNo);
+            return new Engine.IndexResult(e, version, opPrimaryTerm, seqNo);
         }
 
         return index(getEngine(), operation);
@@ -723,12 +732,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }
 
     public Engine.NoOpResult markSeqNoAsNoop(long seqNo, String reason) throws IOException {
-        return markSeqNoAsNoop(seqNo, primaryTerm, reason, Engine.Operation.Origin.REPLICA);
+        return markSeqNoAsNoop(seqNo, operationPrimaryTerm, reason, Engine.Operation.Origin.REPLICA);
    }
 
     private Engine.NoOpResult markSeqNoAsNoop(long seqNo, long opPrimaryTerm, String reason,
                                               Engine.Operation.Origin origin) throws IOException {
-        assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]";
+        assert opPrimaryTerm <= this.operationPrimaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.operationPrimaryTerm
+            + "]";
         long startTime = System.nanoTime();
         ensureWriteAllowed(origin);
         final Engine.NoOp noOp = new Engine.NoOp(seqNo, opPrimaryTerm, origin, startTime, reason);
@@ -743,20 +753,29 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
         return engine.noOp(noOp);
     }
 
+    public Engine.IndexResult getFailedIndexResult(Exception e, long version) {
+        return new Engine.IndexResult(e, version, operationPrimaryTerm);
+    }
+
+    public Engine.DeleteResult getFailedDeleteResult(Exception e, long version) {
+        return new Engine.DeleteResult(e, version, operationPrimaryTerm);
+    }
+
     public Engine.DeleteResult applyDeleteOperationOnPrimary(long version, String type, String id, VersionType versionType)
         throws IOException {
         assert versionType.validateVersionForWrites(version);
-        return applyDeleteOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, type, id, versionType,
+        return applyDeleteOperation(SequenceNumbers.UNASSIGNED_SEQ_NO, operationPrimaryTerm, version, type, id, versionType,
             Engine.Operation.Origin.PRIMARY);
     }
 
     public Engine.DeleteResult applyDeleteOperationOnReplica(long seqNo, long version, String type, String id) throws IOException {
-        return applyDeleteOperation(seqNo, primaryTerm, version, type, id, null, Engine.Operation.Origin.REPLICA);
+        return applyDeleteOperation(seqNo, operationPrimaryTerm, version, type, id, null, Engine.Operation.Origin.REPLICA);
     }
 
     private Engine.DeleteResult applyDeleteOperation(long seqNo, long opPrimaryTerm, long version, String type, String id,
                                                      @Nullable VersionType versionType, Engine.Operation.Origin origin) throws IOException {
-        assert opPrimaryTerm <= this.primaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.primaryTerm + "]";
+        assert opPrimaryTerm <= this.operationPrimaryTerm : "op term [ " + opPrimaryTerm + " ] > shard term [" + this.operationPrimaryTerm
+            + "]";
         ensureWriteAllowed(origin);
         // When there is a single type, the unique identifier is only composed of the _id,
         // so there is no way to differenciate foo#1 from bar#1. This is especially an issue

@@ -772,7 +791,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                 return new Engine.DeleteResult(update);
             }
         } catch (MapperParsingException | IllegalArgumentException | TypeMissingException e) {
-            return new Engine.DeleteResult(e, version, seqNo, false);
+            return new Engine.DeleteResult(e, version, operationPrimaryTerm, seqNo, false);
         }
         final Term uid = extractUidForDelete(type, id);
         final Engine.Delete delete = prepareDelete(type, id, uid, seqNo, opPrimaryTerm, version,
@@ -1209,7 +1228,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }
 
     public void trimOperationOfPreviousPrimaryTerms(long aboveSeqNo) {
-        getEngine().trimOperationsFromTranslog(primaryTerm, aboveSeqNo);
+        getEngine().trimOperationsFromTranslog(operationPrimaryTerm, aboveSeqNo);
     }
 
     public Engine.Result applyTranslogOperation(Translog.Operation operation, Engine.Operation.Origin origin) throws IOException {
@@ -2082,10 +2101,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     }
 
     /**
-     * Returns whether the shard is in primary mode, i.e., in charge of replicating changes (see {@link ReplicationTracker}).
+     * Returns whether the shard is a relocated primary, i.e. not in charge anymore of replicating changes (see {@link ReplicationTracker}).
     */
-    public boolean isPrimaryMode() {
-        return replicationTracker.isPrimaryMode();
+    public boolean isRelocatedPrimary() {
+        assert shardRouting.primary() : "only call isRelocatedPrimary on primary shard";
+        return replicationTracker.isRelocated();
     }
 
     class ShardEventListener implements Engine.EventListener {
@@ -2175,7 +2195,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
             IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()),
             Collections.singletonList(refreshListeners),
             Collections.singletonList(new RefreshMetricUpdater(refreshMetric)),
-            indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker, this::getPrimaryTerm);
+            indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker, () -> operationPrimaryTerm);
     }
 
     /**
@@ -2194,7 +2214,25 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
        indexShardOperationPermits.acquire(onPermitAcquired, executorOnDelay, false, debugInfo);
    }

-    private final Object primaryTermMutex = new Object();
+    private <E extends Exception> void bumpPrimaryTerm(long newPrimaryTerm, final CheckedRunnable<E> onBlocked) {
+        assert Thread.holdsLock(mutex);
+        assert newPrimaryTerm > pendingPrimaryTerm;
+        assert operationPrimaryTerm <= pendingPrimaryTerm;
+        final CountDownLatch termUpdated = new CountDownLatch(1);
+        indexShardOperationPermits.asyncBlockOperations(30, TimeUnit.MINUTES, () -> {
+                assert operationPrimaryTerm <= pendingPrimaryTerm;
+                termUpdated.await();
+                // indexShardOperationPermits doesn't guarantee that async submissions are executed
+                // in the order submitted. We need to guard against another term bump
+                if (operationPrimaryTerm < newPrimaryTerm) {
+                    operationPrimaryTerm = newPrimaryTerm;
+                    onBlocked.run();
+                }
+            },
+            e -> failShard("exception during primary term transition", e));
+        pendingPrimaryTerm = newPrimaryTerm;
+        termUpdated.countDown();
+    }

    /**
     * Acquire a replica operation permit whenever the shard is ready for indexing (see
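Note (illustrative, not part of the merge): bumpPrimaryTerm above publishes pendingPrimaryTerm before releasing the async block through a latch, so the blocked task never observes a stale pending term. The same ordering trick in isolation, with hypothetical names:

    import java.util.concurrent.CountDownLatch;

    final class TermBumper {
        private volatile long pendingTerm = 1;
        private volatile long operationTerm = 1;

        void bump(long newTerm, Runnable onBlocked, java.util.concurrent.Executor blockExecutor) {
            final CountDownLatch termUpdated = new CountDownLatch(1);
            blockExecutor.execute(() -> {            // stands in for asyncBlockOperations
                try {
                    termUpdated.await();             // wait until pendingTerm is published
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    return;
                }
                if (operationTerm < newTerm) {       // guard against a racing, larger bump
                    operationTerm = newTerm;
                    onBlocked.run();
                }
            });
            pendingTerm = newTerm;                   // publish first...
            termUpdated.countDown();                 // ...then release the blocked task
        }
    }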
@@ -2203,7 +2241,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     * {@link IllegalStateException}. If permit acquisition is delayed, the listener will be invoked on the executor with the specified
     * name.
     *
-     * @param operationPrimaryTerm the operation primary term
+     * @param opPrimaryTerm the operation primary term
     * @param globalCheckpoint the global checkpoint associated with the request
     * @param onPermitAcquired the listener for permit acquisition
     * @param executorOnDelay the name of the executor to invoke the listener on if permit acquisition is delayed
@@ -2211,15 +2249,14 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
     * the tracing will capture the supplied object's {@link Object#toString()} value. Otherwise the object
     * isn't used
     */
-    public void acquireReplicaOperationPermit(final long operationPrimaryTerm, final long globalCheckpoint,
+    public void acquireReplicaOperationPermit(final long opPrimaryTerm, final long globalCheckpoint,
                                              final ActionListener<Releasable> onPermitAcquired, final String executorOnDelay,
                                              final Object debugInfo) {
        verifyNotClosed();
        verifyReplicationTarget();
-        final boolean globalCheckpointUpdated;
-        if (operationPrimaryTerm > primaryTerm) {
-            synchronized (primaryTermMutex) {
-                if (operationPrimaryTerm > primaryTerm) {
+        if (opPrimaryTerm > pendingPrimaryTerm) {
+            synchronized (mutex) {
+                if (opPrimaryTerm > pendingPrimaryTerm) {
                    IndexShardState shardState = state();
                    // only roll translog and update primary term if shard has made it past recovery
                    // Having a new primary term here means that the old primary failed and that there is a new primary, which again
@@ -2229,64 +2266,52 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
                        shardState != IndexShardState.STARTED) {
                        throw new IndexShardNotStartedException(shardId, shardState);
                    }
-                    try {
-                        indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> {
-                            assert operationPrimaryTerm > primaryTerm :
-                                "shard term already update. op term [" + operationPrimaryTerm + "], shardTerm [" + primaryTerm + "]";
-                            primaryTerm = operationPrimaryTerm;
-                            updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition");
-                            final long currentGlobalCheckpoint = getGlobalCheckpoint();
-                            final long localCheckpoint;
-                            if (currentGlobalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) {
-                                localCheckpoint = SequenceNumbers.NO_OPS_PERFORMED;
-                            } else {
-                                localCheckpoint = currentGlobalCheckpoint;
-                            }
-                            logger.trace(
+                    if (opPrimaryTerm > pendingPrimaryTerm) {
+                        bumpPrimaryTerm(opPrimaryTerm, () -> {
+                            updateGlobalCheckpointOnReplica(globalCheckpoint, "primary term transition");
+                            final long currentGlobalCheckpoint = getGlobalCheckpoint();
+                            final long localCheckpoint;
+                            if (currentGlobalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) {
+                                localCheckpoint = SequenceNumbers.NO_OPS_PERFORMED;
+                            } else {
+                                localCheckpoint = currentGlobalCheckpoint;
+                            }
+                            logger.trace(
                                "detected new primary with primary term [{}], resetting local checkpoint from [{}] to [{}]",
-                                operationPrimaryTerm,
+                                opPrimaryTerm,
                                getLocalCheckpoint(),
                                localCheckpoint);
-                            getEngine().resetLocalCheckpoint(localCheckpoint);
-                            getEngine().rollTranslogGeneration();
+                            getEngine().resetLocalCheckpoint(localCheckpoint);
+                            getEngine().rollTranslogGeneration();
                        });
-                        globalCheckpointUpdated = true;
-                    } catch (final Exception e) {
-                        onPermitAcquired.onFailure(e);
-                        return;
-                    }
-                } else {
-                    globalCheckpointUpdated = false;
-                }
+                    }
                }
-            } else {
-                globalCheckpointUpdated = false;
            }
+        }

-        assert operationPrimaryTerm <= primaryTerm
-            : "operation primary term [" + operationPrimaryTerm + "] should be at most [" + primaryTerm + "]";
+        assert opPrimaryTerm <= pendingPrimaryTerm
+            : "operation primary term [" + opPrimaryTerm + "] should be at most [" + pendingPrimaryTerm + "]";
        indexShardOperationPermits.acquire(
            new ActionListener<Releasable>() {
                @Override
                public void onResponse(final Releasable releasable) {
-                    if (operationPrimaryTerm < primaryTerm) {
+                    if (opPrimaryTerm < operationPrimaryTerm) {
                        releasable.close();
                        final String message = String.format(
                            Locale.ROOT,
                            "%s operation primary term [%d] is too old (current [%d])",
                            shardId,
-                            operationPrimaryTerm,
-                            primaryTerm);
+                            opPrimaryTerm,
+                            operationPrimaryTerm);
                        onPermitAcquired.onFailure(new IllegalStateException(message));
                    } else {
-                        if (globalCheckpointUpdated == false) {
-                            try {
-                                updateGlobalCheckpointOnReplica(globalCheckpoint, "operation");
-                            } catch (Exception e) {
-                                releasable.close();
-                                onPermitAcquired.onFailure(e);
-                                return;
-                            }
-                        }
+                        try {
+                            updateGlobalCheckpointOnReplica(globalCheckpoint, "operation");
+                        } catch (Exception e) {
+                            releasable.close();
+                            onPermitAcquired.onFailure(e);
+                            return;
+                        }
                        onPermitAcquired.onResponse(releasable);
                    }
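Note (illustrative, not part of the merge): the rewritten acquireReplicaOperationPermit follows a double-checked pattern — a cheap unlocked term comparison, a recheck under the mutex, and a late staleness check once the permit is granted. Compressed into a standalone sketch with hypothetical names:

    final class ReplicaPermitGate {
        private final Object mutex = new Object();
        private volatile long pendingTerm = 1;   // highest term announced to this shard
        private volatile long operationTerm = 1; // term the shard currently operates under

        void acquire(long opTerm, Runnable onAcquired, java.util.function.Consumer<Exception> onFailure) {
            if (opTerm > pendingTerm) {              // cheap unlocked check
                synchronized (mutex) {
                    if (opTerm > pendingTerm) {      // recheck under the lock
                        pendingTerm = opTerm;        // the real code defers this to bumpPrimaryTerm
                    }
                }
            }
            // permit acquisition elided; once granted, stale operations are rejected late:
            if (opTerm < operationTerm) {
                onFailure.accept(new IllegalStateException("operation primary term [" + opTerm
                    + "] is too old (current [" + operationTerm + "])"));
            } else {
                onAcquired.run();
            }
        }
    }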
@@ -19,7 +19,6 @@

package org.elasticsearch.index.shard;

-import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.Assertions;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
@@ -29,10 +28,12 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext;
+import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.Closeable;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
@@ -59,7 +60,7 @@ final class IndexShardOperationPermits implements Closeable {
    final Semaphore semaphore = new Semaphore(TOTAL_PERMITS, true); // fair to ensure a blocking thread is not starved
    private final List<DelayedOperation> delayedOperations = new ArrayList<>(); // operations that are delayed
    private volatile boolean closed;
-    private boolean delayed; // does not need to be volatile as all accesses are done under a lock on this
+    private int queuedBlockOperations; // does not need to be volatile as all accesses are done under a lock on this

    // only valid when assertions are enabled. Key is AtomicBoolean associated with each permit to ensure close once semantics.
    // Value is a tuple, with a some debug information supplied by the caller and a stack trace of the acquiring thread
@@ -102,9 +103,6 @@ final class IndexShardOperationPermits implements Closeable {
            final long timeout,
            final TimeUnit timeUnit,
            final CheckedRunnable<E> onBlocked) throws InterruptedException, TimeoutException, E {
-        if (closed) {
-            throw new IndexShardClosedException(shardId);
-        }
        delayOperations();
        try {
            doBlockOperations(timeout, timeUnit, onBlocked);
@@ -147,13 +145,12 @@ final class IndexShardOperationPermits implements Closeable {
    }

    private void delayOperations() {
+        if (closed) {
+            throw new IndexShardClosedException(shardId);
+        }
        synchronized (this) {
-            if (delayed) {
-                throw new IllegalStateException("operations are already delayed");
-            } else {
-                assert delayedOperations.isEmpty();
-                delayed = true;
-            }
+            assert queuedBlockOperations > 0 || delayedOperations.isEmpty();
+            queuedBlockOperations++;
        }
    }

@@ -164,7 +161,7 @@ final class IndexShardOperationPermits implements Closeable {
        if (Assertions.ENABLED) {
            // since delayed is not volatile, we have to synchronize even here for visibility
            synchronized (this) {
-                assert delayed;
+                assert queuedBlockOperations > 0;
            }
        }
        if (semaphore.tryAcquire(TOTAL_PERMITS, timeout, timeUnit)) {
@@ -182,10 +179,14 @@ final class IndexShardOperationPermits implements Closeable {
    private void releaseDelayedOperations() {
        final List<DelayedOperation> queuedActions;
        synchronized (this) {
-            assert delayed;
-            queuedActions = new ArrayList<>(delayedOperations);
-            delayedOperations.clear();
-            delayed = false;
+            assert queuedBlockOperations > 0;
+            queuedBlockOperations--;
+            if (queuedBlockOperations == 0) {
+                queuedActions = new ArrayList<>(delayedOperations);
+                delayedOperations.clear();
+            } else {
+                queuedActions = Collections.emptyList();
+            }
        }
        if (!queuedActions.isEmpty()) {
            /*
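Note (illustrative, not part of the merge): the boolean delayed flag becomes the counter queuedBlockOperations because asyncBlockOperations lets block requests overlap; delayed operations may only drain when the last outstanding block exits. The counting idea in isolation, with hypothetical names:

    final class BlockCounter {
        private int queuedBlocks; // guarded by synchronized (this)
        private final java.util.List<Runnable> delayedOps = new java.util.ArrayList<>();

        synchronized void enterBlock() {
            queuedBlocks++;
        }

        // Returns the operations to run now; empty while other blocks are still active.
        synchronized java.util.List<Runnable> exitBlock() {
            assert queuedBlocks > 0;
            queuedBlocks--;
            if (queuedBlocks == 0) {             // last block out releases the queue
                java.util.List<Runnable> ops = new java.util.ArrayList<>(delayedOps);
                delayedOps.clear();
                return ops;
            }
            return java.util.Collections.emptyList();
        }
    }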
@@ -242,7 +243,7 @@ final class IndexShardOperationPermits implements Closeable {
        final Releasable releasable;
        try {
            synchronized (this) {
-                if (delayed) {
+                if (queuedBlockOperations > 0) {
                    final Supplier<StoredContext> contextSupplier = threadPool.getThreadContext().newRestorableContext(false);
                    final ActionListener<Releasable> wrappedListener;
                    if (executorOnDelay != null) {
@@ -308,6 +309,11 @@ final class IndexShardOperationPermits implements Closeable {
        }
    }

+    synchronized boolean isBlocked() {
+        return queuedBlockOperations > 0;
+    }
+
    /**
     * @return a list of describing each permit that wasn't released yet. The description consist of the debugInfo supplied
     * when the permit was acquired plus a stack traces that was captured when the permit was request.
@@ -136,7 +136,7 @@ public class PrimaryReplicaSyncer extends AbstractComponent {
            }
        };

-        resync(shardId, indexShard.routingEntry().allocationId().getId(), indexShard.getPrimaryTerm(), wrappedSnapshot,
+        resync(shardId, indexShard.routingEntry().allocationId().getId(), indexShard.getPendingPrimaryTerm(), wrappedSnapshot,
            startingSeqNo, maxSeqNo, resyncListener);
    } catch (Exception e) {
        try {
@@ -394,7 +394,7 @@ final class StoreRecovery {
            final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
            final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO));
            final String translogUUID = Translog.createEmptyTranslog(
-                indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPrimaryTerm());
+                indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPendingPrimaryTerm());
            store.associateIndexWithNewTranslog(translogUUID);
        } else if (indexShouldExists) {
            // since we recover from local, just fill the files and size
@@ -409,11 +409,12 @@ final class StoreRecovery {
        } else {
            store.createEmpty();
            final String translogUUID = Translog.createEmptyTranslog(
-                indexShard.shardPath().resolveTranslog(), SequenceNumbers.NO_OPS_PERFORMED, shardId, indexShard.getPrimaryTerm());
+                indexShard.shardPath().resolveTranslog(), SequenceNumbers.NO_OPS_PERFORMED, shardId,
+                indexShard.getPendingPrimaryTerm());
            store.associateIndexWithNewTranslog(translogUUID);
        }
        indexShard.openEngineAndRecoverFromTranslog();
-        indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm());
+        indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm());
        indexShard.finalizeRecovery();
        indexShard.postRecovery("post recovery from shard_store");
    } catch (EngineException | IOException e) {
@@ -458,11 +459,11 @@ final class StoreRecovery {
            final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
            final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO));
            final String translogUUID = Translog.createEmptyTranslog(
-                indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPrimaryTerm());
+                indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPendingPrimaryTerm());
            store.associateIndexWithNewTranslog(translogUUID);
            assert indexShard.shardRouting.primary() : "only primary shards can recover from store";
            indexShard.openEngineAndRecoverFromTranslog();
-            indexShard.getEngine().fillSeqNoGaps(indexShard.getPrimaryTerm());
+            indexShard.getEngine().fillSeqNoGaps(indexShard.getPendingPrimaryTerm());
            indexShard.finalizeRecovery();
            indexShard.postRecovery("restore done");
        } catch (Exception e) {
@@ -491,7 +491,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
        try (ReleasableLock ignored = readLock.acquire()) {
            ensureOpen();
            if (operation.primaryTerm() > current.getPrimaryTerm()) {
-                throw new IllegalArgumentException("Operation term is newer than the current term;"
+                assert false :
+                    "Operation term is newer than the current term; "
+                    + "current term[" + current.getPrimaryTerm() + "], operation term[" + operation + "]";
+                throw new IllegalArgumentException("Operation term is newer than the current term; "
                    + "current term[" + current.getPrimaryTerm() + "], operation term[" + operation + "]");
            }
            return current.add(bytes, operation.seqNo());
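Note (illustrative, not part of the merge): the Translog hunk above pairs an assert false with the throw, so test runs with assertions enabled fail hard with the full message while production still gets the IllegalArgumentException. The pattern in isolation, with hypothetical names:

    final class TranslogTermGuard {
        static void ensureNotNewer(long operationTerm, long currentTerm) {
            if (operationTerm > currentTerm) {
                final String message = "Operation term is newer than the current term; "
                    + "current term[" + currentTerm + "], operation term[" + operationTerm + "]";
                assert false : message;                      // trips when -ea is set (tests)
                throw new IllegalArgumentException(message); // still rejected in production
            }
        }
    }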
@@ -250,7 +250,7 @@ public class RecoverySourceHandler {
        try (Releasable ignored = FutureUtils.get(permit)) {
            // check that the IndexShard still has the primary authority. This needs to be checked under operation permit to prevent
            // races, as IndexShard will switch its authority only when it holds all operation permits, see IndexShard.relocated()
-            if (primary.isPrimaryMode() == false) {
+            if (primary.isRelocatedPrimary()) {
                throw new IndexShardRelocatedException(primary.shardId());
            }
            runnable.run();
@@ -443,7 +443,8 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
            }
            // TODO: Assign the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2
            final String translogUUID = Translog.createEmptyTranslog(
-                indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, indexShard.getPrimaryTerm());
+                indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId,
+                indexShard.getPendingPrimaryTerm());
            store.associateIndexWithNewTranslog(translogUUID);
        } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
            // this is a fatal exception at this stage.
@@ -441,7 +441,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
        BulkItemRequest replicaRequest = new BulkItemRequest(0, writeRequest);

        Exception err = new ElasticsearchException("I'm dead <(x.x)>");
-        Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0);
+        Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0, 0);
        BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult,
            replicaRequest);

@@ -478,7 +478,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {

        Exception err = new VersionConflictEngineException(shardId, "_doc", "id",
            "I'm conflicted <(;_;)>");
-        Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0);
+        Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0, 0);
        BulkItemResultHolder failedResults = new BulkItemResultHolder(null, indexResult,
            replicaRequest);

@@ -516,7 +516,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {

        boolean created = randomBoolean();
        Translog.Location resultLocation = new Translog.Location(42, 42, 42);
-        Engine.IndexResult indexResult = new FakeResult(1, 1, created, resultLocation);
+        Engine.IndexResult indexResult = new FakeResult(1, 1, 1, created, resultLocation);
        DocWriteResponse indexResponse = new IndexResponse(shardId, "_doc", "id", 1, 17, 1, created);
        BulkItemResultHolder goodResults =
            new BulkItemResultHolder(indexResponse, indexResult, replicaRequest);
@@ -559,7 +559,7 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
        Translog.Location newLocation = new Translog.Location(1, 1, 1);
        final long version = randomNonNegativeLong();
        final long seqNo = randomNonNegativeLong();
-        Engine.IndexResult indexResult = new IndexResultWithLocation(version, seqNo, created, newLocation);
+        Engine.IndexResult indexResult = new IndexResultWithLocation(version, 0L, seqNo, created, newLocation);
        results = new BulkItemResultHolder(indexResponse, indexResult, replicaRequest);
        assertThat(TransportShardBulkAction.calculateTranslogLocation(original, results),
            equalTo(newLocation));
@@ -629,8 +629,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {

    public class IndexResultWithLocation extends Engine.IndexResult {
        private final Translog.Location location;
-        public IndexResultWithLocation(long version, long seqNo, boolean created, Translog.Location newLocation) {
-            super(version, seqNo, created);
+        public IndexResultWithLocation(long version, long term, long seqNo, boolean created, Translog.Location newLocation) {
+            super(version, term, seqNo, created);
            this.location = newLocation;
        }

@@ -647,8 +647,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {
        BulkItemRequest request = new BulkItemRequest(0, updateRequest);
        Exception err = new VersionConflictEngineException(shardId, "_doc", "id",
            "I'm conflicted <(;_;)>");
-        Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0);
-        Engine.DeleteResult deleteResult = new Engine.DeleteResult(1, 1, true);
+        Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0, 0);
+        Engine.DeleteResult deleteResult = new Engine.DeleteResult(1, 1, 1, true);
        DocWriteResponse.Result docWriteResult = DocWriteResponse.Result.CREATED;
        DocWriteResponse.Result deleteWriteResult = DocWriteResponse.Result.DELETED;
        IndexRequest indexRequest = new IndexRequest("index", "_doc", "id");
@@ -830,8 +830,8 @@ public class TransportShardBulkActionTests extends IndexShardTestCase {

        private final Translog.Location location;

-        protected FakeResult(long version, long seqNo, boolean created, Translog.Location location) {
-            super(version, seqNo, created);
+        protected FakeResult(long version, long term, long seqNo, boolean created, Translog.Location location) {
+            super(version, term, seqNo, created);
            this.location = location;
        }

@@ -256,7 +256,6 @@ public class SearchAsyncActionTests extends ESTestCase {
        assertEquals(10, numRequests.get());
    }

-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29242")
    public void testFanOutAndCollect() throws InterruptedException {
        SearchRequest request = new SearchRequest();
        request.allowPartialSearchResults(true);
@@ -347,6 +346,9 @@ public class SearchAsyncActionTests extends ESTestCase {
                    sendReleaseSearchContext(result.getRequestId(), new MockConnection(result.node), OriginalIndices.NONE);
                }
                responseListener.onResponse(response);
+                if (latch.getCount() == 0) {
+                    throw new AssertionError("Running a search phase after the latch has reached 0 !!!!");
+                }
                latch.countDown();
            }
        };
@@ -587,8 +587,6 @@ public class TransportReplicationActionTests extends ESTestCase {

    public void testPrimaryReference() throws Exception {
        final IndexShard shard = mock(IndexShard.class);
-        final long primaryTerm = 1 + randomInt(200);
-        when(shard.getPrimaryTerm()).thenReturn(primaryTerm);

        AtomicBoolean closed = new AtomicBoolean();
        Releasable releasable = () -> {
@@ -683,9 +681,9 @@ public class TransportReplicationActionTests extends ESTestCase {

        final IndexShard shard = mock(IndexShard.class);
-        when(shard.getPrimaryTerm()).thenReturn(primaryTerm);
+        when(shard.getPendingPrimaryTerm()).thenReturn(primaryTerm);
        when(shard.routingEntry()).thenReturn(routingEntry);
-        when(shard.isPrimaryMode()).thenReturn(true);
+        when(shard.isRelocatedPrimary()).thenReturn(false);
        IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().shardRoutingTable(shardId);
        Set<String> inSyncIds = randomBoolean() ? Collections.singleton(routingEntry.allocationId().getId()) :
            clusterService.state().metaData().index(index).inSyncAllocationIds(0);
@@ -1201,7 +1199,7 @@ public class TransportReplicationActionTests extends ESTestCase {
        doAnswer(invocation -> {
            long term = (Long)invocation.getArguments()[0];
            ActionListener<Releasable> callback = (ActionListener<Releasable>) invocation.getArguments()[2];
-            final long primaryTerm = indexShard.getPrimaryTerm();
+            final long primaryTerm = indexShard.getPendingPrimaryTerm();
            if (term < primaryTerm) {
                throw new IllegalArgumentException(String.format(Locale.ROOT, "%s operation term [%d] is too old (current [%d])",
                    shardId, term, primaryTerm));
@@ -1219,9 +1217,9 @@ public class TransportReplicationActionTests extends ESTestCase {
            }
            return routing;
        });
-        when(indexShard.isPrimaryMode()).thenAnswer(invocationOnMock -> isRelocated.get() == false);
+        when(indexShard.isRelocatedPrimary()).thenAnswer(invocationOnMock -> isRelocated.get());
        doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class));
-        when(indexShard.getPrimaryTerm()).thenAnswer(i ->
+        when(indexShard.getPendingPrimaryTerm()).thenAnswer(i ->
            clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id()));
        return indexShard;
    }
@@ -454,7 +454,7 @@ public class TransportWriteActionTests extends ESTestCase {
        doAnswer(invocation -> {
            long term = (Long)invocation.getArguments()[0];
            ActionListener<Releasable> callback = (ActionListener<Releasable>) invocation.getArguments()[1];
-            final long primaryTerm = indexShard.getPrimaryTerm();
+            final long primaryTerm = indexShard.getPendingPrimaryTerm();
            if (term < primaryTerm) {
                throw new IllegalArgumentException(String.format(Locale.ROOT, "%s operation term [%d] is too old (current [%d])",
                    shardId, term, primaryTerm));
@@ -472,9 +472,9 @@ public class TransportWriteActionTests extends ESTestCase {
            }
            return routing;
        });
-        when(indexShard.isPrimaryMode()).thenAnswer(invocationOnMock -> isRelocated.get() == false);
+        when(indexShard.isRelocatedPrimary()).thenAnswer(invocationOnMock -> isRelocated.get());
        doThrow(new AssertionError("failed shard is not supported")).when(indexShard).failShard(anyString(), any(Exception.class));
-        when(indexShard.getPrimaryTerm()).thenAnswer(i ->
+        when(indexShard.getPendingPrimaryTerm()).thenAnswer(i ->
            clusterService.state().metaData().getIndexSafe(shardId.getIndex()).primaryTerm(shardId.id()));
        return indexShard;
    }
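Note (illustrative, not part of the merge): the two test classes above share one Mockito idiom — stub the renamed getPendingPrimaryTerm() and reject stale terms inside a doAnswer that hands out the permit. The idiom against a hypothetical interface so it stands alone:

    import static org.mockito.Mockito.*;

    interface TermAware {
        long pendingTerm();
        void acquirePermit(long opTerm);
    }

    final class StubDemo {
        static TermAware stubbedShard() {
            TermAware shard = mock(TermAware.class);
            when(shard.pendingTerm()).thenReturn(5L);
            doAnswer(invocation -> {
                long term = (Long) invocation.getArguments()[0];
                if (term < shard.pendingTerm()) {   // mirror the production staleness check
                    throw new IllegalArgumentException("operation term [" + term + "] is too old");
                }
                return null;
            }).when(shard).acquirePermit(anyLong());
            return shard;
        }
    }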
@@ -76,7 +76,7 @@ public class ShardStateIT extends ESIntegTestCase {
            if (indexService != null) {
                for (IndexShard shard : indexService) {
                    assertThat("term mismatch for shard " + shard.shardId(),
-                        shard.getPrimaryTerm(), equalTo(metaData.primaryTerm(shard.shardId().id())));
+                        shard.getPendingPrimaryTerm(), equalTo(metaData.primaryTerm(shard.shardId().id())));
                }
            }
        }
@@ -59,8 +59,10 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;

import static org.elasticsearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder;
import static org.hamcrest.Matchers.anyOf;
@@ -221,7 +223,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
        }

        logger.info("--> promoting replica to primary " + replica1.routingEntry());
-        shards.promoteReplicaToPrimary(replica1);
+        shards.promoteReplicaToPrimary(replica1).get();
        indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"2\"}", XContentType.JSON);
        shards.index(indexRequest);
        shards.refresh("test");
@@ -234,6 +236,102 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
        }
    }

+    public void testReplicaTermIncrementWithConcurrentPrimaryPromotion() throws Exception {
+        Map<String, String> mappings =
+            Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}");
+        try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(2, mappings))) {
+            shards.startAll();
+            long primaryPrimaryTerm = shards.getPrimary().getPendingPrimaryTerm();
+            List<IndexShard> replicas = shards.getReplicas();
+            IndexShard replica1 = replicas.get(0);
+            IndexShard replica2 = replicas.get(1);
+
+            shards.promoteReplicaToPrimary(replica1, (shard, listener) -> {});
+            long newReplica1Term = replica1.getPendingPrimaryTerm();
+            assertEquals(primaryPrimaryTerm + 1, newReplica1Term);
+
+            assertEquals(primaryPrimaryTerm, replica2.getPendingPrimaryTerm());
+
+            IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"1\"}", XContentType.JSON);
+            BulkShardRequest replicationRequest = indexOnPrimary(indexRequest, replica1);
+
+            CyclicBarrier barrier = new CyclicBarrier(2);
+            Thread t1 = new Thread(() -> {
+                try {
+                    barrier.await();
+                    indexOnReplica(replicationRequest, shards, replica2, newReplica1Term);
+                } catch (IllegalStateException ise) {
+                    assertThat(ise.getMessage(), containsString("is too old"));
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            });
+            Thread t2 = new Thread(() -> {
+                try {
+                    barrier.await();
+                    shards.promoteReplicaToPrimary(replica2).get();
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            });
+            t2.start();
+            t1.start();
+            t1.join();
+            t2.join();
+
+            assertEquals(newReplica1Term + 1, replica2.getPendingPrimaryTerm());
+        }
+    }
+
+    public void testReplicaOperationWithConcurrentPrimaryPromotion() throws Exception {
+        Map<String, String> mappings =
+            Collections.singletonMap("type", "{ \"type\": { \"properties\": { \"f\": { \"type\": \"keyword\"} }}}");
+        try (ReplicationGroup shards = new ReplicationGroup(buildIndexMetaData(1, mappings))) {
+            shards.startAll();
+            long primaryPrimaryTerm = shards.getPrimary().getPendingPrimaryTerm();
+            IndexRequest indexRequest = new IndexRequest(index.getName(), "type", "1").source("{ \"f\": \"1\"}", XContentType.JSON);
+            BulkShardRequest replicationRequest = indexOnPrimary(indexRequest, shards.getPrimary());
+
+            List<IndexShard> replicas = shards.getReplicas();
+            IndexShard replica = replicas.get(0);
+
+            CyclicBarrier barrier = new CyclicBarrier(2);
+            AtomicBoolean successFullyIndexed = new AtomicBoolean();
+            Thread t1 = new Thread(() -> {
+                try {
+                    barrier.await();
+                    indexOnReplica(replicationRequest, shards, replica, primaryPrimaryTerm);
+                    successFullyIndexed.set(true);
+                } catch (IllegalStateException ise) {
+                    assertThat(ise.getMessage(), containsString("is too old"));
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            });
+            Thread t2 = new Thread(() -> {
+                try {
+                    barrier.await();
+                    shards.promoteReplicaToPrimary(replica).get();
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            });
+            t2.start();
+            t1.start();
+            t1.join();
+            t2.join();
+
+            assertEquals(primaryPrimaryTerm + 1, replica.getPendingPrimaryTerm());
+            if (successFullyIndexed.get()) {
+                try(Translog.Snapshot snapshot = getTranslog(replica).newSnapshot()) {
+                    assertThat(snapshot.totalOperations(), equalTo(1));
+                    Translog.Operation op = snapshot.next();
+                    assertThat(op.primaryTerm(), equalTo(primaryPrimaryTerm));
+                }
+            }
+        }
+    }
+
    /**
     * test document failures (failures after seq_no generation) are added as noop operation to the translog
     * for primary and replica shards
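Note (illustrative, not part of the merge): both new tests rendezvous two threads on a CyclicBarrier so the replica-side index and the promotion start as close together as possible, then accept either outcome (operation applied under the old term, or rejected as "is too old"). The harness distilled, with hypothetical names:

    import java.util.concurrent.CyclicBarrier;

    final class RaceRunner {
        // Start both actions as near-simultaneously as possible, then join.
        static void race(Runnable left, Runnable right) throws InterruptedException {
            final CyclicBarrier barrier = new CyclicBarrier(2);
            Thread t1 = new Thread(() -> await(barrier, left));
            Thread t2 = new Thread(() -> await(barrier, right));
            t2.start();
            t1.start();
            t1.join();
            t2.join();
        }

        private static void await(CyclicBarrier barrier, Runnable action) {
            try {
                barrier.await();   // both threads are released together
                action.run();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }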
@@ -255,7 +353,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
                .source("{}", XContentType.JSON)
        );
        assertTrue(response.isFailed());
-        assertNoOpTranslogOperationForDocumentFailure(shards, 1, shards.getPrimary().getPrimaryTerm(), failureMessage);
+        assertNoOpTranslogOperationForDocumentFailure(shards, 1, shards.getPrimary().getPendingPrimaryTerm(), failureMessage);
        shards.assertAllEqual(0);

        // add some replicas
@@ -269,7 +367,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
                .source("{}", XContentType.JSON)
        );
        assertTrue(response.isFailed());
-        assertNoOpTranslogOperationForDocumentFailure(shards, 2, shards.getPrimary().getPrimaryTerm(), failureMessage);
+        assertNoOpTranslogOperationForDocumentFailure(shards, 2, shards.getPrimary().getPendingPrimaryTerm(), failureMessage);
        shards.assertAllEqual(0);
    }
}
@@ -361,7 +459,7 @@ public class IndexLevelReplicationTests extends ESIndexLevelReplicationTestCase
        // Make sure that peer-recovery transfers all but non-overridden operations.
        IndexShard replica3 = shards.addReplica();
        logger.info("--> Promote replica2 as the primary");
-        shards.promoteReplicaToPrimary(replica2);
+        shards.promoteReplicaToPrimary(replica2).get();
        logger.info("--> Recover replica3 from replica2");
        recoverReplica(replica3, replica2, true);
        try (Translog.Snapshot snapshot = getTranslog(replica3).newSnapshot()) {
@@ -245,7 +245,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
            }
        }

-        shards.promoteReplicaToPrimary(newPrimary);
+        shards.promoteReplicaToPrimary(newPrimary).get();

        // check that local checkpoint of new primary is properly tracked after primary promotion
        assertThat(newPrimary.getLocalCheckpoint(), equalTo(totalDocs - 1L));
@@ -432,7 +432,8 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
        while ((next = snapshot.next()) != null) {
            translogOperations++;
            assertThat("unexpected op: " + next, (int)next.seqNo(), lessThan(initialDocs + extraDocs));
-            assertThat("unexpected primaryTerm: " + next.primaryTerm(), next.primaryTerm(), is(oldPrimary.getPrimaryTerm()));
+            assertThat("unexpected primaryTerm: " + next.primaryTerm(), next.primaryTerm(),
+                is(oldPrimary.getPendingPrimaryTerm()));
            final Translog.Source source = next.getSource();
            assertThat(source.source.utf8ToString(), is("{ \"f\": \"normal\"}"));
        }
@@ -770,8 +770,10 @@ public class ReplicationTrackerTests extends ESTestCase {
        assertThat(newPrimary.routingTable, equalTo(oldPrimary.routingTable));
        assertThat(newPrimary.replicationGroup, equalTo(oldPrimary.replicationGroup));

+        assertFalse(oldPrimary.relocated);
        oldPrimary.completeRelocationHandoff();
        assertFalse(oldPrimary.primaryMode);
+        assertTrue(oldPrimary.relocated);
    }

    public void testIllegalStateExceptionIfUnknownAllocationId() {
@@ -71,7 +71,7 @@ public class IndexShardOperationPermitsTests extends ESTestCase {
    public static void setupThreadPool() {
        int writeThreadPoolSize = randomIntBetween(1, 2);
        int writeThreadPoolQueueSize = randomIntBetween(1, 2);
-        threadPool = new TestThreadPool("IndexShardOperationsLockTests",
+        threadPool = new TestThreadPool("IndexShardOperationPermitsTests",
            Settings.builder()
                .put("thread_pool." + ThreadPool.Names.WRITE + ".size", writeThreadPoolSize)
                .put("thread_pool." + ThreadPool.Names.WRITE + ".queue_size", writeThreadPoolQueueSize)
@@ -100,7 +100,7 @@ public class IndexShardOperationPermitsTests extends ESTestCase {
        assertThat(permits.getActiveOperationsCount(), equalTo(0));
    }

-    public void testAllOperationsInvoked() throws InterruptedException, TimeoutException, ExecutionException {
+    public void testAllOperationsInvoked() throws InterruptedException, TimeoutException {
        int numThreads = 10;

        class DummyException extends RuntimeException {}
@@ -187,7 +187,7 @@ public class IndexShardOperationPermitsTests extends ESTestCase {
        future.get().close();
    }

-    public void testOperationsIfClosed() throws ExecutionException, InterruptedException {
+    public void testOperationsIfClosed() {
        PlainActionFuture<Releasable> future = new PlainActionFuture<>();
        permits.close();
        permits.acquire(future, ThreadPool.Names.GENERIC, true, "");
@@ -195,10 +195,12 @@ public class IndexShardOperationPermitsTests extends ESTestCase {
        assertThat(exception.getCause(), instanceOf(IndexShardClosedException.class));
    }

-    public void testBlockIfClosed() throws ExecutionException, InterruptedException {
+    public void testBlockIfClosed() {
        permits.close();
        expectThrows(IndexShardClosedException.class, () -> permits.blockOperations(randomInt(10), TimeUnit.MINUTES,
            () -> { throw new IllegalArgumentException("fake error"); }));
+        expectThrows(IndexShardClosedException.class, () -> permits.asyncBlockOperations(randomInt(10), TimeUnit.MINUTES,
+            () -> { throw new IllegalArgumentException("fake error"); }, e -> { throw new AssertionError(e); }));
    }

    public void testOperationsDelayedIfBlock() throws ExecutionException, InterruptedException, TimeoutException {
@@ -210,6 +212,36 @@ public class IndexShardOperationPermitsTests extends ESTestCase {
        future.get(1, TimeUnit.HOURS).close();
    }

+    public void testGetBlockWhenBlocked() throws ExecutionException, InterruptedException, TimeoutException {
+        PlainActionFuture<Releasable> future = new PlainActionFuture<>();
+        final CountDownLatch blockAcquired = new CountDownLatch(1);
+        final CountDownLatch releaseBlock = new CountDownLatch(1);
+        final AtomicBoolean blocked = new AtomicBoolean();
+        try (Releasable ignored = blockAndWait()) {
+            permits.acquire(future, ThreadPool.Names.GENERIC, true, "");
+
+            permits.asyncBlockOperations(
+                30,
+                TimeUnit.MINUTES,
+                () -> {
+                    blocked.set(true);
+                    blockAcquired.countDown();
+                    releaseBlock.await();
+                },
+                e -> {
+                    throw new RuntimeException(e);
+                });
+            assertFalse(blocked.get());
+            assertFalse(future.isDone());
+        }
+        blockAcquired.await();
+        assertTrue(blocked.get());
+        assertFalse(future.isDone());
+        releaseBlock.countDown();
+
+        future.get(1, TimeUnit.HOURS).close();
+    }
+
    /**
     * Tests that the ThreadContext is restored when a operation is executed after it has been delayed due to a block
     */
@@ -297,7 +297,7 @@ public class IndexShardTests extends IndexShardTestCase {
            // expected
        }
        try {
-            indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, null,
+            indexShard.acquireReplicaOperationPermit(indexShard.getPendingPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, null,
                ThreadPool.Names.WRITE, "");
            fail("we should not be able to increment anymore");
        } catch (IndexShardClosedException e) {
@@ -308,7 +308,7 @@ public class IndexShardTests extends IndexShardTestCase {
    public void testRejectOperationPermitWithHigherTermWhenNotStarted() throws IOException {
        IndexShard indexShard = newShard(false);
        expectThrows(IndexShardNotStartedException.class, () ->
-            indexShard.acquireReplicaOperationPermit(indexShard.getPrimaryTerm() + randomIntBetween(1, 100),
+            indexShard.acquireReplicaOperationPermit(indexShard.getPendingPrimaryTerm() + randomIntBetween(1, 100),
                SequenceNumbers.UNASSIGNED_SEQ_NO, null, ThreadPool.Names.WRITE, ""));
        closeShards(indexShard);
    }
@@ -331,7 +331,7 @@ public class IndexShardTests extends IndexShardTestCase {
                throw new RuntimeException(e);
            }
            indexShard.acquireReplicaOperationPermit(
-                indexShard.getPrimaryTerm(),
+                indexShard.getPendingPrimaryTerm(),
                indexShard.getGlobalCheckpoint(),
                new ActionListener<Releasable>() {
                    @Override
@@ -418,16 +418,13 @@ public class IndexShardTests extends IndexShardTestCase {
    }

    /**
-     * This test makes sure that people can use the shard routing entry to check whether a shard was already promoted to
-     * a primary. Concretely this means, that when we publish the routing entry via {@link IndexShard#routingEntry()} the following
-     * should have happened
-     * 1) Internal state (ala ReplicationTracker) have been updated
-     * 2) Primary term is set to the new term
+     * This test makes sure that people can use the shard routing entry + take an operation permit to check whether a shard was already
+     * promoted to a primary.
     */
    public void testPublishingOrderOnPromotion() throws IOException, InterruptedException, BrokenBarrierException {
        final IndexShard indexShard = newShard(false);
        recoveryEmptyReplica(indexShard, randomBoolean());
-        final long promotedTerm = indexShard.getPrimaryTerm() + 1;
+        final long promotedTerm = indexShard.getPendingPrimaryTerm() + 1;
        final CyclicBarrier barrier = new CyclicBarrier(2);
        final AtomicBoolean stop = new AtomicBoolean();
        final Thread thread = new Thread(() -> {
@@ -438,8 +435,12 @@ public class IndexShardTests extends IndexShardTestCase {
            }
            while(stop.get() == false) {
                if (indexShard.routingEntry().primary()) {
-                    assertThat(indexShard.getPrimaryTerm(), equalTo(promotedTerm));
-                    assertThat(indexShard.getReplicationGroup(), notNullValue());
+                    assertThat(indexShard.getPendingPrimaryTerm(), equalTo(promotedTerm));
+                    final PlainActionFuture<Releasable> permitAcquiredFuture = new PlainActionFuture<>();
+                    indexShard.acquirePrimaryOperationPermit(permitAcquiredFuture, ThreadPool.Names.SAME, "bla");
+                    try (Releasable ignored = permitAcquiredFuture.actionGet()) {
+                        assertThat(indexShard.getReplicationGroup(), notNullValue());
+                    }
                }
            }
        });
@@ -504,7 +505,7 @@ public class IndexShardTests extends IndexShardTestCase {

        // promote the replica
        final ShardRouting replicaRouting = indexShard.routingEntry();
-        final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 10000);
+        final long newPrimaryTerm = indexShard.getPendingPrimaryTerm() + between(1, 10000);
        final ShardRouting primaryRouting =
            newShardRouting(
                replicaRouting.shardId(),
@@ -558,7 +559,7 @@ public class IndexShardTests extends IndexShardTestCase {
        ShardRouting replicaRouting = indexShard.routingEntry();
        ShardRouting primaryRouting = newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), null,
            true, ShardRoutingState.STARTED, replicaRouting.allocationId());
-        final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 1000);
+        final long newPrimaryTerm = indexShard.getPendingPrimaryTerm() + between(1, 1000);
        indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> {
            assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));
        }, 0L,
@@ -568,11 +569,14 @@ public class IndexShardTests extends IndexShardTestCase {
        } else {
            indexShard = newStartedShard(true);
        }
-        final long primaryTerm = indexShard.getPrimaryTerm();
+        final long primaryTerm = indexShard.getPendingPrimaryTerm();
        assertEquals(0, indexShard.getActiveOperationsCount());
        if (indexShard.routingEntry().isRelocationTarget() == false) {
            try {
-                indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), null, ThreadPool.Names.WRITE, "");
+                final PlainActionFuture<Releasable> permitAcquiredFuture = new PlainActionFuture<>();
+                indexShard.acquireReplicaOperationPermit(primaryTerm, indexShard.getGlobalCheckpoint(), permitAcquiredFuture,
+                    ThreadPool.Names.WRITE, "");
+                permitAcquiredFuture.actionGet();
                fail("shard shouldn't accept operations as replica");
            } catch (IllegalStateException ignored) {

@@ -650,7 +654,7 @@ public class IndexShardTests extends IndexShardTestCase {
            assertThat(e, hasToString(containsString("shard " + shardRouting + " is not a primary")));
        }

-        final long primaryTerm = indexShard.getPrimaryTerm();
+        final long primaryTerm = indexShard.getPendingPrimaryTerm();
        final long translogGen = engineClosed ? -1 : getTranslog(indexShard).getGeneration().translogFileGeneration;

        final Releasable operation1;
@@ -728,7 +732,7 @@ public class IndexShardTests extends IndexShardTestCase {
        ActionListener<Releasable> listener = new ActionListener<Releasable>() {
            @Override
            public void onResponse(Releasable releasable) {
-                assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm));
+                assertThat(indexShard.getPendingPrimaryTerm(), equalTo(newPrimaryTerm));
                assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));
                assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint));
                assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint));
@@ -765,7 +769,7 @@ public class IndexShardTests extends IndexShardTestCase {
        barrier.await();
        if (indexShard.state() == IndexShardState.CREATED || indexShard.state() == IndexShardState.RECOVERING) {
            barrier.await();
-            assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
+            assertThat(indexShard.getPendingPrimaryTerm(), equalTo(primaryTerm));
            assertFalse(onResponse.get());
            assertThat(onFailure.get(), instanceOf(IndexShardNotStartedException.class));
            Releasables.close(operation1);
@@ -774,18 +778,19 @@ public class IndexShardTests extends IndexShardTestCase {
            // our operation should be blocked until the previous operations complete
            assertFalse(onResponse.get());
            assertNull(onFailure.get());
-            assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
+            assertThat(indexShard.operationPrimaryTerm, equalTo(primaryTerm));
            assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(primaryTerm));
            Releasables.close(operation1);
            // our operation should still be blocked
            assertFalse(onResponse.get());
            assertNull(onFailure.get());
-            assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
+            assertThat(indexShard.operationPrimaryTerm, equalTo(primaryTerm));
            assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(primaryTerm));
            Releasables.close(operation2);
            barrier.await();
            // now lock acquisition should have succeeded
-            assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm));
+            assertThat(indexShard.operationPrimaryTerm, equalTo(newPrimaryTerm));
+            assertThat(indexShard.getPendingPrimaryTerm(), equalTo(newPrimaryTerm));
            assertThat(TestTranslog.getCurrentTerm(getTranslog(indexShard)), equalTo(newPrimaryTerm));
            if (engineClosed) {
                assertFalse(onResponse.get());
@@ -884,7 +889,7 @@ public class IndexShardTests extends IndexShardTestCase {

        final CountDownLatch latch = new CountDownLatch(1);
        indexShard.acquireReplicaOperationPermit(
-            indexShard.getPrimaryTerm() + 1,
+            indexShard.getPendingPrimaryTerm() + 1,
            globalCheckpoint,
            new ActionListener<Releasable>() {
                @Override
@@ -906,7 +911,7 @@ public class IndexShardTests extends IndexShardTestCase {
        final CountDownLatch resyncLatch = new CountDownLatch(1);
        indexShard.updateShardState(
            newRouting,
-            indexShard.getPrimaryTerm() + 1,
+            indexShard.getPendingPrimaryTerm() + 1,
            (s, r) -> resyncLatch.countDown(),
            1L,
            Collections.singleton(newRouting.allocationId().getId()),
@@ -938,7 +943,7 @@ public class IndexShardTests extends IndexShardTestCase {
            Math.toIntExact(indexShard.getLocalCheckpoint()));
        final CountDownLatch latch = new CountDownLatch(1);
        indexShard.acquireReplicaOperationPermit(
-            indexShard.primaryTerm + 1,
+            indexShard.pendingPrimaryTerm + 1,
            globalCheckpoint,
            new ActionListener<Releasable>() {
                @Override
@@ -975,7 +980,7 @@ public class IndexShardTests extends IndexShardTestCase {
        final CyclicBarrier barrier = new CyclicBarrier(3);
        final CountDownLatch latch = new CountDownLatch(2);

-        final long primaryTerm = indexShard.getPrimaryTerm();
+        final long primaryTerm = indexShard.getPendingPrimaryTerm();
        final AtomicLong counter = new AtomicLong();
        final AtomicReference<Exception> onFailure = new AtomicReference<>();

@@ -993,7 +998,7 @@ public class IndexShardTests extends IndexShardTestCase {
            @Override
            public void onResponse(Releasable releasable) {
                counter.incrementAndGet();
-                assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm + increment));
+                assertThat(indexShard.getPendingPrimaryTerm(), equalTo(primaryTerm + increment));
                latch.countDown();
                releasable.close();
            }
@@ -1037,7 +1042,7 @@ public class IndexShardTests extends IndexShardTestCase {
            assertThat(counter.get(), equalTo(2L));
        }

-        assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm + Math.max(firstIncrement, secondIncrement)));
+        assertThat(indexShard.getPendingPrimaryTerm(), equalTo(primaryTerm + Math.max(firstIncrement, secondIncrement)));

        closeShards(indexShard);
    }
@@ -1416,14 +1421,14 @@ public class IndexShardTests extends IndexShardTestCase {
            recoveryThread.start();
            latch.await();
            // recovery can only be finalized after we release the current primaryOperationLock
-            assertTrue(shard.isPrimaryMode());
+            assertFalse(shard.isRelocatedPrimary());
        }
        // recovery can be now finalized
        recoveryThread.join();
-        assertFalse(shard.isPrimaryMode());
+        assertTrue(shard.isRelocatedPrimary());
        try (Releasable ignored = acquirePrimaryOperationPermitBlockingly(shard)) {
            // lock can again be acquired
-            assertFalse(shard.isPrimaryMode());
+            assertTrue(shard.isRelocatedPrimary());
        }

        closeShards(shard);
@@ -1465,7 +1470,7 @@ public class IndexShardTests extends IndexShardTestCase {

    public void testStressRelocated() throws Exception {
        final IndexShard shard = newStartedShard(true);
-        assertTrue(shard.isPrimaryMode());
+        assertFalse(shard.isRelocatedPrimary());
        IndexShardTestCase.updateRoutingEntry(shard, ShardRoutingHelper.relocate(shard.routingEntry(), "other_node"));
        final int numThreads = randomIntBetween(2, 4);
        Thread[] indexThreads = new Thread[numThreads];
@@ -1501,14 +1506,14 @@ public class IndexShardTests extends IndexShardTestCase {
        assertThat(relocated.get(), equalTo(false));
        assertThat(shard.getActiveOperationsCount(), greaterThan(0));
        // ensure we only transition after pending operations completed
-        assertTrue(shard.isPrimaryMode());
+        assertFalse(shard.isRelocatedPrimary());
        // complete pending operations
        barrier.await();
        // complete recovery/relocation
        recoveryThread.join();
        // ensure relocated successfully once pending operations are done
        assertThat(relocated.get(), equalTo(true));
-        assertFalse(shard.isPrimaryMode());
+        assertTrue(shard.isRelocatedPrimary());
        assertThat(shard.getActiveOperationsCount(), equalTo(0));

        for (Thread indexThread : indexThreads) {
@@ -1572,7 +1577,7 @@ public class IndexShardTests extends IndexShardTestCase {
        cyclicBarrier.await();
        relocationThread.join();
        cancellingThread.join();
-        if (shard.isPrimaryMode() == false) {
+        if (shard.isRelocatedPrimary()) {
            logger.debug("shard was relocated successfully");
            assertThat(cancellingException.get(), instanceOf(IllegalIndexShardStateException.class));
            assertThat("current routing:" + shard.routingEntry(), shard.routingEntry().relocating(), equalTo(true));
@@ -1719,7 +1724,7 @@ public class IndexShardTests extends IndexShardTestCase {
        while ((operation = snapshot.next()) != null) {
            if (operation.opType() == Translog.Operation.Type.NO_OP) {
                numNoops++;
-                assertEquals(newShard.getPrimaryTerm(), operation.primaryTerm());
+                assertEquals(newShard.getPendingPrimaryTerm(), operation.primaryTerm());
                assertEquals(0, operation.seqNo());
            }
        }
@@ -1826,7 +1831,7 @@ public class IndexShardTests extends IndexShardTestCase {
        flushShard(shard);
        assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1"));
        // Simulate resync (without rollback): Noop #1, index #2
-        acquireReplicaOperationPermitBlockingly(shard, shard.primaryTerm + 1);
+        acquireReplicaOperationPermitBlockingly(shard, shard.pendingPrimaryTerm + 1);
        shard.markSeqNoAsNoop(1, "test");
        shard.applyIndexOperationOnReplica(2, 1, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
            SourceToParse.source(indexName, "_doc", "doc-2", new BytesArray("{}"), XContentType.JSON));
@@ -1837,7 +1842,8 @@ public class IndexShardTests extends IndexShardTestCase {
        IndexShard newShard = reinitShard(shard,
            newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), true, ShardRoutingState.INITIALIZING,
                RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE));
-        newShard.primaryTerm++;
+        newShard.pendingPrimaryTerm++;
+        newShard.operationPrimaryTerm++;
        DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
        newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
        assertTrue(newShard.recoverFromStore());
@@ -1852,7 +1858,7 @@ public class IndexShardTests extends IndexShardTestCase {
        ShardRouting inRecoveryRouting = ShardRoutingHelper.relocate(origRouting, "some_node");
        IndexShardTestCase.updateRoutingEntry(shard, inRecoveryRouting);
        shard.relocated(primaryContext -> {});
-        assertFalse(shard.isPrimaryMode());
+        assertTrue(shard.isRelocatedPrimary());
        try {
            IndexShardTestCase.updateRoutingEntry(shard, origRouting);
            fail("Expected IndexShardRelocatedException");
@@ -2160,11 +2166,11 @@ public class IndexShardTests extends IndexShardTestCase {
        int numCorruptEntries = 0;
        for (int i = 0; i < numTotalEntries; i++) {
            if (randomBoolean()) {
-                operations.add(new Translog.Index("_doc", "1", 0, primary.getPrimaryTerm(), 1,
+                operations.add(new Translog.Index("_doc", "1", 0, primary.getPendingPrimaryTerm(), 1,
                    "{\"foo\" : \"bar\"}".getBytes(Charset.forName("UTF-8")), null, -1));
            } else {
                // corrupt entry
-                operations.add(new Translog.Index("_doc", "2", 1, primary.getPrimaryTerm(), 1,
+                operations.add(new Translog.Index("_doc", "2", 1, primary.getPendingPrimaryTerm(), 1,
                    "{\"foo\" : \"bar}".getBytes(Charset.forName("UTF-8")), null, -1));
                numCorruptEntries++;
            }
@@ -148,7 +148,7 @@ public class IndexingOperationListenerTests extends ESTestCase{
        ParsedDocument doc = InternalEngineTests.createParsedDoc("1", null);
        Engine.Delete delete = new Engine.Delete("test", "1", new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong());
        Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong(), doc);
-        compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbers.UNASSIGNED_SEQ_NO, true));
+        compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, 0, SequenceNumbers.UNASSIGNED_SEQ_NO, true));
        assertEquals(0, preIndex.get());
        assertEquals(0, postIndex.get());
        assertEquals(0, postIndexException.get());
@@ -172,7 +172,7 @@ public class IndexingOperationListenerTests extends ESTestCase{
        assertEquals(2, postDelete.get());
        assertEquals(2, postDeleteException.get());

-        compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, SequenceNumbers.UNASSIGNED_SEQ_NO, false));
+        compositeListener.postIndex(randomShardId, index, new Engine.IndexResult(0, 0, SequenceNumbers.UNASSIGNED_SEQ_NO, false));
        assertEquals(0, preIndex.get());
        assertEquals(2, postIndex.get());
        assertEquals(0, postIndexException.get());
@@ -83,7 +83,7 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase {
        boolean syncNeeded = numDocs > 0;

        String allocationId = shard.routingEntry().allocationId().getId();
-        shard.updateShardState(shard.routingEntry(), shard.getPrimaryTerm(), null, 1000L, Collections.singleton(allocationId),
+        shard.updateShardState(shard.routingEntry(), shard.getPendingPrimaryTerm(), null, 1000L, Collections.singleton(allocationId),
            new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), Collections.emptySet());
        shard.updateLocalCheckpointForShard(allocationId, globalCheckPoint);
        assertEquals(globalCheckPoint, shard.getGlobalCheckpoint());
@@ -142,7 +142,7 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase {
        }

        String allocationId = shard.routingEntry().allocationId().getId();
-        shard.updateShardState(shard.routingEntry(), shard.getPrimaryTerm(), null, 1000L, Collections.singleton(allocationId),
+        shard.updateShardState(shard.routingEntry(), shard.getPendingPrimaryTerm(), null, 1000L, Collections.singleton(allocationId),
            new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), Collections.emptySet());

        CountDownLatch syncCalledLatch = new CountDownLatch(1);
@@ -2669,7 +2669,7 @@ public class TranslogTests extends ESTestCase {
 
         Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm,
             1, VersionType.INTERNAL, Origin.PRIMARY, 0, 0, false);
-        Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomSeqNum, true);
+        Engine.IndexResult eIndexResult = new Engine.IndexResult(1, randomPrimaryTerm, randomSeqNum, true);
         Translog.Index index = new Translog.Index(eIndex, eIndexResult);
 
         BytesStreamOutput out = new BytesStreamOutput();
@@ -2680,7 +2680,7 @@ public class TranslogTests extends ESTestCase {
 
         Engine.Delete eDelete = new Engine.Delete(doc.type(), doc.id(), newUid(doc), randomSeqNum, randomPrimaryTerm,
             2, VersionType.INTERNAL, Origin.PRIMARY, 0);
-        Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomSeqNum, true);
+        Engine.DeleteResult eDeleteResult = new Engine.DeleteResult(2, randomPrimaryTerm, randomSeqNum, true);
         Translog.Delete delete = new Translog.Delete(eDelete, eDeleteResult);
 
         out = new BytesStreamOutput();
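The TranslogTests hunks above show the second recurring change in this merge: Engine.IndexResult and Engine.DeleteResult now take the primary term as an extra constructor argument, immediately after the version, so translog entries built from engine results carry the term. The shape below is inferred purely from the call sites visible in this diff; the field and parameter names are assumptions, not the real Engine API.

    // Self-contained mirror of the widened constructor seen at the call sites.
    public class IndexResultSketch {
        final long version;
        final long primaryTerm; // newly threaded through by this change
        final long seqNo;
        final boolean created;

        IndexResultSketch(long version, long primaryTerm, long seqNo, boolean created) {
            this.version = version;
            this.primaryTerm = primaryTerm;
            this.seqNo = seqNo;
            this.created = created;
        }

        public static void main(String[] args) {
            // old call shape: new Engine.IndexResult(1, seqNo, true)
            // new call shape: new Engine.IndexResult(1, primaryTerm, seqNo, true)
            IndexResultSketch r = new IndexResultSketch(1, 2, 0, true);
            System.out.println(r.version + "/" + r.primaryTerm + "/" + r.seqNo + "/" + r.created);
        }
    }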
@@ -179,12 +179,12 @@ public class RecoverySourceHandlerTests extends ESTestCase {
         final int initialNumberOfDocs = randomIntBetween(16, 64);
         for (int i = 0; i < initialNumberOfDocs; i++) {
             final Engine.Index index = getIndex(Integer.toString(i));
-            operations.add(new Translog.Index(index, new Engine.IndexResult(1, SequenceNumbers.UNASSIGNED_SEQ_NO, true)));
+            operations.add(new Translog.Index(index, new Engine.IndexResult(1, 1, SequenceNumbers.UNASSIGNED_SEQ_NO, true)));
         }
         final int numberOfDocsWithValidSequenceNumbers = randomIntBetween(16, 64);
         for (int i = initialNumberOfDocs; i < initialNumberOfDocs + numberOfDocsWithValidSequenceNumbers; i++) {
             final Engine.Index index = getIndex(Integer.toString(i));
-            operations.add(new Translog.Index(index, new Engine.IndexResult(1, i - initialNumberOfDocs, true)));
+            operations.add(new Translog.Index(index, new Engine.IndexResult(1, 1, i - initialNumberOfDocs, true)));
         }
         operations.add(null);
         final long startingSeqNo = randomIntBetween(0, numberOfDocsWithValidSequenceNumbers - 1);
@@ -395,7 +395,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
         final IndexShard shard = mock(IndexShard.class);
         when(shard.seqNoStats()).thenReturn(mock(SeqNoStats.class));
         when(shard.segmentStats(anyBoolean())).thenReturn(mock(SegmentsStats.class));
-        when(shard.isPrimaryMode()).thenReturn(false);
+        when(shard.isRelocatedPrimary()).thenReturn(true);
         when(shard.acquireSafeIndexCommit()).thenReturn(mock(Engine.IndexCommitRef.class));
         doAnswer(invocation -> {
             ((ActionListener<Releasable>)invocation.getArguments()[0]).onResponse(() -> {});
@@ -444,7 +444,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
         final CancellableThreads cancellableThreads = new CancellableThreads();
         final IndexShard shard = mock(IndexShard.class);
         final AtomicBoolean freed = new AtomicBoolean(true);
-        when(shard.isPrimaryMode()).thenReturn(true);
+        when(shard.isRelocatedPrimary()).thenReturn(false);
         doAnswer(invocation -> {
             freed.set(false);
             ((ActionListener<Releasable>)invocation.getArguments()[0]).onResponse(() -> freed.set(true));
@@ -201,7 +201,7 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
         if (randomBoolean()) {
             // create a new translog
             translogUUIDtoUse = Translog.createEmptyTranslog(replica.shardPath().resolveTranslog(), flushedDocs,
-                replica.shardId(), replica.getPrimaryTerm());
+                replica.shardId(), replica.getPendingPrimaryTerm());
             translogGenToUse = 1;
         } else {
             translogUUIDtoUse = translogGeneration.translogUUID;
@@ -35,8 +35,10 @@ import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.ESIntegTestCase;
 
+import java.util.Arrays;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
 import static org.elasticsearch.search.profile.query.RandomQueryGenerator.randomQueryBuilder;
 import static org.hamcrest.Matchers.equalTo;
@@ -105,7 +107,6 @@ public class QueryProfilerIT extends ESIntegTestCase {
      * search for each query. It then does some basic sanity checking of score and hits
      * to make sure the profiling doesn't interfere with the hits being returned
      */
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32492")
     public void testProfileMatchesRegular() throws Exception {
         createIndex("test");
         ensureGreen();
@@ -150,6 +151,10 @@ public class QueryProfilerIT extends ESIntegTestCase {
             SearchResponse vanillaResponse = responses[0].getResponse();
             SearchResponse profileResponse = responses[1].getResponse();
 
+            assertThat(vanillaResponse.getFailedShards(), equalTo(0));
+            assertThat(profileResponse.getFailedShards(), equalTo(0));
+            assertThat(vanillaResponse.getSuccessfulShards(), equalTo(profileResponse.getSuccessfulShards()));
+
             float vanillaMaxScore = vanillaResponse.getHits().getMaxScore();
             float profileMaxScore = profileResponse.getHits().getMaxScore();
             if (Float.isNaN(vanillaMaxScore)) {
@@ -160,10 +165,19 @@ public class QueryProfilerIT extends ESIntegTestCase {
                     vanillaMaxScore, profileMaxScore, 0.001);
             }
 
-            assertThat(
-                "Profile totalHits of [" + profileResponse.getHits().getTotalHits() + "] is not close to Vanilla totalHits ["
-                    + vanillaResponse.getHits().getTotalHits() + "]",
-                vanillaResponse.getHits().getTotalHits(), equalTo(profileResponse.getHits().getTotalHits()));
+            if (vanillaResponse.getHits().totalHits != profileResponse.getHits().totalHits) {
+                Set<SearchHit> vanillaSet = new HashSet<>(Arrays.asList(vanillaResponse.getHits().getHits()));
+                Set<SearchHit> profileSet = new HashSet<>(Arrays.asList(profileResponse.getHits().getHits()));
+                if (vanillaResponse.getHits().totalHits > profileResponse.getHits().totalHits) {
+                    vanillaSet.removeAll(profileSet);
+                    fail("Vanilla hits were larger than profile hits. Non-overlapping elements were: "
+                        + vanillaSet.toString());
+                } else {
+                    profileSet.removeAll(vanillaSet);
+                    fail("Profile hits were larger than vanilla hits. Non-overlapping elements were: "
+                        + profileSet.toString());
+                }
+            }
 
             SearchHit[] vanillaHits = vanillaResponse.getHits().getHits();
             SearchHit[] profileHits = profileResponse.getHits().getHits();
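The last QueryProfilerIT hunk replaces a bare totalHits equality assertion with a diagnostic that names exactly which hits are missing from which response. The underlying pattern is a plain set difference; here is a self-contained sketch using String in place of SearchHit:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;

    public class HitDiffSketch {
        public static void main(String[] args) {
            Set<String> vanilla = new HashSet<>(Arrays.asList("doc1", "doc2", "doc3"));
            Set<String> profiled = new HashSet<>(Arrays.asList("doc1", "doc2"));
            if (!vanilla.equals(profiled)) {
                // removeAll leaves only the elements missing from the other response,
                // which is far more useful in a failure message than a bare count mismatch.
                Set<String> extra = new HashSet<>(vanilla);
                extra.removeAll(profiled);
                System.out.println("Non-overlapping elements were: " + extra); // [doc3]
            }
        }
    }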
@@ -84,6 +84,7 @@ import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.Future;
 import java.util.concurrent.FutureTask;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BiConsumer;
 import java.util.function.BiFunction;
 import java.util.function.Consumer;
 import java.util.function.Function;
@@ -233,7 +234,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
             activeIds.add(primary.routingEntry().allocationId().getId());
             ShardRouting startedRoutingEntry = ShardRoutingHelper.moveToStarted(primary.routingEntry());
             IndexShardRoutingTable routingTable = routingTable(shr -> shr == primary.routingEntry() ? startedRoutingEntry : shr);
-            primary.updateShardState(startedRoutingEntry, primary.getPrimaryTerm(), null,
+            primary.updateShardState(startedRoutingEntry, primary.getPendingPrimaryTerm(), null,
                 currentClusterStateVersion.incrementAndGet(), activeIds, routingTable, Collections.emptySet());
             for (final IndexShard replica : replicas) {
                 recoverReplica(replica);
@@ -279,20 +280,10 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
         /**
          * promotes the specific replica as the new primary
          */
-        public synchronized Future<PrimaryReplicaSyncer.ResyncTask> promoteReplicaToPrimary(IndexShard replica) throws IOException {
-            final long newTerm = indexMetaData.primaryTerm(shardId.id()) + 1;
-            IndexMetaData.Builder newMetaData = IndexMetaData.builder(indexMetaData).primaryTerm(shardId.id(), newTerm);
-            indexMetaData = newMetaData.build();
-            assertTrue(replicas.remove(replica));
-            closeShards(primary);
-            primary = replica;
-            assert primary.routingEntry().active() : "only active replicas can be promoted to primary: " + primary.routingEntry();
+        public Future<PrimaryReplicaSyncer.ResyncTask> promoteReplicaToPrimary(IndexShard replica) throws IOException {
             PlainActionFuture<PrimaryReplicaSyncer.ResyncTask> fut = new PlainActionFuture<>();
-            ShardRouting primaryRouting = replica.routingEntry().moveActiveReplicaToPrimary();
-            IndexShardRoutingTable routingTable = routingTable(shr -> shr == replica.routingEntry() ? primaryRouting : shr);
-
-            primary.updateShardState(primaryRouting,
-                newTerm, (shard, listener) -> primaryReplicaSyncer.resync(shard,
+            promoteReplicaToPrimary(replica,
+                (shard, listener) -> primaryReplicaSyncer.resync(shard,
                     new ActionListener<PrimaryReplicaSyncer.ResyncTask>() {
                         @Override
                         public void onResponse(PrimaryReplicaSyncer.ResyncTask resyncTask) {
@@ -305,11 +296,27 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
                             listener.onFailure(e);
                             fut.onFailure(e);
                         }
-                    }), currentClusterStateVersion.incrementAndGet(), activeIds(), routingTable, Collections.emptySet());
-
+                    }));
             return fut;
         }
 
+        public synchronized void promoteReplicaToPrimary(IndexShard replica,
+                                                         BiConsumer<IndexShard, ActionListener<PrimaryReplicaSyncer.ResyncTask>> primaryReplicaSyncer)
+            throws IOException {
+            final long newTerm = indexMetaData.primaryTerm(shardId.id()) + 1;
+            IndexMetaData.Builder newMetaData = IndexMetaData.builder(indexMetaData).primaryTerm(shardId.id(), newTerm);
+            indexMetaData = newMetaData.build();
+            assertTrue(replicas.remove(replica));
+            closeShards(primary);
+            primary = replica;
+            assert primary.routingEntry().active() : "only active replicas can be promoted to primary: " + primary.routingEntry();
+            ShardRouting primaryRouting = replica.routingEntry().moveActiveReplicaToPrimary();
+            IndexShardRoutingTable routingTable = routingTable(shr -> shr == replica.routingEntry() ? primaryRouting : shr);
+
+            primary.updateShardState(primaryRouting, newTerm, primaryReplicaSyncer, currentClusterStateVersion.incrementAndGet(),
+                activeIds(), routingTable, Collections.emptySet());
+        }
+
         private synchronized Set<String> activeIds() {
             return shardRoutings().stream()
                 .filter(ShardRouting::active).map(ShardRouting::allocationId).map(AllocationId::getId).collect(Collectors.toSet());
@@ -425,7 +432,8 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
 
         private void updateAllocationIDsOnPrimary() throws IOException {
 
-            primary.updateShardState(primary.routingEntry(), primary.getPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(),
+            primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null,
+                currentClusterStateVersion.incrementAndGet(),
                 activeIds(), routingTable(Function.identity()), Collections.emptySet());
         }
     }
@@ -527,7 +535,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
             IndexShard replica = replicationGroup.replicas.stream()
                 .filter(s -> replicaRouting.isSameAllocation(s.routingEntry())).findFirst().get();
             replica.acquireReplicaOperationPermit(
-                replicationGroup.primary.getPrimaryTerm(),
+                replicationGroup.primary.getPendingPrimaryTerm(),
                 globalCheckpoint,
                 new ActionListener<Releasable>() {
                     @Override
@@ -605,7 +613,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
 
         @Override
         protected void performOnReplica(BulkShardRequest request, IndexShard replica) throws Exception {
-            executeShardBulkOnReplica(request, replica, getPrimaryShard().getPrimaryTerm(), getPrimaryShard().getGlobalCheckpoint());
+            executeShardBulkOnReplica(request, replica, getPrimaryShard().getPendingPrimaryTerm(), getPrimaryShard().getGlobalCheckpoint());
         }
     }
 
@@ -664,14 +672,18 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
      * indexes the given requests on the supplied replica shard
      */
     void indexOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica) throws Exception {
-        executeShardBulkOnReplica(request, replica, group.primary.getPrimaryTerm(), group.primary.getGlobalCheckpoint());
+        indexOnReplica(request, group, replica, group.primary.getPendingPrimaryTerm());
+    }
+
+    void indexOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica, long term) throws Exception {
+        executeShardBulkOnReplica(request, replica, term, group.primary.getGlobalCheckpoint());
     }
 
     /**
     * Executes the delete request on the given replica shard.
     */
    void deleteOnReplica(BulkShardRequest request, ReplicationGroup group, IndexShard replica) throws Exception {
-        executeShardBulkOnReplica(request, replica, group.primary.getPrimaryTerm(), group.primary.getGlobalCheckpoint());
+        executeShardBulkOnReplica(request, replica, group.primary.getPendingPrimaryTerm(), group.primary.getGlobalCheckpoint());
     }
 
     class GlobalCheckpointSync extends ReplicationAction<
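The promoteReplicaToPrimary refactor above splits a future-returning convenience method from a listener-accepting overload, with the future doubling as the adapter between the two. A self-contained sketch of the same pattern, with CompletableFuture standing in for Elasticsearch's PlainActionFuture and hypothetical names throughout:

    import java.util.concurrent.CompletableFuture;
    import java.util.function.BiConsumer;

    public class PromoteSketch {
        interface Listener<T> { void onResponse(T t); void onFailure(Exception e); }

        // Convenience overload: builds a future, then delegates to the listener-based variant.
        static <T> CompletableFuture<T> promote(BiConsumer<String, Listener<T>> syncer, String shard) {
            CompletableFuture<T> fut = new CompletableFuture<>();
            syncer.accept(shard, new Listener<T>() {
                @Override public void onResponse(T t) { fut.complete(t); }
                @Override public void onFailure(Exception e) { fut.completeExceptionally(e); }
            });
            return fut;
        }

        public static void main(String[] args) throws Exception {
            CompletableFuture<String> fut = promote((shard, l) -> l.onResponse("resynced " + shard), "shard0");
            System.out.println(fut.get()); // resynced shard0
        }
    }

Splitting the overloads this way lets tests inject their own resync behavior while keeping the original blocking call sites unchanged.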
@@ -425,7 +425,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
         IndexShardRoutingTable newRoutingTable = new IndexShardRoutingTable.Builder(shardRouting.shardId())
             .addShard(shardRouting)
             .build();
-        shard.updateShardState(shardRouting, shard.getPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(),
+        shard.updateShardState(shardRouting, shard.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(),
             inSyncIds, newRoutingTable, Collections.emptySet());
     }
 
@@ -514,8 +514,8 @@ public abstract class IndexShardTestCase extends ESTestCase {
             request,
             (int) ByteSizeUnit.MB.toBytes(1),
             Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), pNode.getName()).build());
-        primary.updateShardState(primary.routingEntry(), primary.getPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(),
-            inSyncIds, routingTable, Collections.emptySet());
+        primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null,
+            currentClusterStateVersion.incrementAndGet(), inSyncIds, routingTable, Collections.emptySet());
         recovery.recoverToTarget();
         recoveryTarget.markAsDone();
     }
@@ -536,9 +536,9 @@ public abstract class IndexShardTestCase extends ESTestCase {
         Set<String> inSyncIdsWithReplica = new HashSet<>(inSyncIds);
         inSyncIdsWithReplica.add(replica.routingEntry().allocationId().getId());
         // update both primary and replica shard state
-        primary.updateShardState(primary.routingEntry(), primary.getPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(),
-            inSyncIdsWithReplica, newRoutingTable, Collections.emptySet());
-        replica.updateShardState(replica.routingEntry().moveToStarted(), replica.getPrimaryTerm(), null,
+        primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null,
+            currentClusterStateVersion.incrementAndGet(), inSyncIdsWithReplica, newRoutingTable, Collections.emptySet());
+        replica.updateShardState(replica.routingEntry().moveToStarted(), replica.getPendingPrimaryTerm(), null,
             currentClusterStateVersion.get(), inSyncIdsWithReplica, newRoutingTable, Collections.emptySet());
     }
 
@@ -560,7 +560,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
             .removeShard(replica.routingEntry())
             .addShard(routingEntry)
             .build();
-        replica.updateShardState(routingEntry, replica.getPrimaryTerm() + 1,
+        replica.updateShardState(routingEntry, replica.getPendingPrimaryTerm() + 1,
             (is, listener) ->
                 listener.onResponse(new PrimaryReplicaSyncer.ResyncTask(1, "type", "action", "desc", null, Collections.emptyMap())),
             currentClusterStateVersion.incrementAndGet(),
@@ -12,9 +12,10 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.xcontent.ObjectParser;
-import org.elasticsearch.common.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContentObject;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.max.MaxAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder;
@@ -32,6 +33,8 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.stream.Collectors;
 
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
+
 /**
  * The configuration object for the metrics portion of a rollup job config
  *
@@ -48,14 +51,7 @@ import java.util.stream.Collectors;
  * ]
  * }
  */
-public class MetricConfig implements Writeable, ToXContentFragment {
-    private static final String NAME = "metric_config";
-
-    private String field;
-    private List<String> metrics;
-
-    private static final ParseField FIELD = new ParseField("field");
-    private static final ParseField METRICS = new ParseField("metrics");
+public class MetricConfig implements Writeable, ToXContentObject {
 
     // TODO: replace these with an enum
     private static final ParseField MIN = new ParseField("min");
@@ -64,27 +60,54 @@ public class MetricConfig implements Writeable, ToXContentFragment {
     private static final ParseField AVG = new ParseField("avg");
     private static final ParseField VALUE_COUNT = new ParseField("value_count");
 
-    public static final ObjectParser<MetricConfig.Builder, Void> PARSER = new ObjectParser<>(NAME, MetricConfig.Builder::new);
-
+    private static final String NAME = "metrics";
+    private static final String FIELD = "field";
+    private static final String METRICS = "metrics";
+    private static final ConstructingObjectParser<MetricConfig, Void> PARSER;
     static {
-        PARSER.declareString(MetricConfig.Builder::setField, FIELD);
-        PARSER.declareStringArray(MetricConfig.Builder::setMetrics, METRICS);
+        PARSER = new ConstructingObjectParser<>(NAME, args -> {
+            @SuppressWarnings("unchecked") List<String> metrics = (List<String>) args[1];
+            return new MetricConfig((String) args[0], metrics);
+        });
+        PARSER.declareString(constructorArg(), new ParseField(FIELD));
+        PARSER.declareStringArray(constructorArg(), new ParseField(METRICS));
     }
 
-    MetricConfig(String name, List<String> metrics) {
-        this.field = name;
+    private final String field;
+    private final List<String> metrics;
+
+    public MetricConfig(final String field, final List<String> metrics) {
+        if (field == null || field.isEmpty()) {
+            throw new IllegalArgumentException("Field must be a non-null, non-empty string");
+        }
+        if (metrics == null || metrics.isEmpty()) {
+            throw new IllegalArgumentException("Metrics must be a non-null, non-empty array of strings");
+        }
+        metrics.forEach(m -> {
+            if (RollupField.SUPPORTED_METRICS.contains(m) == false) {
+                throw new IllegalArgumentException("Unsupported metric [" + m + "]. " +
+                    "Supported metrics include: " + RollupField.SUPPORTED_METRICS);
+            }
+        });
+        this.field = field;
         this.metrics = metrics;
     }
 
-    MetricConfig(StreamInput in) throws IOException {
+    MetricConfig(final StreamInput in) throws IOException {
         field = in.readString();
         metrics = in.readList(StreamInput::readString);
     }
 
+    /**
+     * @return the name of the field used in the metric configuration. Never {@code null}.
+     */
     public String getField() {
         return field;
     }
 
+    /**
+     * @return the names of the metrics used in the metric configuration. Never {@code null}.
+     */
     public List<String> getMetrics() {
         return metrics;
     }
@@ -159,10 +182,13 @@ public class MetricConfig implements Writeable, ToXContentFragment {
     }
 
     @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.field(FIELD.getPreferredName(), field);
-        builder.field(METRICS.getPreferredName(), metrics);
-        return builder;
+    public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
+        builder.startObject();
+        {
+            builder.field(FIELD, field);
+            builder.field(METRICS, metrics);
+        }
+        return builder.endObject();
     }
 
     @Override
@@ -172,19 +198,16 @@ public class MetricConfig implements Writeable, ToXContentFragment {
     }
 
     @Override
-    public boolean equals(Object other) {
+    public boolean equals(final Object other) {
         if (this == other) {
             return true;
         }
 
         if (other == null || getClass() != other.getClass()) {
             return false;
         }
-
-        MetricConfig that = (MetricConfig) other;
-
-        return Objects.equals(this.field, that.field)
-            && Objects.equals(this.metrics, that.metrics);
+        final MetricConfig that = (MetricConfig) other;
+        return Objects.equals(field, that.field) && Objects.equals(metrics, that.metrics);
     }
 
     @Override
@@ -197,52 +220,7 @@ public class MetricConfig implements Writeable, ToXContentFragment {
         return Strings.toString(this, true, true);
     }
 
-
-    public static class Builder {
-        private String field;
-        private List<String> metrics;
-
-        public Builder() {
-        }
-
-        public Builder(MetricConfig config) {
-            this.field = config.getField();
-            this.metrics = config.getMetrics();
-        }
-
-        public String getField() {
-            return field;
-        }
-
-        public MetricConfig.Builder setField(String field) {
-            this.field = field;
-            return this;
-        }
-
-        public List<String> getMetrics() {
-            return metrics;
-        }
-
-        public MetricConfig.Builder setMetrics(List<String> metrics) {
-            this.metrics = metrics;
-            return this;
-        }
-
-        public MetricConfig build() {
-            if (Strings.isNullOrEmpty(field) == true) {
-                throw new IllegalArgumentException("Parameter [" + FIELD.getPreferredName() + "] must be a non-null, non-empty string.");
-            }
-            if (metrics == null || metrics.isEmpty()) {
-                throw new IllegalArgumentException("Parameter [" + METRICS.getPreferredName()
-                    + "] must be a non-null, non-empty array of strings.");
-            }
-            metrics.forEach(m -> {
-                if (RollupField.SUPPORTED_METRICS.contains(m) == false) {
-                    throw new IllegalArgumentException("Unsupported metric [" + m + "]. " +
-                        "Supported metrics include: " + RollupField.SUPPORTED_METRICS);
-                }
-            });
-            return new MetricConfig(field, metrics);
-        }
+    public static MetricConfig fromXContent(final XContentParser parser) throws IOException {
+        return PARSER.parse(parser, null);
     }
 }
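The MetricConfig rewrite above replaces a mutable Builder, whose validation ran only in build(), with an immutable class whose constructor validates eagerly, plus a ConstructingObjectParser that feeds parsed values straight into that constructor. A self-contained sketch of the immutable half, mirroring the new constructor's checks (the x-content parser wiring is omitted here because it requires the Elasticsearch libraries):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class MetricConfigSketch {
        // Stands in for RollupField.SUPPORTED_METRICS.
        private static final List<String> SUPPORTED = Arrays.asList("min", "max", "sum", "avg", "value_count");

        private final String field;
        private final List<String> metrics;

        public MetricConfigSketch(String field, List<String> metrics) {
            if (field == null || field.isEmpty()) {
                throw new IllegalArgumentException("Field must be a non-null, non-empty string");
            }
            if (metrics == null || metrics.isEmpty()) {
                throw new IllegalArgumentException("Metrics must be a non-null, non-empty array of strings");
            }
            for (String m : metrics) {
                if (!SUPPORTED.contains(m)) {
                    throw new IllegalArgumentException("Unsupported metric [" + m + "]");
                }
            }
            this.field = field;
            this.metrics = Collections.unmodifiableList(metrics);
        }

        public static void main(String[] args) {
            MetricConfigSketch ok = new MetricConfigSketch("price", Arrays.asList("min", "max"));
            System.out.println(ok.field + " -> " + ok.metrics);
            try {
                new MetricConfigSketch("price", Arrays.asList("median"));
            } catch (IllegalArgumentException e) {
                System.out.println("rejected: " + e.getMessage()); // Unsupported metric [median]
            }
        }
    }

Because validation lives in the constructor, an invalid instance can never exist, so callers and parsers share one code path.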
@@ -63,7 +63,7 @@ public class RollupJobConfig implements NamedWriteable, ToXContentObject {
     static {
         PARSER.declareString(RollupJobConfig.Builder::setId, RollupField.ID);
         PARSER.declareObject(RollupJobConfig.Builder::setGroupConfig, (p, c) -> GroupConfig.PARSER.apply(p,c).build(), GROUPS);
-        PARSER.declareObjectArray(RollupJobConfig.Builder::setMetricsConfig, (p, c) -> MetricConfig.PARSER.apply(p, c).build(), METRICS);
+        PARSER.declareObjectArray(RollupJobConfig.Builder::setMetricsConfig, (p, c) -> MetricConfig.fromXContent(p), METRICS);
         PARSER.declareString((params, val) ->
             params.setTimeout(TimeValue.parseTimeValue(val, TIMEOUT.getPreferredName())), TIMEOUT);
         PARSER.declareString(RollupJobConfig.Builder::setIndexPattern, INDEX_PATTERN);
@@ -160,10 +160,8 @@ public class RollupJobConfig implements NamedWriteable, ToXContentObject {
         }
         if (metricsConfig != null) {
             builder.startArray(METRICS.getPreferredName());
-            for (MetricConfig config : metricsConfig) {
-                builder.startObject();
-                config.toXContent(builder, params);
-                builder.endObject();
+            for (MetricConfig metric : metricsConfig) {
+                metric.toXContent(builder, params);
             }
             builder.endArray();
         }
@@ -13,7 +13,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.util.concurrent.ThreadContext.StoredContext;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 
 import java.io.IOException;
 import java.util.Objects;

@@ -10,7 +10,7 @@ import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.xpack.core.security.authc.Authentication;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 
 import java.io.IOException;

@@ -8,7 +8,7 @@ package org.elasticsearch.xpack.core.security.action.user;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 
 import java.io.IOException;

@@ -18,7 +18,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.xpack.core.security.authc.support.Hasher;
 import org.elasticsearch.xpack.core.security.support.Validation;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.xcontent.XContentUtils;
 
 import java.io.IOException;

@@ -8,7 +8,7 @@ package org.elasticsearch.xpack.core.security.action.user;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 
 import java.io.IOException;
 import java.util.Collection;

@@ -20,7 +20,7 @@ import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.xpack.core.security.authc.support.Hasher;
 import org.elasticsearch.xpack.core.security.support.Validation;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.xcontent.XContentUtils;
 
 import java.io.IOException;

@@ -12,7 +12,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.xpack.core.security.user.InternalUserSerializationHelper;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 
 import java.io.IOException;
 import java.util.Base64;

@@ -6,7 +6,7 @@
 package org.elasticsearch.xpack.core.security.authc;
 
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 
 import java.util.Objects;
 

@@ -9,7 +9,7 @@ import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.xpack.core.XPackField;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 
 import java.util.Collections;
 import java.util.HashMap;
@@ -26,7 +26,6 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivilege;
 import org.elasticsearch.xpack.core.security.authz.privilege.ConditionalClusterPrivileges;
-import org.elasticsearch.xpack.core.security.support.MetadataUtils;
 import org.elasticsearch.xpack.core.security.support.Validation;
 import org.elasticsearch.xpack.core.security.xcontent.XContentUtils;
 
@@ -163,7 +162,7 @@ public class RoleDescriptor implements ToXContentObject {
         }
         sb.append("], runAs=[").append(Strings.arrayToCommaDelimitedString(runAs));
         sb.append("], metadata=[");
-        MetadataUtils.writeValue(sb, metadata);
+        sb.append(metadata);
         sb.append("]]");
         return sb.toString();
     }

@@ -62,7 +62,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader;
 import org.elasticsearch.xpack.core.security.support.Exceptions;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 
 import java.io.IOException;
 import java.util.ArrayList;

@@ -5,8 +5,6 @@
  */
 package org.elasticsearch.xpack.core.security.support;
 
-import java.lang.reflect.Array;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.Map;
 
@@ -19,50 +17,6 @@ public class MetadataUtils {
     private MetadataUtils() {
     }
 
-    public static void writeValue(StringBuilder sb, Object object) {
-        if (object == null) {
-            sb.append(object);
-        } else if (object instanceof Map) {
-            sb.append("{");
-            for (Map.Entry<String, Object> entry : ((Map<String, Object>) object).entrySet()) {
-                sb.append(entry.getKey()).append("=");
-                writeValue(sb, entry.getValue());
-            }
-            sb.append("}");
-
-        } else if (object instanceof Collection) {
-            sb.append("[");
-            boolean first = true;
-            for (Object item : (Collection) object) {
-                if (!first) {
-                    sb.append(",");
-                }
-                writeValue(sb, item);
-                first = false;
-            }
-            sb.append("]");
-        } else if (object.getClass().isArray()) {
-            sb.append("[");
-            for (int i = 0; i < Array.getLength(object); i++) {
-                if (i != 0) {
-                    sb.append(",");
-                }
-                writeValue(sb, Array.get(object, i));
-            }
-            sb.append("]");
-        } else {
-            sb.append(object);
-        }
-    }
-
     public static void verifyNoReservedMetadata(Map<String, Object> metadata) {
         for (String key : metadata.keySet()) {
             if (key.startsWith(RESERVED_PREFIX)) {
                 throw new IllegalArgumentException("invalid user metadata. [" + key + "] is a reserved for internal use");
             }
         }
     }
 
     public static boolean containsReservedMetadata(Map<String, Object> metadata) {
         for (String key : metadata.keySet()) {
             if (key.startsWith(RESERVED_PREFIX)) {
@@ -9,6 +9,7 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.support.MetadataUtils;
 
 import java.util.Collections;

@@ -6,6 +6,7 @@
 package org.elasticsearch.xpack.core.security.user;
 
 import org.elasticsearch.Version;
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.support.MetadataUtils;
 
 /**

@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.core.security.user;
 
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.support.MetadataUtils;
 
 

@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.security.user;
 
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.protocol.xpack.security.User;
 
 import java.io.IOException;
 

@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.core.security.user;
 
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.support.MetadataUtils;
 
 /**

@@ -6,6 +6,7 @@
 package org.elasticsearch.xpack.core.security.user;
 
 import org.elasticsearch.Version;
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.support.MetadataUtils;
 
 /**

@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.core.security.user;
 
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.authz.privilege.SystemPrivilege;
 
 import java.util.function.Predicate;

@@ -5,6 +5,8 @@
  */
 package org.elasticsearch.xpack.core.security.user;
 
+import org.elasticsearch.protocol.xpack.security.User;
+
 /**
  * internal user that manages xpack security. Has all cluster/indices permissions.
  */

@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.xpack.core.security.user;
 
+import org.elasticsearch.protocol.xpack.security.User;
 import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
 import org.elasticsearch.xpack.core.security.authz.permission.Role;
 import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField;
@@ -17,6 +17,7 @@ import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig;
 import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Random;
 import java.util.stream.Collectors;
@@ -38,11 +39,7 @@ public class ConfigTestHelpers {
         builder.setGroupConfig(ConfigTestHelpers.getGroupConfig().build());
         builder.setPageSize(ESTestCase.randomIntBetween(1,10));
         if (ESTestCase.randomBoolean()) {
-            List<MetricConfig> metrics = IntStream.range(1, ESTestCase.randomIntBetween(1,10))
-                .mapToObj(n -> ConfigTestHelpers.getMetricConfig().build())
-                .collect(Collectors.toList());
-
-            builder.setMetricsConfig(metrics);
+            builder.setMetricsConfig(randomMetricsConfigs(ESTestCase.random()));
         }
         return builder;
     }
@@ -59,32 +56,6 @@ public class ConfigTestHelpers {
         return groupBuilder;
     }
 
-    public static MetricConfig.Builder getMetricConfig() {
-        MetricConfig.Builder builder = new MetricConfig.Builder();
-        builder.setField(ESTestCase.randomAlphaOfLength(15)); // large names so we don't accidentally collide
-        List<String> metrics = new ArrayList<>();
-        if (ESTestCase.randomBoolean()) {
-            metrics.add("min");
-        }
-        if (ESTestCase.randomBoolean()) {
-            metrics.add("max");
-        }
-        if (ESTestCase.randomBoolean()) {
-            metrics.add("sum");
-        }
-        if (ESTestCase.randomBoolean()) {
-            metrics.add("avg");
-        }
-        if (ESTestCase.randomBoolean()) {
-            metrics.add("value_count");
-        }
-        if (metrics.size() == 0) {
-            metrics.add("min");
-        }
-        builder.setMetrics(metrics);
-        return builder;
-    }
-
     private static final String[] TIME_SUFFIXES = new String[]{"d", "h", "ms", "s", "m"};
     public static String randomPositiveTimeValue() {
         return ESTestCase.randomIntBetween(1, 1000) + ESTestCase.randomFrom(TIME_SUFFIXES);
@@ -123,6 +94,39 @@ public class ConfigTestHelpers {
         return new HistogramGroupConfig(randomInterval(random), randomFields(random));
     }
 
+    public static List<MetricConfig> randomMetricsConfigs(final Random random) {
+        final int numMetrics = randomIntBetween(random, 1, 10);
+        final List<MetricConfig> metrics = new ArrayList<>(numMetrics);
+        for (int i = 0; i < numMetrics; i++) {
+            metrics.add(randomMetricConfig(random));
+        }
+        return Collections.unmodifiableList(metrics);
+    }
+
+    public static MetricConfig randomMetricConfig(final Random random) {
+        final String field = randomAsciiAlphanumOfLengthBetween(random, 15, 25); // large names so we don't accidentally collide
+        final List<String> metrics = new ArrayList<>();
+        if (random.nextBoolean()) {
+            metrics.add("min");
+        }
+        if (random.nextBoolean()) {
+            metrics.add("max");
+        }
+        if (random.nextBoolean()) {
+            metrics.add("sum");
+        }
+        if (random.nextBoolean()) {
+            metrics.add("avg");
+        }
+        if (random.nextBoolean()) {
+            metrics.add("value_count");
+        }
+        if (metrics.size() == 0) {
+            metrics.add("min");
+        }
+        return new MetricConfig(field, Collections.unmodifiableList(metrics));
+    }
+
     public static TermsGroupConfig randomTermsGroupConfig(final Random random) {
         return new TermsGroupConfig(randomFields(random));
     }
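ConfigTestHelpers now derives MetricConfig fixtures from an explicit java.util.Random instead of ESTestCase's static helpers, which keeps generation reproducible from a single seed. A self-contained sketch of that seed-driven style (the helper names here are illustrative, not the test-framework API):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Random;

    public class RandomConfigSketch {
        static List<String> randomMetrics(Random random) {
            List<String> metrics = new ArrayList<>();
            for (String m : new String[] {"min", "max", "sum", "avg", "value_count"}) {
                if (random.nextBoolean()) {
                    metrics.add(m);
                }
            }
            if (metrics.isEmpty()) {
                metrics.add("min"); // guarantee at least one metric, as the helper does
            }
            return Collections.unmodifiableList(metrics);
        }

        public static void main(String[] args) {
            long seed = 42L;
            // Same seed, same fixture: both lines print the identical list,
            // so a failing test can be replayed exactly from its seed.
            System.out.println(randomMetrics(new Random(seed)));
            System.out.println(randomMetrics(new Random(seed)));
        }
    }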
@@ -17,14 +17,16 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
+import static java.util.Collections.singletonList;
 import static org.hamcrest.Matchers.equalTo;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
-public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
+public class MetricConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
 
     @Override
-    protected MetricConfig doParseInstance(XContentParser parser) throws IOException {
-        return MetricConfig.PARSER.apply(parser, null).build();
+    protected MetricConfig doParseInstance(final XContentParser parser) throws IOException {
+        return MetricConfig.fromXContent(parser);
     }
 
     @Override
@@ -34,24 +36,20 @@ public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
 
     @Override
     protected MetricConfig createTestInstance() {
-        return ConfigTestHelpers.getMetricConfig().build();
+        return ConfigTestHelpers.randomMetricConfig(random());
     }
 
-    public void testValidateNoMapping() throws IOException {
+    public void testValidateNoMapping() {
         ActionRequestValidationException e = new ActionRequestValidationException();
         Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();
 
-        MetricConfig config = new MetricConfig.Builder()
-            .setField("my_field")
-            .setMetrics(Collections.singletonList("max"))
-            .build();
+        MetricConfig config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().get(0), equalTo("Could not find a [numeric] field with name [my_field] in any of the " +
             "indices matching the index pattern."));
     }
 
-    public void testValidateNomatchingField() throws IOException {
-
+    public void testValidateNomatchingField() {
         ActionRequestValidationException e = new ActionRequestValidationException();
         Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();
 
@@ -59,17 +57,13 @@ public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
         FieldCapabilities fieldCaps = mock(FieldCapabilities.class);
         responseMap.put("some_other_field", Collections.singletonMap("date", fieldCaps));
 
-        MetricConfig config = new MetricConfig.Builder()
-            .setField("my_field")
-            .setMetrics(Collections.singletonList("max"))
-            .build();
+        MetricConfig config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().get(0), equalTo("Could not find a [numeric] field with name [my_field] in any of the " +
             "indices matching the index pattern."));
     }
 
-    public void testValidateFieldWrongType() throws IOException {
-
+    public void testValidateFieldWrongType() {
         ActionRequestValidationException e = new ActionRequestValidationException();
         Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();
 
@@ -77,17 +71,13 @@ public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
         FieldCapabilities fieldCaps = mock(FieldCapabilities.class);
         responseMap.put("my_field", Collections.singletonMap("keyword", fieldCaps));
 
-        MetricConfig config = new MetricConfig.Builder()
-            .setField("my_field")
-            .setMetrics(Collections.singletonList("max"))
-            .build();
+        MetricConfig config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().get(0), equalTo("The field referenced by a metric group must be a [numeric] type, " +
             "but found [keyword] for field [my_field]"));
     }
 
-    public void testValidateFieldMatchingNotAggregatable() throws IOException {
-
+    public void testValidateFieldMatchingNotAggregatable() {
         ActionRequestValidationException e = new ActionRequestValidationException();
         Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();
 
@@ -96,15 +86,12 @@ public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
         when(fieldCaps.isAggregatable()).thenReturn(false);
         responseMap.put("my_field", Collections.singletonMap("long", fieldCaps));
 
-        MetricConfig config = new MetricConfig.Builder()
-            .setField("my_field")
-            .setMetrics(Collections.singletonList("max"))
-            .build();
+        MetricConfig config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().get(0), equalTo("The field [my_field] must be aggregatable across all indices, but is not."));
     }
 
-    public void testValidateMatchingField() throws IOException {
+    public void testValidateMatchingField() {
         ActionRequestValidationException e = new ActionRequestValidationException();
         Map<String, Map<String, FieldCapabilities>> responseMap = new HashMap<>();
 
@@ -113,10 +100,7 @@ public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("long", fieldCaps));
 
-        MetricConfig config = new MetricConfig.Builder()
-            .setField("my_field")
-            .setMetrics(Collections.singletonList("max"))
-            .build();
+        MetricConfig config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));
 
@@ -124,70 +108,49 @@ public class MetricsConfigSerializingTests extends AbstractSerializingTestCase<MetricConfig> {
         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("double", fieldCaps));
-        config = new MetricConfig.Builder()
-            .setField("my_field")
-            .setMetrics(Collections.singletonList("max"))
-            .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));
 
         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("float", fieldCaps));
-        config = new MetricConfig.Builder()
-            .setField("my_field")
-            .setMetrics(Collections.singletonList("max"))
-            .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));
 
         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("short", fieldCaps));
-        config = new MetricConfig.Builder()
-            .setField("my_field")
-            .setMetrics(Collections.singletonList("max"))
-            .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));
 
         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("byte", fieldCaps));
-        config = new MetricConfig.Builder()
-            .setField("my_field")
-            .setMetrics(Collections.singletonList("max"))
-            .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));
 
         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("half_float", fieldCaps));
-        config = new MetricConfig.Builder()
-            .setField("my_field")
-            .setMetrics(Collections.singletonList("max"))
-            .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));
 
         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("scaled_float", fieldCaps));
-        config = new MetricConfig.Builder()
-            .setField("my_field")
-            .setMetrics(Collections.singletonList("max"))
-            .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));
 
         fieldCaps = mock(FieldCapabilities.class);
         when(fieldCaps.isAggregatable()).thenReturn(true);
         responseMap.put("my_field", Collections.singletonMap("integer", fieldCaps));
-        config = new MetricConfig.Builder()
-            .setField("my_field")
-            .setMetrics(Collections.singletonList("max"))
-            .build();
+        config = new MetricConfig("my_field", singletonList("max"));
         config.validateMappings(responseMap, e);
         assertThat(e.validationErrors().size(), equalTo(0));
     }
@@ -76,7 +76,7 @@ import org.elasticsearch.test.IndexSettingsModule;
 import org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReader.DocumentSubsetDirectoryReader;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissions;
 import org.elasticsearch.xpack.core.security.authz.permission.FieldPermissionsDefinition;
-import org.elasticsearch.xpack.core.security.user.User;
+import org.elasticsearch.protocol.xpack.security.User;
 import org.junit.After;
 import org.junit.Before;
 import org.mockito.ArgumentCaptor;
@@ -442,7 +442,7 @@ public class SecurityIndexSearcherWrapperUnitTests extends ESTestCase {
                 return "rendered_text";
             }
         };
-
+
         when(scriptService.compile(any(Script.class), eq(TemplateScript.CONTEXT))).thenReturn(compiledTemplate);
 
         XContentBuilder builder = jsonBuilder();
@@ -302,6 +302,7 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<Cl
         when(mockNodeResponse.shardsStats()).thenReturn(new ShardStats[]{mockShardStats});
 
         final ClusterStatsResponse clusterStats = new ClusterStatsResponse(1451606400000L,
+            "_cluster",
             clusterName,
             singletonList(mockNodeResponse),
             emptyList());
@@ -353,6 +354,7 @@ public class ClusterStatsMonitoringDocTests extends BaseMonitoringDocTestCase<Cl
             + (needToEnableTLS ? ",\"cluster_needs_tls\":true" : "")
             + "},"
             + "\"cluster_stats\":{"
+            + "\"cluster_uuid\":\"_cluster\","
             + "\"timestamp\":1451606400000,"
             + "\"status\":\"red\","
             + "\"indices\":{"
@@ -26,10 +26,10 @@ import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig;
 import org.joda.time.DateTimeZone;
 
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 
+import static java.util.Collections.singletonList;
 import static org.hamcrest.Matchers.equalTo;
 
 public class RollupJobIdentifierUtilTests extends ESTestCase {
@@ -103,10 +103,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
         GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
         group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
         job.setGroupConfig(group.build());
-        job.setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
-            .setField("bar")
-            .setMetrics(Collections.singletonList("max"))
-            .build()));
+        job.setMetricsConfig(singletonList(new MetricConfig("bar", singletonList("max"))));
         RollupJobCaps cap = new RollupJobCaps(job.build());
         Set<RollupJobCaps> caps = singletonSet(cap);
 
@@ -168,10 +165,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
         GroupConfig.Builder group = ConfigTestHelpers.getGroupConfig();
         group.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
         job.setGroupConfig(group.build());
-        job.setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
-            .setField("bar")
-            .setMetrics(Collections.singletonList("max"))
-            .build()));
+        job.setMetricsConfig(singletonList(new MetricConfig("bar", singletonList("max"))));
         RollupJobCaps cap = new RollupJobCaps(job.build());
         Set<RollupJobCaps> caps = new HashSet<>(2);
         caps.add(cap);
@@ -180,10 +174,7 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
         GroupConfig.Builder group2 = ConfigTestHelpers.getGroupConfig();
         group2.setDateHisto(new DateHistoGroupConfig.Builder().setField("foo").setInterval(new DateHistogramInterval("1h")).build());
         job2.setGroupConfig(group.build());
-        job.setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
-            .setField("bar")
-            .setMetrics(Collections.singletonList("min"))
-            .build()));
+        job.setMetricsConfig(singletonList(new MetricConfig("bar", singletonList("min"))));
         RollupJobCaps cap2 = new RollupJobCaps(job2.build());
         caps.add(cap2);
 
@@ -331,12 +322,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
                 .build())
             .setHisto(new HistogramGroupConfig(1L, "baz")) // <-- NOTE right type but wrong name
             .build())
-            .setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
-                .setField("max_field")
-                .setMetrics(Collections.singletonList("max")).build(),
-                new MetricConfig.Builder()
-                    .setField("avg_field")
-                    .setMetrics(Collections.singletonList("avg")).build()))
+            .setMetricsConfig(
+                Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))))
             .build();
         Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
 
@@ -360,12 +347,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
                 .setTimeZone(DateTimeZone.UTC)
                 .build())
             .build())
-            .setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
-                .setField("max_field")
-                .setMetrics(Collections.singletonList("max")).build(),
-                new MetricConfig.Builder()
-                    .setField("avg_field")
-                    .setMetrics(Collections.singletonList("avg")).build()))
+            .setMetricsConfig(
+                Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))))
            .build();
         Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
 
@@ -412,12 +395,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
                 .setTimeZone(DateTimeZone.UTC)
                 .build())
             .build())
-            .setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
-                .setField("max_field")
-                .setMetrics(Collections.singletonList("max")).build(),
-                new MetricConfig.Builder()
-                    .setField("avg_field")
-                    .setMetrics(Collections.singletonList("avg")).build()))
+            .setMetricsConfig(
+                Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))))
             .build();
         Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
 
@@ -442,12 +421,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
                 .build())
             .setHisto(new HistogramGroupConfig(1L, "baz")) // <-- NOTE right type but wrong name
             .build())
-            .setMetricsConfig(Arrays.asList(new MetricConfig.Builder()
-                .setField("max_field")
-                .setMetrics(Collections.singletonList("max")).build(),
-                new MetricConfig.Builder()
-                    .setField("avg_field")
-                    .setMetrics(Collections.singletonList("avg")).build()))
+            .setMetricsConfig(
+                Arrays.asList(new MetricConfig("max_field", singletonList("max")), new MetricConfig("avg_field", singletonList("avg"))))
             .build();
         Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(job));
 
@@ -485,9 +460,8 @@ public class RollupJobIdentifierUtilTests extends ESTestCase {
         int i = ESTestCase.randomIntBetween(0, 3);
 
         Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(ConfigTestHelpers
-            .getRollupJob("foo").setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
-                .setField("foo")
-                .setMetrics(Arrays.asList("avg", "max", "min", "sum")).build()))
+            .getRollupJob("foo")
+            .setMetricsConfig(singletonList(new MetricConfig("foo", Arrays.asList("avg", "max", "min", "sum"))))
             .build()));
 
         String aggType;
@@ -45,6 +45,7 @@ import java.util.function.Function;
 import java.util.stream.Collectors;
 
 import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
 import static org.elasticsearch.xpack.rollup.RollupRequestTranslator.translateAggregation;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.core.IsInstanceOf.instanceOf;
@@ -153,9 +154,8 @@ public class RollupRequestTranslationTests extends ESTestCase {
 
     public void testUnsupportedMetric() {
         Set<RollupJobCaps> caps = singletonSet(new RollupJobCaps(ConfigTestHelpers
-            .getRollupJob("foo").setMetricsConfig(Collections.singletonList(new MetricConfig.Builder()
-                .setField("foo")
-                .setMetrics(Arrays.asList("avg", "max", "min", "sum")).build()))
+            .getRollupJob("foo")
+            .setMetricsConfig(singletonList(new MetricConfig("foo", Arrays.asList("avg", "max", "min", "sum"))))
             .build()));
 
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
@@ -384,7 +384,7 @@ public class RollupRequestTranslationTests extends ESTestCase {
         assertThat(e.getMessage(), equalTo("Unable to translate aggregation tree into Rollup. Aggregation [test_geo] is of type " +
             "[GeoDistanceAggregationBuilder] which is currently unsupported."));
     }
-
+
     private Set<RollupJobCaps> singletonSet(RollupJobCaps cap) {
         Set<RollupJobCaps> caps = new HashSet<>();
         caps.add(cap);
@@ -454,7 +454,7 @@ public class SearchActionTests extends ESTestCase {
         job2.setGroupConfig(group.build());
 
         // so that the jobs aren't exactly equal
-        job2.setMetricsConfig(Collections.singletonList(ConfigTestHelpers.getMetricConfig().build()));
+        job2.setMetricsConfig(ConfigTestHelpers.randomMetricsConfigs(random()));
         RollupJobCaps cap2 = new RollupJobCaps(job2.build());
 
         Set<RollupJobCaps> caps = new HashSet<>(2);
Some files were not shown because too many files have changed in this diff.