Merge branch 'master' into feature/client_aggs_parsing
commit 570390ac36
@@ -1,5 +1,5 @@
# When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
elasticsearch = 6.0.0-alpha1
elasticsearch = 6.0.0-alpha2
lucene = 7.0.0-snapshot-89f6d17

# optional dependencies

@@ -87,7 +87,10 @@ public class Version implements Comparable<Version> {
public static final int V_6_0_0_alpha1_ID_UNRELEASED = 6000001;
public static final Version V_6_0_0_alpha1_UNRELEASED =
new Version(V_6_0_0_alpha1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_7_0_0);
public static final Version CURRENT = V_6_0_0_alpha1_UNRELEASED;
public static final int V_6_0_0_alpha2_ID_UNRELEASED = 6000002;
public static final Version V_6_0_0_alpha2_UNRELEASED =
new Version(V_6_0_0_alpha2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_7_0_0);
public static final Version CURRENT = V_6_0_0_alpha2_UNRELEASED;

// unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)

@@ -102,6 +105,8 @@ public class Version implements Comparable<Version> {
public static Version fromId(int id) {
switch (id) {
case V_6_0_0_alpha2_ID_UNRELEASED:
return V_6_0_0_alpha2_UNRELEASED;
case V_6_0_0_alpha1_ID_UNRELEASED:
return V_6_0_0_alpha1_UNRELEASED;
case V_5_5_0_ID_UNRELEASED:

@@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.snapshots.SnapshotInfo.VERBOSE_INTRODUCED;

/**
 * Get snapshot request

@@ -43,6 +44,8 @@ public class GetSnapshotsRequest extends MasterNodeRequest<GetSnapshotsRequest>
private boolean ignoreUnavailable;

private boolean verbose = true;

public GetSnapshotsRequest() {
}

@@ -123,6 +126,7 @@ public class GetSnapshotsRequest extends MasterNodeRequest<GetSnapshotsRequest>
this.ignoreUnavailable = ignoreUnavailable;
return this;
}

/**
 * @return Whether snapshots should be ignored when unavailable (corrupt or temporarily not fetchable)
 */

@@ -130,12 +134,36 @@ public class GetSnapshotsRequest extends MasterNodeRequest<GetSnapshotsRequest>
return ignoreUnavailable;
}

/**
 * Set to {@code false} to only show the snapshot names and the indices they contain.
 * This is useful when the snapshots belong to a cloud-based repository where each
 * blob read is a concern (cost wise and performance wise), as the snapshot names and
 * indices they contain can be retrieved from a single index blob in the repository,
 * whereas the rest of the information requires reading a snapshot metadata file for
 * each snapshot requested. Defaults to {@code true}, which returns all information
 * about each requested snapshot.
 */
public GetSnapshotsRequest verbose(boolean verbose) {
this.verbose = verbose;
return this;
}

/**
 * Returns whether the request will return a verbose response.
 */
public boolean verbose() {
return verbose;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
repository = in.readString();
snapshots = in.readStringArray();
ignoreUnavailable = in.readBoolean();
if (in.getVersion().onOrAfter(VERBOSE_INTRODUCED)) {
verbose = in.readBoolean();
}
}

@Override

@@ -144,5 +172,8 @@ public class GetSnapshotsRequest extends MasterNodeRequest<GetSnapshotsRequest>
out.writeString(repository);
out.writeStringArray(snapshots);
out.writeBoolean(ignoreUnavailable);
if (out.getVersion().onOrAfter(VERBOSE_INTRODUCED)) {
out.writeBoolean(verbose);
}
}
}

@@ -96,4 +96,18 @@ public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilde
return this;
}

/**
 * Set to {@code false} to only show the snapshot names and the indices they contain.
 * This is useful when the snapshots belong to a cloud-based repository where each
 * blob read is a concern (cost wise and performance wise), as the snapshot names and
 * indices they contain can be retrieved from a single index blob in the repository,
 * whereas the rest of the information requires reading a snapshot metadata file for
 * each snapshot requested. Defaults to {@code true}, which returns all information
 * about each requested snapshot.
 */
public GetSnapshotsRequestBuilder setVerbose(boolean verbose) {
request.verbose(verbose);
return this;
}

}

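The new verbose flag is reachable both through this builder and through the verbose request parameter handled in RestGetSnapshotsAction further down. A minimal client-side sketch; the client instance and the "my_repo" repository name are illustrative, not part of this commit:

// Sketch: list snapshots without reading per-snapshot metadata blobs.
GetSnapshotsResponse response = client.admin().cluster()
        .prepareGetSnapshots("my_repo")   // "my_repo" is a hypothetical repository
        .setSnapshots("_all")
        .setVerbose(false)                // added by this change; defaults to true
        .get();
for (SnapshotInfo info : response.getSnapshots()) {
    // in non-verbose mode only the snapshot id, state and indices are populated
    System.out.println(info.snapshotId() + " -> " + info.indices());
}
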
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.cluster.snapshots.get;

import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;

@@ -30,6 +31,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInfo;

@@ -39,11 +41,13 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Transport Action for get snapshots operation

@@ -76,31 +80,35 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
}

@Override
protected void masterOperation(final GetSnapshotsRequest request, ClusterState state,
protected void masterOperation(final GetSnapshotsRequest request, final ClusterState state,
final ActionListener<GetSnapshotsResponse> listener) {
try {
final String repository = request.repository();
List<SnapshotInfo> snapshotInfoBuilder = new ArrayList<>();
final Map<String, SnapshotId> allSnapshotIds = new HashMap<>();
final List<SnapshotId> currentSnapshotIds = new ArrayList<>();
final RepositoryData repositoryData = snapshotsService.getRepositoryData(repository);
final List<SnapshotInfo> currentSnapshots = new ArrayList<>();
for (SnapshotInfo snapshotInfo : snapshotsService.currentSnapshots(repository)) {
SnapshotId snapshotId = snapshotInfo.snapshotId();
allSnapshotIds.put(snapshotId.getName(), snapshotId);
currentSnapshotIds.add(snapshotId);
currentSnapshots.add(snapshotInfo);
}

final RepositoryData repositoryData;
if (isCurrentSnapshotsOnly(request.snapshots()) == false) {
repositoryData = snapshotsService.getRepositoryData(repository);
for (SnapshotId snapshotId : repositoryData.getAllSnapshotIds()) {
allSnapshotIds.put(snapshotId.getName(), snapshotId);
}
} else {
repositoryData = null;
}

final Set<SnapshotId> toResolve = new HashSet<>();
if (isAllSnapshots(request.snapshots())) {
toResolve.addAll(allSnapshotIds.values());
} else {
for (String snapshotOrPattern : request.snapshots()) {
if (GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshotOrPattern)) {
toResolve.addAll(currentSnapshotIds);
toResolve.addAll(currentSnapshots.stream().map(SnapshotInfo::snapshotId).collect(Collectors.toList()));
} else if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) {
if (allSnapshotIds.containsKey(snapshotOrPattern)) {
toResolve.add(allSnapshotIds.get(snapshotOrPattern));

@@ -121,9 +129,23 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
}
}

snapshotInfoBuilder.addAll(snapshotsService.snapshots(
repository, new ArrayList<>(toResolve), repositoryData.getIncompatibleSnapshotIds(), request.ignoreUnavailable()));
listener.onResponse(new GetSnapshotsResponse(snapshotInfoBuilder));
final List<SnapshotInfo> snapshotInfos;
if (request.verbose()) {
final Set<SnapshotId> incompatibleSnapshots = repositoryData != null ?
new HashSet<>(repositoryData.getIncompatibleSnapshotIds()) : Collections.emptySet();
snapshotInfos = snapshotsService.snapshots(repository, new ArrayList<>(toResolve),
incompatibleSnapshots, request.ignoreUnavailable());
} else {
if (repositoryData != null) {
// want non-current snapshots as well, which are found in the repository data
snapshotInfos = buildSimpleSnapshotInfos(toResolve, repositoryData, currentSnapshots);
} else {
// only want current snapshots
snapshotInfos = currentSnapshots.stream().map(SnapshotInfo::basic).collect(Collectors.toList());
CollectionUtil.timSort(snapshotInfos);
}
}
listener.onResponse(new GetSnapshotsResponse(snapshotInfos));
} catch (Exception e) {
listener.onFailure(e);
}

@@ -136,4 +158,32 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
private boolean isCurrentSnapshotsOnly(String[] snapshots) {
return (snapshots.length == 1 && GetSnapshotsRequest.CURRENT_SNAPSHOT.equalsIgnoreCase(snapshots[0]));
}

private List<SnapshotInfo> buildSimpleSnapshotInfos(final Set<SnapshotId> toResolve,
final RepositoryData repositoryData,
final List<SnapshotInfo> currentSnapshots) {
List<SnapshotInfo> snapshotInfos = new ArrayList<>();
for (SnapshotInfo snapshotInfo : currentSnapshots) {
if (toResolve.remove(snapshotInfo.snapshotId())) {
snapshotInfos.add(snapshotInfo.basic());
}
}
Map<SnapshotId, List<String>> snapshotsToIndices = new HashMap<>();
for (IndexId indexId : repositoryData.getIndices().values()) {
for (SnapshotId snapshotId : repositoryData.getSnapshots(indexId)) {
if (toResolve.contains(snapshotId)) {
snapshotsToIndices.computeIfAbsent(snapshotId, (k) -> new ArrayList<>())
.add(indexId.getName());
}
}
}
for (Map.Entry<SnapshotId, List<String>> entry : snapshotsToIndices.entrySet()) {
final List<String> indices = entry.getValue();
CollectionUtil.timSort(indices);
final SnapshotId snapshotId = entry.getKey();
snapshotInfos.add(new SnapshotInfo(snapshotId, indices, repositoryData.getSnapshotState(snapshotId)));
}
CollectionUtil.timSort(snapshotInfos);
return Collections.unmodifiableList(snapshotInfos);
}
}

@@ -163,16 +163,6 @@ final class Bootstrap {

try {
spawner.spawnNativePluginControllers(environment);
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
try {
spawner.close();
} catch (IOException e) {
throw new ElasticsearchException("Failed to destroy spawned controllers", e);
}
}
});
} catch (IOException e) {
throw new BootstrapException(e);
}

@@ -191,7 +181,7 @@ final class Bootstrap {
@Override
public void run() {
try {
IOUtils.close(node);
IOUtils.close(node, spawner);
LoggerContext context = (LoggerContext) LogManager.getContext(false);
Configurator.shutdown(context);
} catch (IOException ex) {

@@ -269,7 +259,7 @@ final class Bootstrap {

static void stop() throws IOException {
try {
IOUtils.close(INSTANCE.node);
IOUtils.close(INSTANCE.node, INSTANCE.spawner);
} finally {
INSTANCE.keepAliveLatch.countDown();
}

@@ -276,7 +276,11 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
for (Index index : closeIndices) {
final IndexMetaData currentMetaData = currentState.getMetaData().getIndexSafe(index);
final IndexMetaData updatedMetaData = updatedState.metaData().getIndexSafe(index);
// Verifies that the current index settings can be updated with the updated dynamic settings.
indicesService.verifyIndexMetadata(currentMetaData, updatedMetaData);
// Now check that we can create the index with the updated settings (dynamic and non-dynamic).
// This step is mandatory since we allow to update non-dynamic settings on closed indices.
indicesService.verifyIndexMetadata(updatedMetaData, updatedMetaData);
}
} catch (IOException ex) {
throw ExceptionsHelper.convertToElastic(ex);

@@ -27,6 +27,7 @@ import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.analysis.PreBuiltCacheFactory;
import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;

import java.io.IOException;
import java.util.function.BiFunction;

@@ -36,31 +37,46 @@ import java.util.function.Function;
 * Provides pre-configured, shared {@link TokenFilter}s.
 */
public final class PreConfiguredTokenFilter implements AnalysisModule.AnalysisProvider<TokenFilterFactory> {
/**
 * Create a pre-configured token filter that may not vary at all.
 */
public static PreConfiguredTokenFilter singleton(String name, boolean useFilterForMultitermQueries,
Function<TokenStream, TokenStream> create) {
return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, CachingStrategy.ONE,
(tokenStream, version) -> create.apply(tokenStream));
}

/**
 * Create a pre-configured token filter that may vary based on the Lucene version.
 */
public static PreConfiguredTokenFilter luceneVersion(String name, boolean useFilterForMultitermQueries,
BiFunction<TokenStream, org.apache.lucene.util.Version, TokenStream> create) {
return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, CachingStrategy.LUCENE,
(tokenStream, version) -> create.apply(tokenStream, version.luceneVersion));
}

/**
 * Create a pre-configured token filter that may vary based on the Elasticsearch version.
 */
public static PreConfiguredTokenFilter elasticsearchVersion(String name, boolean useFilterForMultitermQueries,
BiFunction<TokenStream, org.elasticsearch.Version, TokenStream> create) {
return new PreConfiguredTokenFilter(name, useFilterForMultitermQueries, CachingStrategy.ELASTICSEARCH,
(tokenStream, version) -> create.apply(tokenStream, version));
}

private final String name;
private final boolean useFilterForMultitermQueries;
private final PreBuiltCacheFactory.PreBuiltCache<TokenFilterFactory> cache;
private final BiFunction<TokenStream, Version, TokenStream> create;

/**
 * Standard ctor with all the power.
 */
public PreConfiguredTokenFilter(String name, boolean useFilterForMultitermQueries,
PreBuiltCacheFactory.CachingStrategy cachingStrategy, BiFunction<TokenStream, Version, TokenStream> create) {
private PreConfiguredTokenFilter(String name, boolean useFilterForMultitermQueries,
PreBuiltCacheFactory.CachingStrategy cache, BiFunction<TokenStream, Version, TokenStream> create) {
this.name = name;
this.useFilterForMultitermQueries = useFilterForMultitermQueries;
cache = PreBuiltCacheFactory.getCache(cachingStrategy);
this.cache = PreBuiltCacheFactory.getCache(cache);
this.create = create;
}

/**
 * Convenience ctor for token streams that don't vary based on version.
 */
public PreConfiguredTokenFilter(String name, boolean useFilterForMultitermQueries,
PreBuiltCacheFactory.CachingStrategy cachingStrategy, Function<TokenStream, TokenStream> create) {
this(name, useFilterForMultitermQueries, cachingStrategy, (input, version) -> create.apply(input));
// TODO why oh why aren't these all CachingStrategy.ONE? They *can't* vary based on version because they don't get it, right?!
}

@Override
public TokenFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) throws IOException {
return getTokenFilterFactory(Version.indexCreated(settings));

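The public constructors above are replaced by these static factories; AnalysisModule below switches to singleton(). A short sketch of the two most common creation styles, with the filter choices being illustrative rather than taken from this commit:

// Sketch: pre-configured filters built via the new factory methods.
PreConfiguredTokenFilter lowercase =
        PreConfiguredTokenFilter.singleton("lowercase", true, LowerCaseFilter::new);
PreConfiguredTokenFilter decimalDigit =
        PreConfiguredTokenFilter.luceneVersion("decimal_digit", false,
                (tokenStream, luceneVersion) -> new DecimalDigitFilter(tokenStream));
// elasticsearchVersion(...) follows the same shape but hands the filter the
// org.elasticsearch.Version instead of the Lucene version.
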
@@ -1515,8 +1515,6 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
 */
public void updateGlobalCheckpointOnReplica(final long globalCheckpoint) {
verifyReplicationTarget();
// we sample the recovery stage before sampling the local checkpoint or we are subject to a race condition in the below assertion
final RecoveryState.Stage stage = recoveryState().getStage();
final SequenceNumbersService seqNoService = getEngine().seqNoService();
final long localCheckpoint = seqNoService.getLocalCheckpoint();
if (globalCheckpoint > localCheckpoint) {

@@ -1526,10 +1524,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
 * case that the global checkpoint update from the primary is ahead of the local checkpoint on this shard. In this case, we
 * ignore the global checkpoint update. This can happen if we are in the translog stage of recovery. Prior to this, the engine
 * is not opened and this shard will not receive global checkpoint updates, and after this the shard will be contributing to
 * calculations of the the global checkpoint.
 * calculations of the the global checkpoint. However, we can not assert that we are in the translog stage of recovery here as
 * while the global checkpoint update may have emanated from the primary when we were in that state, we could subsequently move
 * to recovery finalization, or even finished recovery before the update arrives here.
 */
assert stage == RecoveryState.Stage.TRANSLOG
: "expected recovery stage [" + RecoveryState.Stage.TRANSLOG + "] but was [" + stage + "]";
return;
}
seqNoService.updateGlobalCheckpointOnReplica(globalCheckpoint);

@@ -272,10 +272,8 @@ public final class AnalysisModule {
NamedRegistry<PreConfiguredTokenFilter> preConfiguredTokenFilters = new NamedRegistry<>("pre-configured token_filter");

// Add filters available in lucene-core
preConfiguredTokenFilters.register("lowercase",
new PreConfiguredTokenFilter("lowercase", true, CachingStrategy.LUCENE, LowerCaseFilter::new));
preConfiguredTokenFilters.register("standard",
new PreConfiguredTokenFilter("standard", false, CachingStrategy.LUCENE, StandardFilter::new));
preConfiguredTokenFilters.register("lowercase", PreConfiguredTokenFilter.singleton("lowercase", true, LowerCaseFilter::new));
preConfiguredTokenFilters.register("standard", PreConfiguredTokenFilter.singleton("standard", false, StandardFilter::new));
/* Note that "stop" is available in lucene-core but it's pre-built
 * version uses a set of English stop words that are in
 * lucene-analyzers-common so "stop" is defined in the analysis-common

@@ -288,9 +286,12 @@ public final class AnalysisModule {
// This has been migrated but has to stick around until PreBuiltTokenizers is removed.
continue;
default:
if (CachingStrategy.ONE != preBuilt.getCachingStrategy()) {
throw new UnsupportedOperationException("shim not available for " + preBuilt.getCachingStrategy());
}
String name = preBuilt.name().toLowerCase(Locale.ROOT);
preConfiguredTokenFilters.register(name,
new PreConfiguredTokenFilter(name, preBuilt.isMultiTermAware(), preBuilt.getCachingStrategy(), preBuilt::create));
preConfiguredTokenFilters.register(name, PreConfiguredTokenFilter.singleton(name, preBuilt.isMultiTermAware(),
tokenStream -> preBuilt.create(tokenStream, Version.CURRENT)));
}
}

@@ -30,7 +30,6 @@ import org.apache.lucene.analysis.core.DecimalDigitFilter;
import org.apache.lucene.analysis.cz.CzechStemFilter;
import org.apache.lucene.analysis.de.GermanNormalizationFilter;
import org.apache.lucene.analysis.de.GermanStemFilter;
import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.fa.PersianNormalizationFilter;
import org.apache.lucene.analysis.fr.FrenchAnalyzer;
import org.apache.lucene.analysis.hi.HindiNormalizationFilter;

@@ -70,20 +69,6 @@ public enum PreBuiltTokenFilters {
},

// Extended Token Filters
SNOWBALL(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new SnowballFilter(tokenStream, "English");
}
},

STEMMER(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {
return new PorterStemFilter(tokenStream);
}
},

ELISION(CachingStrategy.ONE) {
@Override
public TokenStream create(TokenStream tokenStream, Version version) {

@@ -20,14 +20,17 @@
package org.elasticsearch.repositories;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotState;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashSet;

@@ -51,8 +54,8 @@ public final class RepositoryData {
/**
 * An instance initialized for an empty repository.
 */
public static final RepositoryData EMPTY =
new RepositoryData(EMPTY_REPO_GEN, Collections.emptyList(), Collections.emptyMap(), Collections.emptyList());
public static final RepositoryData EMPTY = new RepositoryData(EMPTY_REPO_GEN,
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyList());

/**
 * The generational id of the index file from which the repository data was read.

@@ -61,7 +64,11 @@ public final class RepositoryData {
/**
 * The ids of the snapshots in the repository.
 */
private final List<SnapshotId> snapshotIds;
private final Map<String, SnapshotId> snapshotIds;
/**
 * The states of each snapshot in the repository.
 */
private final Map<String, SnapshotState> snapshotStates;
/**
 * The indices found in the repository across all snapshots, as a name to {@link IndexId} mapping
 */

@@ -75,19 +82,22 @@ public final class RepositoryData {
 */
private final List<SnapshotId> incompatibleSnapshotIds;

public RepositoryData(long genId, List<SnapshotId> snapshotIds, Map<IndexId, Set<SnapshotId>> indexSnapshots,
public RepositoryData(long genId,
Map<String, SnapshotId> snapshotIds,
Map<String, SnapshotState> snapshotStates,
Map<IndexId, Set<SnapshotId>> indexSnapshots,
List<SnapshotId> incompatibleSnapshotIds) {
this.genId = genId;
this.snapshotIds = Collections.unmodifiableList(snapshotIds);
this.indices = Collections.unmodifiableMap(indexSnapshots.keySet()
.stream()
this.snapshotIds = Collections.unmodifiableMap(snapshotIds);
this.snapshotStates = Collections.unmodifiableMap(snapshotStates);
this.indices = Collections.unmodifiableMap(indexSnapshots.keySet().stream()
.collect(Collectors.toMap(IndexId::getName, Function.identity())));
this.indexSnapshots = Collections.unmodifiableMap(indexSnapshots);
this.incompatibleSnapshotIds = Collections.unmodifiableList(incompatibleSnapshotIds);
}

protected RepositoryData copy() {
return new RepositoryData(genId, snapshotIds, indexSnapshots, incompatibleSnapshotIds);
return new RepositoryData(genId, snapshotIds, snapshotStates, indexSnapshots, incompatibleSnapshotIds);
}

/**

@@ -98,17 +108,17 @@ public final class RepositoryData {
}

/**
 * Returns an unmodifiable list of the snapshot ids.
 * Returns an unmodifiable collection of the snapshot ids.
 */
public List<SnapshotId> getSnapshotIds() {
return snapshotIds;
public Collection<SnapshotId> getSnapshotIds() {
return Collections.unmodifiableCollection(snapshotIds.values());
}

/**
 * Returns an immutable collection of the snapshot ids in the repository that are incompatible with the
 * current ES version.
 */
public List<SnapshotId> getIncompatibleSnapshotIds() {
public Collection<SnapshotId> getIncompatibleSnapshotIds() {
return incompatibleSnapshotIds;
}

@@ -116,13 +126,22 @@ public final class RepositoryData {
 * Returns an immutable collection of all the snapshot ids in the repository, both active and
 * incompatible snapshots.
 */
public List<SnapshotId> getAllSnapshotIds() {
public Collection<SnapshotId> getAllSnapshotIds() {
List<SnapshotId> allSnapshotIds = new ArrayList<>(snapshotIds.size() + incompatibleSnapshotIds.size());
allSnapshotIds.addAll(snapshotIds);
allSnapshotIds.addAll(snapshotIds.values());
allSnapshotIds.addAll(incompatibleSnapshotIds);
return Collections.unmodifiableList(allSnapshotIds);
}

/**
 * Returns the {@link SnapshotState} for the given snapshot. Returns {@code null} if
 * there is no state for the snapshot.
 */
@Nullable
public SnapshotState getSnapshotState(final SnapshotId snapshotId) {
return snapshotStates.get(snapshotId.getUUID());
}

/**
 * Returns an unmodifiable map of the index names to {@link IndexId} in the repository.
 */

@@ -134,15 +153,19 @@ public final class RepositoryData {
 * Add a snapshot and its indices to the repository; returns a new instance. If the snapshot
 * already exists in the repository data, this method throws an IllegalArgumentException.
 */
public RepositoryData addSnapshot(final SnapshotId snapshotId, final List<IndexId> snapshottedIndices) {
if (snapshotIds.contains(snapshotId)) {
public RepositoryData addSnapshot(final SnapshotId snapshotId,
final SnapshotState snapshotState,
final List<IndexId> snapshottedIndices) {
if (snapshotIds.containsKey(snapshotId.getUUID())) {
// if the snapshot id already exists in the repository data, it means an old master
// that is blocked from the cluster is trying to finalize a snapshot concurrently with
// the new master, so we make the operation idempotent
return this;
}
List<SnapshotId> snapshots = new ArrayList<>(snapshotIds);
snapshots.add(snapshotId);
Map<String, SnapshotId> snapshots = new HashMap<>(snapshotIds);
snapshots.put(snapshotId.getUUID(), snapshotId);
Map<String, SnapshotState> newSnapshotStates = new HashMap<>(snapshotStates);
newSnapshotStates.put(snapshotId.getUUID(), snapshotState);
Map<IndexId, Set<SnapshotId>> allIndexSnapshots = new HashMap<>(indexSnapshots);
for (final IndexId indexId : snapshottedIndices) {
if (allIndexSnapshots.containsKey(indexId)) {

@@ -158,17 +181,18 @@ public final class RepositoryData {
allIndexSnapshots.put(indexId, ids);
}
}
return new RepositoryData(genId, snapshots, allIndexSnapshots, incompatibleSnapshotIds);
return new RepositoryData(genId, snapshots, newSnapshotStates, allIndexSnapshots, incompatibleSnapshotIds);
}

/**
 * Remove a snapshot and remove any indices that no longer exist in the repository due to the deletion of the snapshot.
 */
public RepositoryData removeSnapshot(final SnapshotId snapshotId) {
List<SnapshotId> newSnapshotIds = snapshotIds
.stream()
Map<String, SnapshotId> newSnapshotIds = snapshotIds.values().stream()
.filter(id -> snapshotId.equals(id) == false)
.collect(Collectors.toList());
.collect(Collectors.toMap(SnapshotId::getUUID, Function.identity()));
Map<String, SnapshotState> newSnapshotStates = new HashMap<>(snapshotStates);
newSnapshotStates.remove(snapshotId.getUUID());
Map<IndexId, Set<SnapshotId>> indexSnapshots = new HashMap<>();
for (final IndexId indexId : indices.values()) {
Set<SnapshotId> set;

@@ -176,7 +200,8 @@ public final class RepositoryData {
assert snapshotIds != null;
if (snapshotIds.contains(snapshotId)) {
if (snapshotIds.size() == 1) {
// removing the snapshot will mean no more snapshots have this index, so just skip over it
// removing the snapshot will mean no more snapshots
// have this index, so just skip over it
continue;
}
set = new LinkedHashSet<>(snapshotIds);

@@ -187,21 +212,7 @@ public final class RepositoryData {
indexSnapshots.put(indexId, set);
}

return new RepositoryData(genId, newSnapshotIds, indexSnapshots, incompatibleSnapshotIds);
}

/**
 * Returns a new {@link RepositoryData} instance containing the same snapshot data as the
 * invoking instance, with the given incompatible snapshots added to the new instance.
 */
public RepositoryData addIncompatibleSnapshots(final List<SnapshotId> incompatibleSnapshotIds) {
List<SnapshotId> newSnapshotIds = new ArrayList<>(this.snapshotIds);
List<SnapshotId> newIncompatibleSnapshotIds = new ArrayList<>(this.incompatibleSnapshotIds);
for (SnapshotId snapshotId : incompatibleSnapshotIds) {
newSnapshotIds.remove(snapshotId);
newIncompatibleSnapshotIds.add(snapshotId);
}
return new RepositoryData(this.genId, newSnapshotIds, this.indexSnapshots, newIncompatibleSnapshotIds);
return new RepositoryData(genId, newSnapshotIds, newSnapshotStates, indexSnapshots, incompatibleSnapshotIds);
}

/**

@@ -219,7 +230,7 @@ public final class RepositoryData {
 * Initializes the indices in the repository metadata; returns a new instance.
 */
public RepositoryData initIndices(final Map<IndexId, Set<SnapshotId>> indexSnapshots) {
return new RepositoryData(genId, snapshotIds, indexSnapshots, incompatibleSnapshotIds);
return new RepositoryData(genId, snapshotIds, snapshotStates, indexSnapshots, incompatibleSnapshotIds);
}

@Override

@@ -232,6 +243,7 @@ public final class RepositoryData {
}
@SuppressWarnings("unchecked") RepositoryData that = (RepositoryData) obj;
return snapshotIds.equals(that.snapshotIds)
&& snapshotStates.equals(that.snapshotStates)
&& indices.equals(that.indices)
&& indexSnapshots.equals(that.indexSnapshots)
&& incompatibleSnapshotIds.equals(that.incompatibleSnapshotIds);

@@ -239,7 +251,7 @@ public final class RepositoryData {

@Override
public int hashCode() {
return Objects.hash(snapshotIds, indices, indexSnapshots, incompatibleSnapshotIds);
return Objects.hash(snapshotIds, snapshotStates, indices, indexSnapshots, incompatibleSnapshotIds);
}

/**

@@ -291,6 +303,9 @@ public final class RepositoryData {
private static final String INCOMPATIBLE_SNAPSHOTS = "incompatible-snapshots";
private static final String INDICES = "indices";
private static final String INDEX_ID = "id";
private static final String NAME = "name";
private static final String UUID = "uuid";
private static final String STATE = "state";

/**
 * Writes the snapshots metadata and the related indices metadata to x-content, omitting the

@@ -301,7 +316,13 @@ public final class RepositoryData {
// write the snapshots list
builder.startArray(SNAPSHOTS);
for (final SnapshotId snapshot : getSnapshotIds()) {
snapshot.toXContent(builder, params);
builder.startObject();
builder.field(NAME, snapshot.getName());
builder.field(UUID, snapshot.getUUID());
if (snapshotStates.containsKey(snapshot.getUUID())) {
builder.field(STATE, snapshotStates.get(snapshot.getUUID()).value());
}
builder.endObject();
}
builder.endArray();
// write the indices map

@@ -313,7 +334,7 @@ public final class RepositoryData {
Set<SnapshotId> snapshotIds = indexSnapshots.get(indexId);
assert snapshotIds != null;
for (final SnapshotId snapshotId : snapshotIds) {
snapshotId.toXContent(builder, params);
builder.value(snapshotId.getUUID());
}
builder.endArray();
builder.endObject();

@@ -327,20 +348,47 @@ public final class RepositoryData {
 * Reads an instance of {@link RepositoryData} from x-content, loading the snapshots and indices metadata.
 */
public static RepositoryData snapshotsFromXContent(final XContentParser parser, long genId) throws IOException {
List<SnapshotId> snapshots = new ArrayList<>();
Map<String, SnapshotId> snapshots = new HashMap<>();
Map<String, SnapshotState> snapshotStates = new HashMap<>();
Map<IndexId, Set<SnapshotId>> indexSnapshots = new HashMap<>();
if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
String currentFieldName = parser.currentName();
if (SNAPSHOTS.equals(currentFieldName)) {
String field = parser.currentName();
if (SNAPSHOTS.equals(field)) {
if (parser.nextToken() == XContentParser.Token.START_ARRAY) {
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
snapshots.add(SnapshotId.fromXContent(parser));
final SnapshotId snapshotId;
// the new format from 5.0 which contains the snapshot name and uuid
if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
String name = null;
String uuid = null;
SnapshotState state = null;
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
String currentFieldName = parser.currentName();
parser.nextToken();
if (NAME.equals(currentFieldName)) {
name = parser.text();
} else if (UUID.equals(currentFieldName)) {
uuid = parser.text();
} else if (STATE.equals(currentFieldName)) {
state = SnapshotState.fromValue(parser.numberValue().byteValue());
}
}
snapshotId = new SnapshotId(name, uuid);
if (state != null) {
snapshotStates.put(uuid, state);
}
} else {
throw new ElasticsearchParseException("expected array for [" + currentFieldName + "]");
// the old format pre 5.0 that only contains the snapshot name, use the name as the uuid too
final String name = parser.text();
snapshotId = new SnapshotId(name, name);
}
} else if (INDICES.equals(currentFieldName)) {
snapshots.put(snapshotId.getUUID(), snapshotId);
}
} else {
throw new ElasticsearchParseException("expected array for [" + field + "]");
}
} else if (INDICES.equals(field)) {
if (parser.nextToken() != XContentParser.Token.START_OBJECT) {
throw new ElasticsearchParseException("start object expected [indices]");
}

@@ -361,7 +409,22 @@ public final class RepositoryData {
throw new ElasticsearchParseException("start array expected [snapshots]");
}
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
snapshotIds.add(SnapshotId.fromXContent(parser));
String uuid = null;
// the old format pre 5.4.1 which contains the snapshot name and uuid
if (parser.currentToken() == XContentParser.Token.START_OBJECT) {
while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
String currentFieldName = parser.currentName();
parser.nextToken();
if (UUID.equals(currentFieldName)) {
uuid = parser.text();
}
}
} else {
// the new format post 5.4.1 that only contains the snapshot uuid,
// since we already have the name/uuid combo in the snapshots array
uuid = parser.text();
}
snapshotIds.add(snapshots.get(uuid));
}
}
}

@@ -369,13 +432,13 @@ public final class RepositoryData {
indexSnapshots.put(new IndexId(indexName, indexId), snapshotIds);
}
} else {
throw new ElasticsearchParseException("unknown field name [" + currentFieldName + "]");
throw new ElasticsearchParseException("unknown field name [" + field + "]");
}
}
} else {
throw new ElasticsearchParseException("start object expected");
}
return new RepositoryData(genId, snapshots, indexSnapshots, Collections.emptyList());
return new RepositoryData(genId, snapshots, snapshotStates, indexSnapshots, Collections.emptyList());
}

/**

@@ -419,7 +482,7 @@ public final class RepositoryData {
} else {
throw new ElasticsearchParseException("start object expected");
}
return new RepositoryData(this.genId, this.snapshotIds, this.indexSnapshots, incompatibleSnapshotIds);
return new RepositoryData(this.genId, this.snapshotIds, this.snapshotStates, this.indexSnapshots, incompatibleSnapshotIds);
}

}

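With states now keyed by snapshot UUID, the serialized index-N blob written by snapshotsToXContent gains a state field per snapshot, while each index's snapshots array shrinks to bare UUIDs. Roughly, with illustrative names, UUIDs and values (the state field holds the SnapshotState byte value, e.g. 1 for SUCCESS):

{
  "snapshots": [
    { "name": "snap_1", "uuid": "k3yYk9cURQ6rxHB3Grz1kQ", "state": 1 }
  ],
  "indices": {
    "my_index": {
      "id": "F34K9GqVQJGGWNUGmXjhGg",
      "snapshots": [ "k3yYk9cURQ6rxHB3Grz1kQ" ]
    }
  }
}
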
@@ -473,10 +473,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
shardFailures);
snapshotFormat.write(blobStoreSnapshot, snapshotsBlobContainer, snapshotId.getUUID());
final RepositoryData repositoryData = getRepositoryData();
List<SnapshotId> snapshotIds = repositoryData.getSnapshotIds();
if (!snapshotIds.contains(snapshotId)) {
writeIndexGen(repositoryData.addSnapshot(snapshotId, indices), repositoryStateId);
}
writeIndexGen(repositoryData.addSnapshot(snapshotId, blobStoreSnapshot.state(), indices), repositoryStateId);
return blobStoreSnapshot;
} catch (IOException ex) {
throw new RepositoryException(metadata.name(), "failed to update snapshot in repository", ex);

@@ -42,7 +42,6 @@ public class RestGetSnapshotsAction extends BaseRestHandler {
controller.registerHandler(GET, "/_snapshot/{repository}/{snapshot}", this);
}


@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
String repository = request.param("repository");

@@ -50,7 +49,7 @@ public class RestGetSnapshotsAction extends BaseRestHandler {

GetSnapshotsRequest getSnapshotsRequest = getSnapshotsRequest(repository).snapshots(snapshots);
getSnapshotsRequest.ignoreUnavailable(request.paramAsBoolean("ignore_unavailable", getSnapshotsRequest.ignoreUnavailable()));

getSnapshotsRequest.verbose(request.paramAsBoolean("verbose", getSnapshotsRequest.verbose()));
getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSnapshotsRequest.masterNodeTimeout()));
return channel -> client.admin().cluster().getSnapshots(getSnapshotsRequest, new RestToXContentListener<>(channel));
}

@@ -41,7 +41,8 @@ public class ScriptSettings {
scriptTypeSettingMap.put(scriptType, Setting.boolSetting(
ScriptModes.sourceKey(scriptType),
scriptType.isDefaultEnabled(),
Property.NodeScope));
Property.NodeScope,
Property.Deprecated));
}
SCRIPT_TYPE_SETTING_MAP = Collections.unmodifiableMap(scriptTypeSettingMap);
}

@@ -61,7 +62,7 @@ public class ScriptSettings {
Map<ScriptContext, Setting<Boolean>> scriptContextSettingMap = new HashMap<>();
for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) {
scriptContextSettingMap.put(scriptContext,
Setting.boolSetting(ScriptModes.operationKey(scriptContext), false, Property.NodeScope));
Setting.boolSetting(ScriptModes.operationKey(scriptContext), false, Property.NodeScope, Property.Deprecated));
}
return scriptContextSettingMap;
}

@@ -91,7 +92,7 @@ public class ScriptSettings {
Function<Settings, String> defaultLangAndTypeFn = settings -> {
final Setting<Boolean> globalTypeSetting = scriptTypeSettingMap.get(scriptType);
final Setting<Boolean> langAndTypeSetting = Setting.boolSetting(ScriptModes.getGlobalKey(language, scriptType),
defaultIfNothingSet, Property.NodeScope);
defaultIfNothingSet, Property.NodeScope, Property.Deprecated);

if (langAndTypeSetting.exists(settings)) {
// fine-grained e.g. script.engine.groovy.inline

@@ -106,7 +107,7 @@ public class ScriptSettings {

// Setting for something like "script.engine.groovy.inline"
final Setting<Boolean> langAndTypeSetting = Setting.boolSetting(ScriptModes.getGlobalKey(language, scriptType),
defaultLangAndTypeFn, Property.NodeScope);
defaultLangAndTypeFn, Property.NodeScope, Property.Deprecated);
scriptModeSettings.add(langAndTypeSetting);

for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) {

@@ -117,7 +118,7 @@ public class ScriptSettings {
final Setting<Boolean> globalOpSetting = scriptContextSettingMap.get(scriptContext);
final Setting<Boolean> globalTypeSetting = scriptTypeSettingMap.get(scriptType);
final Setting<Boolean> langAndTypeAndContextSetting = Setting.boolSetting(langAndTypeAndContextName,
defaultIfNothingSet, Property.NodeScope);
defaultIfNothingSet, Property.NodeScope, Property.Deprecated);

// fallback logic for script mode settings
if (langAndTypeAndContextSetting.exists(settings)) {

@@ -138,7 +139,8 @@ public class ScriptSettings {
}
};
// The actual setting for finest grained script settings
Setting<Boolean> setting = Setting.boolSetting(langAndTypeAndContextName, defaultSettingFn, Property.NodeScope);
Setting<Boolean> setting =
Setting.boolSetting(langAndTypeAndContextName, defaultSettingFn, Property.NodeScope, Property.Deprecated);
scriptModeSettings.add(setting);
}
}

@@ -32,7 +32,7 @@ import java.util.Objects;
/**
 * SnapshotId - snapshot name + snapshot UUID
 */
public final class SnapshotId implements Writeable, ToXContent {
public final class SnapshotId implements Comparable<SnapshotId>, Writeable, ToXContent {

private static final String NAME = "name";
private static final String UUID = "uuid";

@@ -106,6 +106,11 @@ public final class SnapshotId implements Writeable, ToXContent {
return hashCode;
}

@Override
public int compareTo(final SnapshotId other) {
return this.name.compareTo(other.name);
}

private int computeHashCode() {
return Objects.hash(name, uuid);
}

@@ -35,6 +35,7 @@ import org.elasticsearch.rest.RestStatus;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;

@@ -69,11 +70,17 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
private static final String SUCCESSFUL_SHARDS = "successful_shards";

private static final Version VERSION_INCOMPATIBLE_INTRODUCED = Version.V_5_2_0_UNRELEASED;
public static final Version VERBOSE_INTRODUCED = Version.V_5_5_0_UNRELEASED;

private static final Comparator<SnapshotInfo> COMPARATOR =
Comparator.comparing(SnapshotInfo::startTime).thenComparing(SnapshotInfo::snapshotId);

private final SnapshotId snapshotId;

@Nullable
private final SnapshotState state;

@Nullable
private final String reason;

private final List<String> indices;

@@ -91,6 +98,10 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,

private final List<SnapshotShardFailure> shardFailures;

public SnapshotInfo(SnapshotId snapshotId, List<String> indices, SnapshotState state) {
this(snapshotId, indices, state, null, null, 0L, 0L, 0, 0, Collections.emptyList());
}

public SnapshotInfo(SnapshotId snapshotId, List<String> indices, long startTime) {
this(snapshotId, indices, SnapshotState.IN_PROGRESS, null, Version.CURRENT, startTime, 0L, 0, 0, Collections.emptyList());
}

@@ -104,8 +115,8 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
private SnapshotInfo(SnapshotId snapshotId, List<String> indices, SnapshotState state, String reason, Version version,
long startTime, long endTime, int totalShards, int successfulShards, List<SnapshotShardFailure> shardFailures) {
this.snapshotId = Objects.requireNonNull(snapshotId);
this.indices = Objects.requireNonNull(indices);
this.state = Objects.requireNonNull(state);
this.indices = Collections.unmodifiableList(Objects.requireNonNull(indices));
this.state = state;
this.reason = reason;
this.version = version;
this.startTime = startTime;

@@ -126,7 +137,11 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
indicesListBuilder.add(in.readString());
}
indices = Collections.unmodifiableList(indicesListBuilder);
if (in.getVersion().onOrAfter(VERBOSE_INTRODUCED)) {
state = in.readBoolean() ? SnapshotState.fromValue(in.readByte()) : null;
} else {
state = SnapshotState.fromValue(in.readByte());
}
reason = in.readOptionalString();
startTime = in.readVLong();
endTime = in.readVLong();

@@ -159,6 +174,14 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
null, 0L, 0L, 0, 0, Collections.emptyList());
}

/**
 * Gets a new {@link SnapshotInfo} instance from the given {@link SnapshotInfo} with
 * all information stripped out except the snapshot id, state, and indices.
 */
public SnapshotInfo basic() {
return new SnapshotInfo(snapshotId, indices, state);
}

/**
 * Returns snapshot id
 *

@@ -169,25 +192,27 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
}

/**
 * Returns snapshot state
 * Returns snapshot state; {@code null} if the state is unknown.
 *
 * @return snapshot state
 */
@Nullable
public SnapshotState state() {
return state;
}

/**
 * Returns snapshot failure reason
 * Returns snapshot failure reason; {@code null} if the snapshot succeeded.
 *
 * @return snapshot failure reason
 */
@Nullable
public String reason() {
return reason;
}

/**
 * Returns indices that were included into this snapshot
 * Returns indices that were included in this snapshot.
 *
 * @return list of indices
 */

@@ -196,7 +221,8 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
}

/**
 * Returns time when snapshot started
 * Returns time when snapshot started; a value of {@code 0L} will be returned if
 * {@link #state()} returns {@code null}.
 *
 * @return snapshot start time
 */

@@ -205,9 +231,8 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
}

/**
 * Returns time when snapshot ended
 * <p>
 * Can be 0L if snapshot is still running
 * Returns time when snapshot ended; a value of {@code 0L} will be returned if the
 * snapshot is still running or if {@link #state()} returns {@code null}.
 *
 * @return snapshot end time
 */

@@ -216,7 +241,8 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
}

/**
 * Returns total number of shards that were snapshotted
 * Returns total number of shards that were snapshotted; a value of {@code 0} will
 * be returned if {@link #state()} returns {@code null}.
 *
 * @return number of shards
 */

@@ -225,7 +251,8 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
}

/**
 * Number of failed shards
 * Number of failed shards; a value of {@code 0} will be returned if there were no
 * failed shards, or if {@link #state()} returns {@code null}.
 *
 * @return number of failed shards
 */

@@ -234,7 +261,8 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
}

/**
 * Returns total number of shards that were successfully snapshotted
 * Returns total number of shards that were successfully snapshotted; a value of
 * {@code 0} will be returned if {@link #state()} returns {@code null}.
 *
 * @return number of successful shards
 */

@@ -243,7 +271,8 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
}

/**
 * Returns shard failures
 * Returns shard failures; an empty list will be returned if there were no shard
 * failures, or if {@link #state()} returns {@code null}.
 *
 * @return shard failures
 */

@@ -253,7 +282,7 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,

/**
 * Returns the version of elasticsearch that the snapshot was created with. Will only
 * return {@code null} if {@link #state()} returns {@link SnapshotState#INCOMPATIBLE}.
 * return {@code null} if {@link #state()} returns {@code null} or {@link SnapshotState#INCOMPATIBLE}.
 *
 * @return version of elasticsearch that the snapshot was created with
 */

@@ -263,16 +292,12 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
}

/**
 * Compares two snapshots by their start time
 *
 * @param o other snapshot
 * @return the value {@code 0} if snapshots were created at the same time;
 * a value less than {@code 0} if this snapshot was created before snapshot {@code o}; and
 * a value greater than {@code 0} if this snapshot was created after snapshot {@code o};
 * Compares two snapshots by their start time; if the start times are the same, then
 * compares the two snapshots by their snapshot ids.
 */
@Override
public int compareTo(final SnapshotInfo o) {
return Long.compare(startTime, o.startTime);
return COMPARATOR.compare(this, o);
}

@Override

@@ -328,15 +353,15 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
if (version != null) {
builder.field(VERSION_ID, version.id);
builder.field(VERSION, version.toString());
} else {
builder.field(VERSION, "unknown");
}
builder.startArray(INDICES);
for (String index : indices) {
builder.value(index);
}
builder.endArray();
if (state != null) {
builder.field(STATE, state);
}
if (reason != null) {
builder.field(REASON, reason);
}

@@ -349,6 +374,7 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
builder.field(END_TIME_IN_MILLIS, endTime);
builder.timeValueField(DURATION_IN_MILLIS, DURATION, endTime - startTime);
}
if (!shardFailures.isEmpty()) {
builder.startArray(FAILURES);
for (SnapshotShardFailure shardFailure : shardFailures) {
builder.startObject();

@@ -356,11 +382,14 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
builder.endObject();
}
builder.endArray();
}
if (totalShards != 0) {
builder.startObject(SHARDS);
builder.field(TOTAL, totalShards);
builder.field(FAILED, failedShards());
builder.field(SUCCESSFUL, successfulShards);
builder.endObject();
}
builder.endObject();
return builder;
}

@@ -496,11 +525,20 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
for (String index : indices) {
out.writeString(index);
}
if (out.getVersion().onOrAfter(VERBOSE_INTRODUCED)) {
if (state != null) {
out.writeBoolean(true);
out.writeByte(state.value());
} else {
out.writeBoolean(false);
}
} else {
if (out.getVersion().before(VERSION_INCOMPATIBLE_INTRODUCED) && state == SnapshotState.INCOMPATIBLE) {
out.writeByte(SnapshotState.FAILED.value());
} else {
out.writeByte(state.value());
}
}
out.writeOptionalString(reason);
out.writeVLong(startTime);
out.writeVLong(endTime);

@@ -26,8 +26,6 @@ import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;

@@ -67,7 +65,6 @@ import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.repositories.RepositoryMissingException;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;

@@ -171,7 +168,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
 */
public List<SnapshotInfo> snapshots(final String repositoryName,
final List<SnapshotId> snapshotIds,
final List<SnapshotId> incompatibleSnapshotIds,
final Set<SnapshotId> incompatibleSnapshotIds,
final boolean ignoreUnavailable) {
final Set<SnapshotInfo> snapshotSet = new HashSet<>();
final Set<SnapshotId> snapshotIdsToIterate = new HashSet<>(snapshotIds);

@@ -637,6 +634,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
if (event.routingTableChanged()) {
processStartedShards(event);
}
removeFinishedSnapshotFromClusterState(event);
finalizeSnapshotDeletionFromPreviousMaster(event);
}
} catch (Exception e) {

@@ -666,6 +664,26 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
}
}

/**
 * Removes a finished snapshot from the cluster state. This can happen if the previous
 * master node processed a cluster state update that marked the snapshot as finished,
 * but the previous master node died before removing the snapshot in progress from the
 * cluster state. It is then the responsibility of the new master node to end the
 * snapshot and remove it from the cluster state.
 */
private void removeFinishedSnapshotFromClusterState(ClusterChangedEvent event) {
if (event.localNodeMaster() && !event.previousState().nodes().isLocalNodeElectedMaster()) {
SnapshotsInProgress snapshotsInProgress = event.state().custom(SnapshotsInProgress.TYPE);
if (snapshotsInProgress != null && !snapshotsInProgress.entries().isEmpty()) {
for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) {
if (entry.state().completed()) {
endSnapshot(entry);
}
}
}
}
}

/**
 * Cleans up shard snapshots that were running on removed nodes
 *

@@ -64,7 +64,7 @@ grant codeBase "${codebase.mocksocket-1.1.jar}" {
};


grant codeBase "${codebase.rest-6.0.0-alpha1-SNAPSHOT.jar}" {
grant codeBase "${codebase.rest-6.0.0-alpha2-SNAPSHOT.jar}" {
  // rest makes socket connections for rest tests
  permission java.net.SocketPermission "*", "connect";
};

@@ -34,7 +34,7 @@ import java.util.Map;
import java.util.Set;

import static org.elasticsearch.Version.V_5_3_0_UNRELEASED;
import static org.elasticsearch.Version.V_6_0_0_alpha1_UNRELEASED;
import static org.elasticsearch.Version.V_6_0_0_alpha2_UNRELEASED;
import static org.elasticsearch.test.VersionUtils.randomVersion;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.containsString;

@@ -46,30 +46,30 @@ import static org.hamcrest.Matchers.sameInstance;
public class VersionTests extends ESTestCase {

    public void testVersionComparison() throws Exception {
        assertThat(V_5_3_0_UNRELEASED.before(V_6_0_0_alpha1_UNRELEASED), is(true));
        assertThat(V_5_3_0_UNRELEASED.before(V_6_0_0_alpha2_UNRELEASED), is(true));
        assertThat(V_5_3_0_UNRELEASED.before(V_5_3_0_UNRELEASED), is(false));
        assertThat(V_6_0_0_alpha1_UNRELEASED.before(V_5_3_0_UNRELEASED), is(false));
        assertThat(V_6_0_0_alpha2_UNRELEASED.before(V_5_3_0_UNRELEASED), is(false));

        assertThat(V_5_3_0_UNRELEASED.onOrBefore(V_6_0_0_alpha1_UNRELEASED), is(true));
        assertThat(V_5_3_0_UNRELEASED.onOrBefore(V_6_0_0_alpha2_UNRELEASED), is(true));
        assertThat(V_5_3_0_UNRELEASED.onOrBefore(V_5_3_0_UNRELEASED), is(true));
        assertThat(V_6_0_0_alpha1_UNRELEASED.onOrBefore(V_5_3_0_UNRELEASED), is(false));
        assertThat(V_6_0_0_alpha2_UNRELEASED.onOrBefore(V_5_3_0_UNRELEASED), is(false));

        assertThat(V_5_3_0_UNRELEASED.after(V_6_0_0_alpha1_UNRELEASED), is(false));
        assertThat(V_5_3_0_UNRELEASED.after(V_6_0_0_alpha2_UNRELEASED), is(false));
        assertThat(V_5_3_0_UNRELEASED.after(V_5_3_0_UNRELEASED), is(false));
        assertThat(V_6_0_0_alpha1_UNRELEASED.after(V_5_3_0_UNRELEASED), is(true));
        assertThat(V_6_0_0_alpha2_UNRELEASED.after(V_5_3_0_UNRELEASED), is(true));

        assertThat(V_5_3_0_UNRELEASED.onOrAfter(V_6_0_0_alpha1_UNRELEASED), is(false));
        assertThat(V_5_3_0_UNRELEASED.onOrAfter(V_6_0_0_alpha2_UNRELEASED), is(false));
        assertThat(V_5_3_0_UNRELEASED.onOrAfter(V_5_3_0_UNRELEASED), is(true));
        assertThat(V_6_0_0_alpha1_UNRELEASED.onOrAfter(V_5_3_0_UNRELEASED), is(true));
        assertThat(V_6_0_0_alpha2_UNRELEASED.onOrAfter(V_5_3_0_UNRELEASED), is(true));

        assertTrue(Version.fromString("5.0.0-alpha2").onOrAfter(Version.fromString("5.0.0-alpha1")));
        assertTrue(Version.fromString("5.0.0").onOrAfter(Version.fromString("5.0.0-beta2")));
        assertTrue(Version.fromString("5.0.0-rc1").onOrAfter(Version.fromString("5.0.0-beta24")));
        assertTrue(Version.fromString("5.0.0-alpha24").before(Version.fromString("5.0.0-beta0")));

        assertThat(V_5_3_0_UNRELEASED, is(lessThan(V_6_0_0_alpha1_UNRELEASED)));
        assertThat(V_5_3_0_UNRELEASED, is(lessThan(V_6_0_0_alpha2_UNRELEASED)));
        assertThat(V_5_3_0_UNRELEASED.compareTo(V_5_3_0_UNRELEASED), is(0));
        assertThat(V_6_0_0_alpha1_UNRELEASED, is(greaterThan(V_5_3_0_UNRELEASED)));
        assertThat(V_6_0_0_alpha2_UNRELEASED, is(greaterThan(V_5_3_0_UNRELEASED)));
    }

    public void testMin() {

@@ -97,7 +97,7 @@ public class VersionTests extends ESTestCase {
    }

    public void testMinimumIndexCompatibilityVersion() {
        assertEquals(Version.V_5_0_0, Version.V_6_0_0_alpha1_UNRELEASED.minimumIndexCompatibilityVersion());
        assertEquals(Version.V_5_0_0, Version.V_6_0_0_alpha2_UNRELEASED.minimumIndexCompatibilityVersion());
        assertEquals(Version.fromId(2000099), Version.V_5_0_0.minimumIndexCompatibilityVersion());
        assertEquals(Version.fromId(2000099),
            Version.V_5_1_1_UNRELEASED.minimumIndexCompatibilityVersion());

@@ -157,7 +157,7 @@ public class VersionTests extends ESTestCase {
    public void testIndexCreatedVersion() {
        // an actual index has a IndexMetaData.SETTING_INDEX_UUID
        final Version version = randomFrom(Version.V_5_0_0, Version.V_5_0_2,
            Version.V_5_2_0_UNRELEASED, Version.V_6_0_0_alpha1_UNRELEASED);
            Version.V_5_2_0_UNRELEASED, Version.V_6_0_0_alpha2_UNRELEASED);
        assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build()));
    }


@@ -170,11 +170,11 @@ public class VersionTests extends ESTestCase {
        assertThat(Version.fromString("2.3.0").minimumCompatibilityVersion(), equalTo(major));
        // from 6.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version ie. 5.x is
        // released since we need to bump the supported minor in Version#minimumCompatibilityVersion()
        Version lastVersion = VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha1_UNRELEASED);
        assertEquals(lastVersion.major, Version.V_6_0_0_alpha1_UNRELEASED.minimumCompatibilityVersion().major);
        Version lastVersion = VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha2_UNRELEASED);
        assertEquals(lastVersion.major, Version.V_6_0_0_alpha2_UNRELEASED.minimumCompatibilityVersion().major);
        assertEquals("did you miss to bump the minor in Version#minimumCompatibilityVersion()",
            lastVersion.minor, Version.V_6_0_0_alpha1_UNRELEASED.minimumCompatibilityVersion().minor);
        assertEquals(0, Version.V_6_0_0_alpha1_UNRELEASED.minimumCompatibilityVersion().revision);
            lastVersion.minor, Version.V_6_0_0_alpha2_UNRELEASED.minimumCompatibilityVersion().minor);
        assertEquals(0, Version.V_6_0_0_alpha2_UNRELEASED.minimumCompatibilityVersion().revision);
    }

    public void testToString() {

@@ -326,8 +326,8 @@ public class VersionTests extends ESTestCase {

    public void testIsCompatible() {
        assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()));
        assertTrue(isCompatible(Version.V_5_0_0, Version.V_6_0_0_alpha1_UNRELEASED));
        assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha1_UNRELEASED));
        assertTrue(isCompatible(Version.V_5_0_0, Version.V_6_0_0_alpha2_UNRELEASED));
        assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha2_UNRELEASED));
        assertFalse(isCompatible(Version.fromId(2000099), Version.V_5_0_0));
    }

@@ -161,6 +161,14 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase {
        SnapshotInfo snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
        assertThat(snapshotInfo.version().toString(), equalTo(version));

        logger.info("--> get less verbose snapshot info");
        getSnapshotsResponse = client().admin().cluster().prepareGetSnapshots(repo)
            .setSnapshots(snapshot).setVerbose(false).get();
        assertEquals(1, getSnapshotsResponse.getSnapshots().size());
        snapshotInfo = getSnapshotsResponse.getSnapshots().get(0);
        assertEquals(snapshot, snapshotInfo.snapshotId().getName());
        assertNull(snapshotInfo.version()); // in verbose=false mode, version doesn't exist

        logger.info("--> restoring snapshot");
        RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot(repo, snapshot).setRestoreGlobalState(true).setWaitForCompletion(true).get();
        assertThat(response.status(), equalTo(RestStatus.OK));

@@ -34,7 +34,6 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.indices.analysis.AnalysisModule;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
import org.elasticsearch.indices.analysis.PreBuiltCacheFactory;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.IndexSettingsModule;

@@ -207,12 +206,11 @@ public class AnalysisRegistryTests extends ESTestCase {

    public void testPreConfiguredTokenFiltersAreCached() throws IOException {
        AtomicBoolean built = new AtomicBoolean(false);
        PreConfiguredTokenFilter assertsBuiltOnce = new PreConfiguredTokenFilter("asserts_built_once", false,
                PreBuiltCacheFactory.CachingStrategy.ONE, (tokens, version) -> {
        PreConfiguredTokenFilter assertsBuiltOnce = PreConfiguredTokenFilter.singleton("asserts_built_once", false, tokenStream -> {
            if (false == built.compareAndSet(false, true)) {
                fail("Attempted to build the token filter twice when it should have been cached");
            }
            return new MockTokenFilter(tokens, MockTokenFilter.EMPTY_STOPSET);
            return new MockTokenFilter(tokenStream, MockTokenFilter.EMPTY_STOPSET);
        });
        try (AnalysisRegistry registryWithPreBuiltTokenFilter = new AnalysisRegistry(emptyEnvironment, emptyMap(), emptyMap(), emptyMap(),
                emptyMap(), emptyMap(), singletonMap("asserts_built_once", assertsBuiltOnce))) {

@@ -24,7 +24,6 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;

@@ -113,7 +112,7 @@ public class CustomNormalizerTests extends ESTokenStreamTestCase {
    private static class MockAnalysisPlugin implements AnalysisPlugin {
        @Override
        public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() {
            return singletonList(new PreConfiguredTokenFilter("mock_forbidden", false, CachingStrategy.ONE, MockLowerCaseFilter::new));
            return singletonList(PreConfiguredTokenFilter.singleton("mock_forbidden", false, MockLowerCaseFilter::new));
        }

        @Override

@@ -32,7 +32,6 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.analysis.PreConfiguredTokenFilter;
import org.elasticsearch.index.mapper.MapperService.MergeReason;
import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
import org.elasticsearch.plugins.AnalysisPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;

@@ -55,7 +54,7 @@ public class KeywordFieldMapperTests extends ESSingleNodeTestCase {
    public static class MockAnalysisPlugin extends Plugin implements AnalysisPlugin {
        @Override
        public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() {
            return singletonList(new PreConfiguredTokenFilter("mock_other_lowercase", true, CachingStrategy.ONE, MockLowerCaseFilter::new));
            return singletonList(PreConfiguredTokenFilter.singleton("mock_other_lowercase", true, MockLowerCaseFilter::new));
        }
    };

@@ -1538,7 +1538,7 @@ public class IndexShardTests extends IndexShardTestCase {
            public RepositoryData getRepositoryData() {
                Map<IndexId, Set<SnapshotId>> map = new HashMap<>();
                map.put(new IndexId(indexName, "blah"), emptySet());
                return new RepositoryData(EMPTY_REPO_GEN, Collections.emptyList(), map, Collections.emptyList());
                return new RepositoryData(EMPTY_REPO_GEN, Collections.emptyMap(), Collections.emptyMap(), map, Collections.emptyList());
            }

            @Override

@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.indices.analysis;
|
||||
|
||||
import org.apache.lucene.analysis.Analyzer;
|
||||
import org.apache.lucene.analysis.TokenFilter;
|
||||
import org.apache.lucene.analysis.TokenStream;
|
||||
import org.apache.lucene.analysis.Tokenizer;
|
||||
import org.apache.lucene.analysis.ar.ArabicNormalizationFilter;
|
||||
|
@ -28,6 +29,7 @@ import org.apache.lucene.analysis.fa.PersianNormalizationFilter;
|
|||
import org.apache.lucene.analysis.hunspell.Dictionary;
|
||||
import org.apache.lucene.analysis.miscellaneous.KeywordRepeatFilter;
|
||||
import org.apache.lucene.analysis.standard.StandardAnalyzer;
|
||||
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.store.SimpleFSDirectory;
|
||||
import org.elasticsearch.Version;
|
||||
|
@ -43,6 +45,7 @@ import org.elasticsearch.index.analysis.CharFilterFactory;
|
|||
import org.elasticsearch.index.analysis.CustomAnalyzer;
|
||||
import org.elasticsearch.index.analysis.IndexAnalyzers;
|
||||
import org.elasticsearch.index.analysis.NamedAnalyzer;
|
||||
import org.elasticsearch.index.analysis.PreConfiguredTokenFilter;
|
||||
import org.elasticsearch.index.analysis.StandardTokenizerFactory;
|
||||
import org.elasticsearch.index.analysis.StopTokenFilterFactory;
|
||||
import org.elasticsearch.index.analysis.TokenFilterFactory;
|
||||
|
@ -61,17 +64,23 @@ import java.io.StringReader;
|
|||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
import static java.util.Collections.singletonList;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents;
|
||||
import static org.hamcrest.Matchers.either;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
import static org.hamcrest.Matchers.is;
|
||||
|
||||
public class AnalysisModuleTests extends ESTestCase {
|
||||
private final Settings emptyNodeSettings = Settings.builder()
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
|
||||
.build();
|
||||
|
||||
public IndexAnalyzers getIndexAnalyzers(Settings settings) throws IOException {
|
||||
return getIndexAnalyzers(getNewRegistry(settings), settings);
|
||||
|
@ -264,6 +273,71 @@ public class AnalysisModuleTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests that plugins can register pre-configured token filters that vary in behavior based on Elasticsearch version, Lucene version,
|
||||
* and that do not vary based on version at all.
|
||||
*/
|
||||
public void testPluginPreConfiguredTokenFilters() throws IOException {
|
||||
// Simple token filter that appends text to the term
|
||||
final class AppendTokenFilter extends TokenFilter {
|
||||
private final CharTermAttribute term = addAttribute(CharTermAttribute.class);
|
||||
private final char[] appendMe;
|
||||
|
||||
protected AppendTokenFilter(TokenStream input, String appendMe) {
|
||||
super(input);
|
||||
this.appendMe = appendMe.toCharArray();
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean incrementToken() throws IOException {
|
||||
if (false == input.incrementToken()) {
|
||||
return false;
|
||||
}
|
||||
term.resizeBuffer(term.length() + appendMe.length);
|
||||
System.arraycopy(appendMe, 0, term.buffer(), term.length(), appendMe.length);
|
||||
term.setLength(term.length() + appendMe.length);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
boolean noVersionSupportsMultiTerm = randomBoolean();
|
||||
boolean luceneVersionSupportsMultiTerm = randomBoolean();
|
||||
boolean elasticsearchVersionSupportsMultiTerm = randomBoolean();
|
||||
AnalysisRegistry registry = new AnalysisModule(new Environment(emptyNodeSettings), singletonList(new AnalysisPlugin() {
|
||||
@Override
|
||||
public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() {
|
||||
return Arrays.asList(
|
||||
PreConfiguredTokenFilter.singleton("no_version", noVersionSupportsMultiTerm,
|
||||
tokenStream -> new AppendTokenFilter(tokenStream, "no_version")),
|
||||
PreConfiguredTokenFilter.luceneVersion("lucene_version", luceneVersionSupportsMultiTerm,
|
||||
(tokenStream, luceneVersion) -> new AppendTokenFilter(tokenStream, luceneVersion.toString())),
|
||||
PreConfiguredTokenFilter.elasticsearchVersion("elasticsearch_version", elasticsearchVersionSupportsMultiTerm,
|
||||
(tokenStream, esVersion) -> new AppendTokenFilter(tokenStream, esVersion.toString()))
|
||||
);
|
||||
}
|
||||
})).getAnalysisRegistry();
|
||||
|
||||
Version version = VersionUtils.randomVersion(random());
|
||||
IndexAnalyzers analyzers = getIndexAnalyzers(registry, Settings.builder()
|
||||
.put("index.analysis.analyzer.no_version.tokenizer", "keyword")
|
||||
.put("index.analysis.analyzer.no_version.filter", "no_version")
|
||||
.put("index.analysis.analyzer.lucene_version.tokenizer", "keyword")
|
||||
.put("index.analysis.analyzer.lucene_version.filter", "lucene_version")
|
||||
.put("index.analysis.analyzer.elasticsearch_version.tokenizer", "keyword")
|
||||
.put("index.analysis.analyzer.elasticsearch_version.filter", "elasticsearch_version")
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, version)
|
||||
.build());
|
||||
assertTokenStreamContents(analyzers.get("no_version").tokenStream("", "test"), new String[] {"testno_version"});
|
||||
assertTokenStreamContents(analyzers.get("lucene_version").tokenStream("", "test"), new String[] {"test" + version.luceneVersion});
|
||||
assertTokenStreamContents(analyzers.get("elasticsearch_version").tokenStream("", "test"), new String[] {"test" + version});
|
||||
|
||||
assertEquals("test" + (noVersionSupportsMultiTerm ? "no_version" : ""),
|
||||
analyzers.get("no_version").normalize("", "test").utf8ToString());
|
||||
assertEquals("test" + (luceneVersionSupportsMultiTerm ? version.luceneVersion.toString() : ""),
|
||||
analyzers.get("lucene_version").normalize("", "test").utf8ToString());
|
||||
assertEquals("test" + (elasticsearchVersionSupportsMultiTerm ? version.toString() : ""),
|
||||
analyzers.get("elasticsearch_version").normalize("", "test").utf8ToString());
|
||||
}
|
||||
|
||||
public void testRegisterHunspellDictionary() throws Exception {
|
||||
Settings settings = Settings.builder()
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
|
||||
|
|
|
@ -49,6 +49,18 @@ import static org.hamcrest.Matchers.equalTo;
|
|||
import static org.hamcrest.Matchers.nullValue;
|
||||
|
||||
public class UpdateSettingsIT extends ESIntegTestCase {
|
||||
public void testInvalidUpdateOnClosedIndex() {
|
||||
createIndex("test");
|
||||
assertAcked(client().admin().indices().prepareClose("test").get());
|
||||
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () ->
|
||||
client()
|
||||
.admin()
|
||||
.indices()
|
||||
.prepareUpdateSettings("test")
|
||||
.setSettings(Settings.builder().put("index.analysis.char_filter.invalid_char.type", "invalid"))
|
||||
.get());
|
||||
assertEquals(exception.getMessage(), "Unknown char_filter type [invalid] for [invalid_char]");
|
||||
}
|
||||
|
||||
public void testInvalidDynamicUpdate() {
|
||||
createIndex("test");
|
||||
|
|
|
@ -367,11 +367,12 @@ public class RelocationIT extends ESIntegTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// refresh is a replication action so this forces a global checkpoint sync which is needed as these are asserted on in tear down
|
||||
client().admin().indices().prepareRefresh("test").get();
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
public void testCancellationCleansTempFiles() throws Exception {
|
||||
final String indexName = "test";
|
||||
|
@ -506,6 +507,10 @@ public class RelocationIT extends ESIntegTestCase {
|
|||
assertNoFailures(afterRelocation);
|
||||
assertSearchHits(afterRelocation, ids.toArray(new String[ids.size()]));
|
||||
}
|
||||
|
||||
// refresh is a replication action so this forces a global checkpoint sync which is needed as these are asserted on in tear down
|
||||
client().admin().indices().prepareRefresh("test").get();
|
||||
|
||||
}
|
||||
|
||||
class RecoveryCorruption extends MockTransportService.DelegateTransport {
|
||||
|
|
|
@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
|
|||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
import org.elasticsearch.snapshots.SnapshotId;
|
||||
import org.elasticsearch.snapshots.SnapshotState;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -82,7 +83,8 @@ public class RepositoryDataTests extends ESTestCase {
|
|||
for (int i = 0; i < numOld; i++) {
|
||||
indices.add(indexIdMap.get(indexNames.get(i)));
|
||||
}
|
||||
RepositoryData newRepoData = repositoryData.addSnapshot(newSnapshot, indices);
|
||||
RepositoryData newRepoData = repositoryData.addSnapshot(newSnapshot,
|
||||
randomFrom(SnapshotState.SUCCESS, SnapshotState.PARTIAL, SnapshotState.FAILED), indices);
|
||||
// verify that the new repository data has the new snapshot and its indices
|
||||
assertTrue(newRepoData.getSnapshotIds().contains(newSnapshot));
|
||||
for (IndexId indexId : indices) {
|
||||
|
@ -97,15 +99,21 @@ public class RepositoryDataTests extends ESTestCase {
|
|||
|
||||
public void testInitIndices() {
|
||||
final int numSnapshots = randomIntBetween(1, 30);
|
||||
final List<SnapshotId> snapshotIds = new ArrayList<>(numSnapshots);
|
||||
final Map<String, SnapshotId> snapshotIds = new HashMap<>(numSnapshots);
|
||||
for (int i = 0; i < numSnapshots; i++) {
|
||||
snapshotIds.add(new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()));
|
||||
final SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID());
|
||||
snapshotIds.put(snapshotId.getUUID(), snapshotId);
|
||||
}
|
||||
RepositoryData repositoryData = new RepositoryData(EMPTY_REPO_GEN, snapshotIds, Collections.emptyMap(), Collections.emptyList());
|
||||
RepositoryData repositoryData = new RepositoryData(EMPTY_REPO_GEN, snapshotIds,
|
||||
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyList());
|
||||
// test that initializing indices works
|
||||
Map<IndexId, Set<SnapshotId>> indices = randomIndices(snapshotIds);
|
||||
RepositoryData newRepoData = repositoryData.initIndices(indices);
|
||||
assertEquals(repositoryData.getSnapshotIds(), newRepoData.getSnapshotIds());
|
||||
List<SnapshotId> expected = new ArrayList<>(repositoryData.getSnapshotIds());
|
||||
Collections.sort(expected);
|
||||
List<SnapshotId> actual = new ArrayList<>(newRepoData.getSnapshotIds());
|
||||
Collections.sort(actual);
|
||||
assertEquals(expected, actual);
|
||||
for (IndexId indexId : indices.keySet()) {
|
||||
assertEquals(indices.get(indexId), newRepoData.getSnapshots(indexId));
|
||||
}
|
||||
|
@ -136,25 +144,32 @@ public class RepositoryDataTests extends ESTestCase {
|
|||
assertEquals(new IndexId(notInRepoData, notInRepoData), repositoryData.resolveIndexId(notInRepoData));
|
||||
}
|
||||
|
||||
public void testGetSnapshotState() {
|
||||
final SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID());
|
||||
final SnapshotState state = randomFrom(SnapshotState.values());
|
||||
final RepositoryData repositoryData = RepositoryData.EMPTY.addSnapshot(snapshotId, state, Collections.emptyList());
|
||||
assertEquals(state, repositoryData.getSnapshotState(snapshotId));
|
||||
assertNull(repositoryData.getSnapshotState(new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID())));
|
||||
}
|
||||
|
||||
public static RepositoryData generateRandomRepoData() {
|
||||
return generateRandomRepoData(new ArrayList<>());
|
||||
final int numIndices = randomIntBetween(1, 30);
|
||||
final List<IndexId> indices = new ArrayList<>(numIndices);
|
||||
for (int i = 0; i < numIndices; i++) {
|
||||
indices.add(new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()));
|
||||
}
|
||||
|
||||
public static RepositoryData generateRandomRepoData(final List<SnapshotId> origSnapshotIds) {
|
||||
List<SnapshotId> snapshotIds = randomSnapshots(origSnapshotIds);
|
||||
return new RepositoryData(EMPTY_REPO_GEN, snapshotIds, randomIndices(snapshotIds), Collections.emptyList());
|
||||
}
|
||||
|
||||
private static List<SnapshotId> randomSnapshots(final List<SnapshotId> origSnapshotIds) {
|
||||
final int numSnapshots = randomIntBetween(1, 30);
|
||||
final List<SnapshotId> snapshotIds = new ArrayList<>(origSnapshotIds);
|
||||
RepositoryData repositoryData = RepositoryData.EMPTY;
|
||||
for (int i = 0; i < numSnapshots; i++) {
|
||||
snapshotIds.add(new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()));
|
||||
final SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID());
|
||||
final List<IndexId> someIndices = indices.subList(0, randomIntBetween(1, numIndices));
|
||||
repositoryData = repositoryData.addSnapshot(snapshotId, randomFrom(SnapshotState.values()), someIndices);
|
||||
}
|
||||
return snapshotIds;
|
||||
return repositoryData;
|
||||
}
|
||||
|
||||
private static Map<IndexId, Set<SnapshotId>> randomIndices(final List<SnapshotId> snapshotIds) {
|
||||
private static Map<IndexId, Set<SnapshotId>> randomIndices(final Map<String, SnapshotId> snapshotIdsMap) {
|
||||
final List<SnapshotId> snapshotIds = new ArrayList<>(snapshotIdsMap.values());
|
||||
final int totalSnapshots = snapshotIds.size();
|
||||
final int numIndices = randomIntBetween(1, 30);
|
||||
final Map<IndexId, Set<SnapshotId>> indices = new HashMap<>(numIndices);
|
||||
|
|
|
@ -29,6 +29,7 @@ import org.elasticsearch.repositories.RepositoriesService;
|
|||
import org.elasticsearch.repositories.RepositoryData;
|
||||
import org.elasticsearch.repositories.RepositoryException;
|
||||
import org.elasticsearch.snapshots.SnapshotId;
|
||||
import org.elasticsearch.snapshots.SnapshotState;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
|
||||
|
@ -143,7 +144,7 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase {
|
|||
assertThat(repository.readSnapshotIndexLatestBlob(), equalTo(1L));
|
||||
|
||||
// removing a snapshot and writing to a new index generational file
|
||||
repositoryData = repository.getRepositoryData().removeSnapshot(repositoryData.getSnapshotIds().get(0));
|
||||
repositoryData = repository.getRepositoryData().removeSnapshot(repositoryData.getSnapshotIds().iterator().next());
|
||||
repository.writeIndexGen(repositoryData, repositoryData.getGenId());
|
||||
assertEquals(repository.getRepositoryData(), repositoryData);
|
||||
assertThat(repository.latestIndexBlobId(), equalTo(2L));
|
||||
|
@ -181,8 +182,8 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase {
|
|||
for (int i = 0; i < numSnapshots; i++) {
|
||||
snapshotIds.add(new SnapshotId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()));
|
||||
}
|
||||
RepositoryData repositoryData = new RepositoryData(readData.getGenId(), Collections.emptyList(), Collections.emptyMap(),
|
||||
snapshotIds);
|
||||
RepositoryData repositoryData = new RepositoryData(readData.getGenId(),
|
||||
Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), snapshotIds);
|
||||
repository.blobContainer().deleteBlob("incompatible-snapshots");
|
||||
repository.writeIncompatibleSnapshots(repositoryData);
|
||||
readData = repository.getRepositoryData();
|
||||
|
@ -228,7 +229,8 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase {
|
|||
for (int j = 0; j < numIndices; j++) {
|
||||
indexIds.add(new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()));
|
||||
}
|
||||
repoData = repoData.addSnapshot(snapshotId, indexIds);
|
||||
repoData = repoData.addSnapshot(snapshotId,
|
||||
randomFrom(SnapshotState.SUCCESS, SnapshotState.PARTIAL, SnapshotState.FAILED), indexIds);
|
||||
}
|
||||
return repoData;
|
||||
}
|
||||
|
|
|
@ -31,6 +31,8 @@ import java.util.Collections;
|
|||
// TODO: these really should just be part of ScriptService tests, there is nothing special about them
|
||||
public class FileScriptTests extends ESTestCase {
|
||||
|
||||
private ScriptSettings scriptSettings;
|
||||
|
||||
ScriptService makeScriptService(Settings settings) throws Exception {
|
||||
Path homeDir = createTempDir();
|
||||
Path scriptsDir = homeDir.resolve("config").resolve("scripts");
|
||||
|
@ -47,7 +49,7 @@ public class FileScriptTests extends ESTestCase {
|
|||
MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, Collections.singletonMap(scriptSource, script -> "1"));
|
||||
ScriptEngineRegistry scriptEngineRegistry = new ScriptEngineRegistry(Collections.singleton(scriptEngine));
|
||||
ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList());
|
||||
ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
|
||||
scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
|
||||
return new ScriptService(settings, new Environment(settings), null, scriptEngineRegistry, scriptContextRegistry, scriptSettings);
|
||||
}
|
||||
|
||||
|
@ -60,7 +62,9 @@ public class FileScriptTests extends ESTestCase {
|
|||
assertNotNull(compiledScript);
|
||||
MockCompiledScript executable = (MockCompiledScript) compiledScript.compiled();
|
||||
assertEquals("script1.mockscript", executable.getName());
|
||||
assertSettingDeprecationsAndWarnings(new Setting[] {ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING},
|
||||
assertSettingDeprecationsAndWarnings(ScriptSettingsTests.buildDeprecatedSettingsArray(
|
||||
new Setting[] {ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING},
|
||||
scriptSettings, "script.engine." + MockScriptEngine.NAME + ".file.aggs"),
|
||||
"File scripts are deprecated. Use stored or inline scripts instead.");
|
||||
}
|
||||
|
||||
|
@ -81,7 +85,12 @@ public class FileScriptTests extends ESTestCase {
|
|||
assertTrue(e.getMessage(), e.getMessage().contains("scripts of type [file], operation [" + context.getKey() + "] and lang [" + MockScriptEngine.NAME + "] are disabled"));
|
||||
}
|
||||
}
|
||||
assertSettingDeprecationsAndWarnings(new Setting[] {ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING},
|
||||
assertSettingDeprecationsAndWarnings(ScriptSettingsTests.buildDeprecatedSettingsArray(
|
||||
new Setting[] {ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING}, scriptSettings,
|
||||
"script.engine." + MockScriptEngine.NAME + ".file.aggs",
|
||||
"script.engine." + MockScriptEngine.NAME + ".file.search",
|
||||
"script.engine." + MockScriptEngine.NAME + ".file.update",
|
||||
"script.engine." + MockScriptEngine.NAME + ".file.ingest"),
|
||||
"File scripts are deprecated. Use stored or inline scripts instead.");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -37,14 +37,18 @@ import static org.hamcrest.Matchers.containsString;
|
|||
public class ScriptContextTests extends ESTestCase {
|
||||
|
||||
private static final String PLUGIN_NAME = "testplugin";
|
||||
private static final String SCRIPT_PLUGIN_CUSTOM_SETTING = "script." + PLUGIN_NAME + "_custom_globally_disabled_op";
|
||||
private static final String SCRIPT_ENGINE_CUSTOM_SETTING = "script.engine." + MockScriptEngine.NAME + ".inline." + PLUGIN_NAME + "_custom_exp_disabled_op";
|
||||
|
||||
private ScriptSettings scriptSettings;
|
||||
|
||||
ScriptService makeScriptService() throws Exception {
|
||||
Settings settings = Settings.builder()
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
|
||||
// no file watching, so we don't need a ResourceWatcherService
|
||||
.put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), "false")
|
||||
.put("script." + PLUGIN_NAME + "_custom_globally_disabled_op", "false")
|
||||
.put("script.engine." + MockScriptEngine.NAME + ".inline." + PLUGIN_NAME + "_custom_exp_disabled_op", "false")
|
||||
.put(SCRIPT_PLUGIN_CUSTOM_SETTING, "false")
|
||||
.put(SCRIPT_ENGINE_CUSTOM_SETTING, "false")
|
||||
.build();
|
||||
|
||||
MockScriptEngine scriptEngine = new MockScriptEngine(MockScriptEngine.NAME, Collections.singletonMap("1", script -> "1"));
|
||||
|
@ -54,7 +58,7 @@ public class ScriptContextTests extends ESTestCase {
|
|||
new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"),
|
||||
new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op"));
|
||||
ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(customContexts);
|
||||
ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
|
||||
scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
|
||||
ScriptService scriptService = new ScriptService(settings, new Environment(settings), null, scriptEngineRegistry, scriptContextRegistry, scriptSettings);
|
||||
|
||||
ClusterState empty = ClusterState.builder(new ClusterName("_name")).build();
|
||||
|
@ -67,6 +71,8 @@ public class ScriptContextTests extends ESTestCase {
|
|||
return scriptService;
|
||||
}
|
||||
|
||||
|
||||
|
||||
public void testCustomGlobalScriptContextSettings() throws Exception {
|
||||
ScriptService scriptService = makeScriptService();
|
||||
for (ScriptType scriptType : ScriptType.values()) {
|
||||
|
@ -78,7 +84,9 @@ public class ScriptContextTests extends ESTestCase {
|
|||
assertThat(e.getMessage(), containsString("scripts of type [" + scriptType + "], operation [" + PLUGIN_NAME + "_custom_globally_disabled_op] and lang [" + MockScriptEngine.NAME + "] are disabled"));
|
||||
}
|
||||
}
|
||||
assertSettingDeprecationsAndWarnings(new Setting[] {ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING});
|
||||
assertSettingDeprecationsAndWarnings(
|
||||
ScriptSettingsTests.buildDeprecatedSettingsArray(new Setting[] {ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING},
|
||||
scriptSettings, SCRIPT_PLUGIN_CUSTOM_SETTING, SCRIPT_ENGINE_CUSTOM_SETTING));
|
||||
}
|
||||
|
||||
public void testCustomScriptContextSettings() throws Exception {
|
||||
|
@ -95,7 +103,9 @@ public class ScriptContextTests extends ESTestCase {
|
|||
assertNotNull(scriptService.compile(script, ScriptContext.Standard.AGGS));
|
||||
assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH));
|
||||
assertNotNull(scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_op")));
|
||||
assertSettingDeprecationsAndWarnings(new Setting[] {ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING});
|
||||
assertSettingDeprecationsAndWarnings(
|
||||
ScriptSettingsTests.buildDeprecatedSettingsArray(new Setting[] {ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING},
|
||||
scriptSettings, SCRIPT_PLUGIN_CUSTOM_SETTING, SCRIPT_ENGINE_CUSTOM_SETTING));
|
||||
}
|
||||
|
||||
public void testUnknownPluginScriptContext() throws Exception {
|
||||
|
@ -109,7 +119,9 @@ public class ScriptContextTests extends ESTestCase {
|
|||
assertTrue(e.getMessage(), e.getMessage().contains("script context [" + PLUGIN_NAME + "_unknown] not supported"));
|
||||
}
|
||||
}
|
||||
assertSettingDeprecationsAndWarnings(new Setting[] {ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING});
|
||||
assertSettingDeprecationsAndWarnings(
|
||||
ScriptSettingsTests.buildDeprecatedSettingsArray(new Setting[] {ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING},
|
||||
scriptSettings, SCRIPT_PLUGIN_CUSTOM_SETTING, SCRIPT_ENGINE_CUSTOM_SETTING));
|
||||
}
|
||||
|
||||
public void testUnknownCustomScriptContext() throws Exception {
|
||||
|
@ -129,7 +141,8 @@ public class ScriptContextTests extends ESTestCase {
|
|||
assertTrue(e.getMessage(), e.getMessage().contains("script context [test] not supported"));
|
||||
}
|
||||
}
|
||||
assertSettingDeprecationsAndWarnings(new Setting[] {ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING});
|
||||
assertSettingDeprecationsAndWarnings(
|
||||
ScriptSettingsTests.buildDeprecatedSettingsArray(new Setting[] {ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING},
|
||||
scriptSettings, SCRIPT_PLUGIN_CUSTOM_SETTING, SCRIPT_ENGINE_CUSTOM_SETTING));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -26,9 +26,11 @@ import org.elasticsearch.test.ESTestCase;
|
|||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
|
||||
|
@ -123,9 +125,11 @@ public class ScriptModesTests extends ESTestCase {
|
|||
randomScriptModes[i] = randomBoolean();
|
||||
}
|
||||
ScriptType[] randomScriptTypes = randomScriptTypesSet.toArray(new ScriptType[randomScriptTypesSet.size()]);
|
||||
List<String> deprecated = new ArrayList<>();
|
||||
Settings.Builder builder = Settings.builder();
|
||||
for (int i = 0; i < randomInt; i++) {
|
||||
builder.put("script" + "." + randomScriptTypes[i].getName(), randomScriptModes[i]);
|
||||
deprecated.add("script" + "." + randomScriptTypes[i].getName());
|
||||
}
|
||||
this.scriptModes = new ScriptModes(scriptSettings, builder.build());
|
||||
|
||||
|
@ -141,6 +145,8 @@ public class ScriptModesTests extends ESTestCase {
|
|||
if (randomScriptTypesSet.contains(ScriptType.INLINE) == false) {
|
||||
assertScriptModesAllOps(false, ScriptType.INLINE);
|
||||
}
|
||||
assertSettingDeprecationsAndWarnings(
|
||||
ScriptSettingsTests.buildDeprecatedSettingsArray(scriptSettings, deprecated.toArray(new String[] {})));
|
||||
}
|
||||
|
||||
public void testScriptContextGenericSettings() {
|
||||
|
@ -155,9 +161,11 @@ public class ScriptModesTests extends ESTestCase {
|
|||
randomScriptModes[i] = randomBoolean();
|
||||
}
|
||||
ScriptContext[] randomScriptContexts = randomScriptContextsSet.toArray(new ScriptContext[randomScriptContextsSet.size()]);
|
||||
List<String> deprecated = new ArrayList<>();
|
||||
Settings.Builder builder = Settings.builder();
|
||||
for (int i = 0; i < randomInt; i++) {
|
||||
builder.put("script" + "." + randomScriptContexts[i].getKey(), randomScriptModes[i]);
|
||||
deprecated.add("script" + "." + randomScriptContexts[i].getKey());
|
||||
}
|
||||
this.scriptModes = new ScriptModes(scriptSettings, builder.build());
|
||||
|
||||
|
@ -168,6 +176,8 @@ public class ScriptModesTests extends ESTestCase {
|
|||
ScriptContext[] complementOf = complementOf(randomScriptContexts);
|
||||
assertScriptModes(true, new ScriptType[]{ScriptType.FILE}, complementOf);
|
||||
assertScriptModes(false, new ScriptType[]{ScriptType.STORED, ScriptType.INLINE}, complementOf);
|
||||
assertSettingDeprecationsAndWarnings(
|
||||
ScriptSettingsTests.buildDeprecatedSettingsArray(scriptSettings, deprecated.toArray(new String[] {})));
|
||||
}
|
||||
|
||||
public void testConflictingScriptTypeAndOpGenericSettings() {
|
||||
|
@ -182,6 +192,9 @@ public class ScriptModesTests extends ESTestCase {
|
|||
ScriptContext[] complementOf = complementOf(scriptContext);
|
||||
assertScriptModes(true, new ScriptType[]{ScriptType.FILE, ScriptType.STORED}, complementOf);
|
||||
assertScriptModes(true, new ScriptType[]{ScriptType.INLINE}, complementOf);
|
||||
assertSettingDeprecationsAndWarnings(
|
||||
ScriptSettingsTests.buildDeprecatedSettingsArray(
|
||||
scriptSettings, "script." + scriptContext.getKey(), "script.stored", "script.inline"));
|
||||
}
|
||||
|
||||
private void assertScriptModesAllOps(boolean expectedScriptEnabled, ScriptType... scriptTypes) {
|
||||
|
|
|
@ -40,10 +40,12 @@ import org.junit.Before;
|
|||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.hamcrest.CoreMatchers.containsString;
|
||||
|
@ -217,11 +219,13 @@ public class ScriptServiceTests extends ESTestCase {
|
|||
public void testDefaultBehaviourFineGrainedSettings() throws IOException {
|
||||
Settings.Builder builder = Settings.builder();
|
||||
//rarely inject the default settings, which have no effect
|
||||
boolean deprecate = false;
|
||||
if (rarely()) {
|
||||
builder.put("script.file", "true");
|
||||
deprecate = true;
|
||||
}
|
||||
buildScriptService(builder.build());
|
||||
createFileScripts("mustache", "dtest");
|
||||
createFileScripts("dtest");
|
||||
|
||||
for (ScriptContext scriptContext : scriptContexts) {
|
||||
// only file scripts are accepted by default
|
||||
|
@ -229,8 +233,13 @@ public class ScriptServiceTests extends ESTestCase {
|
|||
assertCompileRejected("dtest", "script", ScriptType.STORED, scriptContext);
|
||||
assertCompileAccepted("dtest", "file_script", ScriptType.FILE, scriptContext);
|
||||
}
|
||||
if (deprecate) {
|
||||
assertSettingDeprecationsAndWarnings(ScriptSettingsTests.buildDeprecatedSettingsArray(scriptSettings, "script.file"),
|
||||
"File scripts are deprecated. Use stored or inline scripts instead.");
|
||||
} else {
|
||||
assertWarnings("File scripts are deprecated. Use stored or inline scripts instead.");
|
||||
}
|
||||
}
|
||||
|
||||
public void testFineGrainedSettings() throws IOException {
|
||||
//collect the fine-grained settings to set for this run
|
||||
|
@ -263,6 +272,7 @@ public class ScriptServiceTests extends ESTestCase {
|
|||
} while (engineSettings.containsKey(settingKey));
|
||||
engineSettings.put(settingKey, randomBoolean());
|
||||
}
|
||||
List<String> deprecated = new ArrayList<>();
|
||||
//set the selected fine-grained settings
|
||||
Settings.Builder builder = Settings.builder();
|
||||
for (Map.Entry<ScriptType, Boolean> entry : scriptSourceSettings.entrySet()) {
|
||||
|
@ -271,6 +281,7 @@ public class ScriptServiceTests extends ESTestCase {
|
|||
} else {
|
||||
builder.put("script" + "." + entry.getKey().getName(), "false");
|
||||
}
|
||||
deprecated.add("script" + "." + entry.getKey().getName());
|
||||
}
|
||||
for (Map.Entry<ScriptContext, Boolean> entry : scriptContextSettings.entrySet()) {
|
||||
if (entry.getValue()) {
|
||||
|
@ -278,6 +289,7 @@ public class ScriptServiceTests extends ESTestCase {
|
|||
} else {
|
||||
builder.put("script" + "." + entry.getKey().getKey(), "false");
|
||||
}
|
||||
deprecated.add("script" + "." + entry.getKey().getKey());
|
||||
}
|
||||
for (Map.Entry<String, Boolean> entry : engineSettings.entrySet()) {
|
||||
int delimiter = entry.getKey().indexOf('.');
|
||||
|
@ -290,6 +302,7 @@ public class ScriptServiceTests extends ESTestCase {
|
|||
} else {
|
||||
builder.put("script.engine" + "." + lang + "." + part2, "false");
|
||||
}
|
||||
deprecated.add("script.engine" + "." + lang + "." + part2);
|
||||
}
|
||||
|
||||
buildScriptService(builder.build());
|
||||
|
@ -320,7 +333,9 @@ public class ScriptServiceTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
}
|
||||
assertWarnings("File scripts are deprecated. Use stored or inline scripts instead.");
|
||||
assertSettingDeprecationsAndWarnings(
|
||||
ScriptSettingsTests.buildDeprecatedSettingsArray(scriptSettings, deprecated.toArray(new String[] {})),
|
||||
"File scripts are deprecated. Use stored or inline scripts instead.");
|
||||
}
|
||||
|
||||
public void testCompileNonRegisteredContext() throws IOException {
|
||||
|
@ -381,6 +396,8 @@ public class ScriptServiceTests extends ESTestCase {
|
|||
scriptService.compile(script, randomFrom(scriptContexts));
|
||||
scriptService.compile(script, randomFrom(scriptContexts));
|
||||
assertEquals(1L, scriptService.stats().getCompilations());
|
||||
assertSettingDeprecationsAndWarnings(
|
||||
ScriptSettingsTests.buildDeprecatedSettingsArray(scriptSettings, "script.inline"));
|
||||
}
|
||||
|
||||
public void testFileScriptCountedInCompilationStats() throws IOException {
|
||||
|
@ -406,6 +423,8 @@ public class ScriptServiceTests extends ESTestCase {
|
|||
scriptService.compile(new Script(ScriptType.INLINE, "test", "2+2", Collections.emptyMap()), randomFrom(scriptContexts));
|
||||
assertEquals(2L, scriptService.stats().getCompilations());
|
||||
assertEquals(1L, scriptService.stats().getCacheEvictions());
|
||||
assertSettingDeprecationsAndWarnings(
|
||||
ScriptSettingsTests.buildDeprecatedSettingsArray(scriptSettings, "script.inline"));
|
||||
}
|
||||
|
||||
public void testDefaultLanguage() throws IOException {
|
||||
|
@ -415,6 +434,8 @@ public class ScriptServiceTests extends ESTestCase {
|
|||
CompiledScript script = scriptService.compile(
|
||||
new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "1 + 1", Collections.emptyMap()), randomFrom(scriptContexts));
|
||||
assertEquals(script.lang(), Script.DEFAULT_SCRIPT_LANG);
|
||||
assertSettingDeprecationsAndWarnings(
|
||||
ScriptSettingsTests.buildDeprecatedSettingsArray(scriptSettings, "script.inline"));
|
||||
}
|
||||
|
||||
public void testStoreScript() throws Exception {
|
||||
|
|
|
@ -33,6 +33,29 @@ import static org.hamcrest.Matchers.equalTo;
|
|||
|
||||
public class ScriptSettingsTests extends ESTestCase {
|
||||
|
||||
public static Setting<?>[] buildDeprecatedSettingsArray(ScriptSettings scriptSettings, String... keys) {
|
||||
return buildDeprecatedSettingsArray(null, scriptSettings, keys);
|
||||
}
|
||||
|
||||
public static Setting<?>[] buildDeprecatedSettingsArray(Setting<?>[] deprecated, ScriptSettings scriptSettings, String... keys) {
|
||||
Setting<?>[] settings = new Setting[keys.length + (deprecated == null ? 0 : deprecated.length)];
|
||||
int count = 0;
|
||||
|
||||
for (Setting<?> setting : scriptSettings.getSettings()) {
|
||||
for (String key : keys) {
|
||||
if (setting.getKey().equals(key)) {
|
||||
settings[count++] = setting;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (deprecated != null) {
|
||||
System.arraycopy(deprecated, 0, settings, keys.length, deprecated.length);
|
||||
}
|
||||
|
||||
return settings;
|
||||
}
|
||||
|
||||
public void testSettingsAreProperlyPropogated() {
|
||||
ScriptEngineRegistry scriptEngineRegistry =
|
||||
new ScriptEngineRegistry(Collections.singletonList(new CustomScriptEngine()));
|
||||
|
@ -47,6 +70,7 @@ public class ScriptSettingsTests extends ESTestCase {
|
|||
assertThat(setting.getDefaultRaw(s), equalTo(Boolean.toString(enabled)));
|
||||
}
|
||||
}
|
||||
assertSettingDeprecationsAndWarnings(buildDeprecatedSettingsArray(scriptSettings, "script.inline"));
|
||||
}
|
||||
|
||||
private static class CustomScriptEngine implements ScriptEngine {
|
||||
|
|
|
@ -88,8 +88,10 @@ import java.util.ArrayList;
|
|||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
@ -2754,4 +2756,91 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void testGetSnapshotsFromIndexBlobOnly() throws Exception {
|
||||
logger.info("--> creating repository");
|
||||
final Path repoPath = randomRepoPath();
|
||||
final Client client = client();
|
||||
assertAcked(client.admin().cluster()
|
||||
.preparePutRepository("test-repo")
|
||||
.setType("fs")
|
||||
.setVerify(false)
|
||||
.setSettings(Settings.builder().put("location", repoPath)));
|
||||
|
||||
logger.info("--> creating random number of indices");
|
||||
final int numIndices = randomIntBetween(1, 10);
|
||||
for (int i = 0; i < numIndices; i++) {
|
||||
assertAcked(prepareCreate("test-idx-" + i).setSettings(Settings.builder()
|
||||
.put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0)));
|
||||
}
|
||||
|
||||
logger.info("--> creating random number of snapshots");
|
||||
final int numSnapshots = randomIntBetween(1, 10);
|
||||
final Map<String, List<String>> indicesPerSnapshot = new HashMap<>();
|
||||
for (int i = 0; i < numSnapshots; i++) {
|
||||
// index some additional docs (maybe) for each index
|
||||
for (int j = 0; j < numIndices; j++) {
|
||||
if (randomBoolean()) {
|
||||
final int numDocs = randomIntBetween(1, 5);
|
||||
for (int k = 0; k < numDocs; k++) {
|
||||
index("test-idx-" + j, "doc", Integer.toString(k), "foo", "bar" + k);
|
||||
}
|
||||
refresh();
|
||||
}
|
||||
}
|
||||
final boolean all = randomBoolean();
|
||||
boolean atLeastOne = false;
|
||||
List<String> indices = new ArrayList<>();
|
||||
for (int j = 0; j < numIndices; j++) {
|
||||
if (all || randomBoolean() || !atLeastOne) {
|
||||
indices.add("test-idx-" + j);
|
||||
atLeastOne = true;
|
||||
}
|
||||
}
|
||||
final String snapshotName = "test-snap-" + i;
|
||||
indicesPerSnapshot.put(snapshotName, indices);
|
||||
client.admin().cluster()
|
||||
.prepareCreateSnapshot("test-repo", snapshotName)
|
||||
.setWaitForCompletion(true)
|
||||
.setIndices(indices.toArray(new String[indices.size()]))
|
||||
.get();
|
||||
}
|
||||
|
||||
logger.info("--> verify _all returns snapshot info");
|
||||
GetSnapshotsResponse response = client().admin().cluster()
|
||||
.prepareGetSnapshots("test-repo")
|
||||
.setSnapshots("_all")
|
||||
.setVerbose(false)
|
||||
.get();
|
||||
assertEquals(indicesPerSnapshot.size(), response.getSnapshots().size());
|
||||
verifySnapshotInfo(response, indicesPerSnapshot);
|
||||
|
||||
logger.info("--> verify wildcard returns snapshot info");
|
||||
response = client().admin().cluster()
|
||||
.prepareGetSnapshots("test-repo")
|
||||
.setSnapshots("test-snap-*")
|
||||
.setVerbose(false)
|
||||
.get();
|
||||
assertEquals(indicesPerSnapshot.size(), response.getSnapshots().size());
|
||||
verifySnapshotInfo(response, indicesPerSnapshot);
|
||||
|
||||
logger.info("--> verify individual requests return snapshot info");
|
||||
for (int i = 0; i < numSnapshots; i++) {
|
||||
response = client().admin().cluster()
|
||||
.prepareGetSnapshots("test-repo")
|
||||
.setSnapshots("test-snap-" + i)
|
||||
.setVerbose(false)
|
||||
.get();
|
||||
assertEquals(1, response.getSnapshots().size());
|
||||
verifySnapshotInfo(response, indicesPerSnapshot);
|
||||
}
|
||||
}
|
||||
|
||||
private void verifySnapshotInfo(final GetSnapshotsResponse response, final Map<String, List<String>> indicesPerSnapshot) {
|
||||
for (SnapshotInfo snapshotInfo : response.getSnapshots()) {
|
||||
final List<String> expected = snapshotInfo.indices();
|
||||
assertEquals(expected, indicesPerSnapshot.get(snapshotInfo.snapshotId().getName()));
|
||||
assertEquals(SnapshotState.SUCCESS, snapshotInfo.state());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -433,8 +433,7 @@ gcloud config set project es-cloud
[[discovery-gce-usage-tips-permissions]]
===== Machine Permissions

If you have created a machine without the correct permissions, you will see `403 unauthorized` error messages. The only
way to alter these permissions is to delete the instance (NOT THE DISK). Then create another with the correct permissions.
If you have created a machine without the correct permissions, you will see `403 unauthorized` error messages. To change the permissions of an existing instance, first stop the instance, then edit it and scroll down to `Access Scopes` to adjust them. The other way to alter these permissions is to delete the instance (NOT THE DISK) and create a new one with the correct permissions.

Creating machines with gcloud::
+

@@ -23,9 +23,24 @@ the region of the configured bucket.

* Specifying s3 signer type has been removed, including `cloud.aws.signer` and `cloud.aws.s3.signer`.

* All `cloud.aws` and `repositories.s3` settings have been removed. Use `s3.client.*` settings instead.
* Global repositories settings have been removed. This includes `repositories.s3.bucket`,
  `repositories.s3.server_side_encryption`, `repositories.s3.buffer_size`,
  `repositories.s3.max_retries`, `repositories.s3.use_throttle_retries`,
  `repositories.s3.chunk_size`, `repositories.s3.compress`, `repositories.s3.storage_class`,
  `repositories.s3.canned_acl`, `repositories.s3.base_path`, and
  `repositories.s3.path_style_access`. Instead, these settings should be set directly in the
  settings per repository.
  See {plugins}/repository-s3-repository.html[S3 Repository settings].

* All repository level client settings have been removed. Use `s3.client.*` settings instead.
* Shared client settings have been removed. This includes `cloud.aws.access_key`,
  `cloud.aws.secret_key`, `cloud.aws.protocol`, `cloud.aws.proxy.host`,
  `cloud.aws.proxy.port`, `cloud.aws.proxy.username`, `cloud.aws.proxy.password`,
  `cloud.aws.signer`, `cloud.aws.read_timeout`, `cloud.aws.s3.access_key`,
  `cloud.aws.s3.secret_key`, `cloud.aws.s3.protocol`, `cloud.aws.s3.proxy.host`,
  `cloud.aws.s3.proxy.port`, `cloud.aws.s3.proxy.username`, `cloud.aws.s3.proxy.password`,
  `cloud.aws.s3.signer`, `cloud.aws.s3.read_timeout`, `repositories.s3.access_key`,
  `repositories.s3.secret_key`, `repositories.s3.endpoint` and `repositories.s3.protocol`.
Instead, use the new named client settings under `s3.client.CLIENT_NAME.*`.
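  As a rough sketch, the per-repository settings mentioned above can be supplied when the
  repository is registered, for example through the Java admin client. The setting names
  `bucket` and `base_path` below are assumed to mirror the removed `repositories.s3.*` keys,
  and the repository and bucket names are placeholders.
+
[source,java]
--------------------------------------------------
// Sketch only: "client" is an existing org.elasticsearch.client.Client instance.
// Setting names are assumed to mirror the removed repositories.s3.* keys.
client.admin().cluster()
    .preparePutRepository("my_s3_backup")
    .setType("s3")
    .setSettings(Settings.builder()
        .put("bucket", "my-bucket")        // formerly repositories.s3.bucket
        .put("base_path", "snapshots"))    // formerly repositories.s3.base_path
    .get();
--------------------------------------------------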

==== Azure Repository plugin

@@ -374,7 +374,7 @@ has <<painless-api-reference-Matcher-group-1, `group(int)`>> and
We have a few justifications for this different way of dispatching methods:

1. It makes operating on `def` types simpler and, presumably, faster. Using
receiver, name, and arity means when Painless sees a call on a `def` objects it
receiver, name, and arity means when Painless sees a call on a `def` object it
can dispatch the appropriate method without having to do expensive comparisons
of the types of the parameters. The same is true for invocations with `def`
typed parameters.

@@ -350,6 +374,15 @@ GET /_snapshot/my_backup/_all
The command fails if some of the snapshots are unavailable. The boolean parameter `ignore_unavailable` can be used to
return all snapshots that are currently available.

Getting all snapshots in the repository can be costly on cloud-based repositories,
both from a cost and performance perspective. If the only information required is
the snapshot names/uuids in the repository and the indices in each snapshot, then
the optional boolean parameter `verbose` can be set to `false` to execute a more
performant and cost-effective retrieval of the snapshots in the repository. Note
that setting `verbose` to `false` will omit all other information about the snapshot
such as status information, the number of snapshotted shards, etc. The default
value of the `verbose` parameter is `true`.
|
||||
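For reference, a minimal sketch of the equivalent Java API call follows; it assumes an existing `Client` named `client`, and `my_backup` is a hypothetical repository name.

[source,java]
--------------------------------------------------
// Sketch only: retrieve snapshots without reading each snapshot's metadata blob.
GetSnapshotsRequest request = new GetSnapshotsRequest()
        .repository("my_backup")
        .verbose(false);   // only snapshot names/uuids and their indices are returned
GetSnapshotsResponse response = client.admin().cluster().getSnapshots(request).actionGet();
--------------------------------------------------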

A currently running snapshot can be retrieved using the following command:

[source,sh]

@ -1,6 +1,8 @@

[[modules-tribe]]
== Tribe node

deprecated[5.4.0,The `tribe` node is deprecated in favour of <<modules-cross-cluster-search>> and will be removed in Elasticsearch 7.0.]

The _tribes_ feature allows a _tribe node_ to act as a federated client across
multiple clusters.

@ -36,13 +36,13 @@ import org.apache.lucene.analysis.miscellaneous.WordDelimiterGraphFilter;
|
|||
import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter;
|
||||
import org.apache.lucene.analysis.ngram.NGramTokenFilter;
|
||||
import org.apache.lucene.analysis.reverse.ReverseStringFilter;
|
||||
import org.apache.lucene.analysis.snowball.SnowballFilter;
|
||||
import org.apache.lucene.analysis.standard.ClassicFilter;
|
||||
import org.elasticsearch.index.analysis.CharFilterFactory;
|
||||
import org.elasticsearch.index.analysis.HtmlStripCharFilterFactory;
|
||||
import org.elasticsearch.index.analysis.PreConfiguredTokenFilter;
|
||||
import org.elasticsearch.index.analysis.TokenFilterFactory;
|
||||
import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider;
|
||||
import org.elasticsearch.indices.analysis.PreBuiltCacheFactory.CachingStrategy;
|
||||
import org.elasticsearch.plugins.AnalysisPlugin;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
|
||||
|
@ -73,41 +73,40 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
|
|||
|
||||
@Override
|
||||
public List<PreConfiguredTokenFilter> getPreConfiguredTokenFilters() {
|
||||
// TODO we should revisit the caching strategies.
|
||||
List<PreConfiguredTokenFilter> filters = new ArrayList<>();
|
||||
filters.add(new PreConfiguredTokenFilter("asciifolding", true, CachingStrategy.ONE, input -> new ASCIIFoldingFilter(input)));
|
||||
filters.add(new PreConfiguredTokenFilter("classic", false, CachingStrategy.ONE, ClassicFilter::new));
|
||||
filters.add(new PreConfiguredTokenFilter("common_grams", false, CachingStrategy.LUCENE, input ->
|
||||
new CommonGramsFilter(input, CharArraySet.EMPTY_SET)));
|
||||
filters.add(new PreConfiguredTokenFilter("edge_ngram", false, CachingStrategy.LUCENE, input ->
|
||||
filters.add(PreConfiguredTokenFilter.singleton("asciifolding", true, input -> new ASCIIFoldingFilter(input)));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("classic", false, ClassicFilter::new));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("common_grams", false,
|
||||
input -> new CommonGramsFilter(input, CharArraySet.EMPTY_SET)));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, input ->
|
||||
new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE)));
|
||||
// TODO deprecate edgeNGram
|
||||
filters.add(new PreConfiguredTokenFilter("edgeNGram", false, CachingStrategy.LUCENE, input ->
|
||||
filters.add(PreConfiguredTokenFilter.singleton("edgeNGram", false, input ->
|
||||
new EdgeNGramTokenFilter(input, EdgeNGramTokenFilter.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenFilter.DEFAULT_MAX_GRAM_SIZE)));
|
||||
filters.add(new PreConfiguredTokenFilter("kstem", false, CachingStrategy.ONE, KStemFilter::new));
|
||||
filters.add(new PreConfiguredTokenFilter("length", false, CachingStrategy.LUCENE, input ->
|
||||
filters.add(PreConfiguredTokenFilter.singleton("kstem", false, KStemFilter::new));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("length", false, input ->
|
||||
new LengthFilter(input, 0, Integer.MAX_VALUE))); // TODO this one seems useless
|
||||
filters.add(new PreConfiguredTokenFilter("ngram", false, CachingStrategy.LUCENE, NGramTokenFilter::new));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("ngram", false, NGramTokenFilter::new));
|
||||
// TODO deprecate nGram
|
||||
filters.add(new PreConfiguredTokenFilter("nGram", false, CachingStrategy.LUCENE, NGramTokenFilter::new));
|
||||
filters.add(new PreConfiguredTokenFilter("porter_stem", false, CachingStrategy.ONE, PorterStemFilter::new));
|
||||
filters.add(new PreConfiguredTokenFilter("reverse", false, CachingStrategy.LUCENE, input -> new ReverseStringFilter(input)));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("nGram", false, NGramTokenFilter::new));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("reverse", false, input -> new ReverseStringFilter(input)));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("snowball", false, input -> new SnowballFilter(input, "English")));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("stemmer", false, PorterStemFilter::new));
|
||||
// The stop filter is in lucene-core but the English stop words set is in lucene-analyzers-common
|
||||
filters.add(new PreConfiguredTokenFilter("stop", false, CachingStrategy.LUCENE, input ->
|
||||
new StopFilter(input, StopAnalyzer.ENGLISH_STOP_WORDS_SET)));
|
||||
filters.add(new PreConfiguredTokenFilter("trim", false, CachingStrategy.LUCENE, TrimFilter::new));
|
||||
filters.add(new PreConfiguredTokenFilter("truncate", false, CachingStrategy.ONE, input ->
|
||||
new TruncateTokenFilter(input, 10)));
|
||||
filters.add(new PreConfiguredTokenFilter("unique", false, CachingStrategy.ONE, input -> new UniqueTokenFilter(input)));
|
||||
filters.add(new PreConfiguredTokenFilter("uppercase", true, CachingStrategy.LUCENE, UpperCaseFilter::new));
|
||||
filters.add(new PreConfiguredTokenFilter("word_delimiter", false, CachingStrategy.ONE, input ->
|
||||
filters.add(PreConfiguredTokenFilter.singleton("stop", false, input -> new StopFilter(input, StopAnalyzer.ENGLISH_STOP_WORDS_SET)));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("trim", false, TrimFilter::new));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("truncate", false, input -> new TruncateTokenFilter(input, 10)));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("unique", false, input -> new UniqueTokenFilter(input)));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("uppercase", true, UpperCaseFilter::new));
|
||||
filters.add(PreConfiguredTokenFilter.singleton("word_delimiter", false, input ->
|
||||
new WordDelimiterFilter(input,
|
||||
WordDelimiterFilter.GENERATE_WORD_PARTS
|
||||
| WordDelimiterFilter.GENERATE_NUMBER_PARTS
|
||||
| WordDelimiterFilter.SPLIT_ON_CASE_CHANGE
|
||||
| WordDelimiterFilter.SPLIT_ON_NUMERICS
|
||||
| WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE, null)));
|
||||
filters.add(new PreConfiguredTokenFilter("word_delimiter_graph", false, CachingStrategy.ONE, input ->
|
||||
filters.add(PreConfiguredTokenFilter.singleton("word_delimiter_graph", false, input ->
|
||||
new WordDelimiterGraphFilter(input,
|
||||
WordDelimiterGraphFilter.GENERATE_WORD_PARTS
|
||||
| WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS
|
||||
|
|
|
@ -19,7 +19,9 @@
|
|||
|
||||
package org.elasticsearch.analysis.common;
|
||||
|
||||
import org.apache.lucene.analysis.en.PorterStemFilterFactory;
|
||||
import org.apache.lucene.analysis.reverse.ReverseStringFilterFactory;
|
||||
import org.apache.lucene.analysis.snowball.SnowballPorterFilterFactory;
|
||||
import org.elasticsearch.index.analysis.HtmlStripCharFilterFactory;
|
||||
import org.elasticsearch.indices.analysis.AnalysisFactoryTestCase;
|
||||
|
||||
|
@ -77,6 +79,8 @@ public class CommonAnalysisFactoryTests extends AnalysisFactoryTestCase {
|
|||
filters.put("nGram", null);
|
||||
filters.put("porter_stem", null);
|
||||
filters.put("reverse", ReverseStringFilterFactory.class);
|
||||
filters.put("snowball", SnowballPorterFilterFactory.class);
|
||||
filters.put("stemmer", PorterStemFilterFactory.class);
|
||||
filters.put("stop", null);
|
||||
filters.put("trim", null);
|
||||
filters.put("truncate", null);
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
*/
|
||||
|
||||
import org.apache.tools.ant.taskdefs.condition.Os
|
||||
import org.elasticsearch.gradle.test.RestIntegTestTask
|
||||
|
||||
import java.nio.file.Files
|
||||
import java.nio.file.Path
|
||||
import java.nio.file.Paths
|
||||
|
@ -68,6 +70,61 @@ task hdfsFixture(type: org.elasticsearch.gradle.test.AntFixture) {
|
|||
baseDir
|
||||
}
|
||||
|
||||
// MIT Kerberos Vagrant Testing Fixture
|
||||
String box = "krb5kdc"
|
||||
Map<String,String> vagrantEnvVars = [
|
||||
'VAGRANT_CWD' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}",
|
||||
'VAGRANT_VAGRANTFILE' : 'Vagrantfile',
|
||||
'VAGRANT_PROJECT_DIR' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}"
|
||||
]
|
||||
|
||||
task krb5kdcUpdate(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
|
||||
command 'box'
|
||||
subcommand 'update'
|
||||
boxName box
|
||||
environmentVars vagrantEnvVars
|
||||
}
|
||||
|
||||
task krb5kdcFixture(type: org.elasticsearch.gradle.test.VagrantFixture) {
|
||||
command 'up'
|
||||
args '--provision', '--provider', 'virtualbox'
|
||||
boxName box
|
||||
environmentVars vagrantEnvVars
|
||||
dependsOn krb5kdcUpdate
|
||||
}
|
||||
|
||||
task krb5AddPrincipals {
|
||||
dependsOn krb5kdcFixture
|
||||
}
|
||||
|
||||
List<String> principals = [ "elasticsearch", "hdfs/hdfs.build.elastic.co" ]
|
||||
String realm = "BUILD.ELASTIC.CO"
|
||||
|
||||
for (String principal : principals) {
|
||||
Task create = project.tasks.create("addPrincipal#${principal}", org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
|
||||
command 'ssh'
|
||||
args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $principal"
|
||||
boxName box
|
||||
environmentVars vagrantEnvVars
|
||||
dependsOn krb5kdcFixture
|
||||
}
|
||||
krb5AddPrincipals.dependsOn(create)
|
||||
}
|
||||
|
||||
task secureHdfsFixture(type: org.elasticsearch.gradle.test.AntFixture) {
|
||||
dependsOn project.configurations.hdfsFixture, krb5kdcFixture, krb5AddPrincipals
|
||||
executable = new File(project.javaHome, 'bin/java')
|
||||
env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }"
|
||||
|
||||
Path keytabPath = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("hdfs_hdfs.build.elastic.co.keytab")
|
||||
Path krb5Config = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf")
|
||||
|
||||
args "-Djava.security.krb5.conf=${krb5Config}", 'hdfs.MiniHDFS',
|
||||
baseDir,
|
||||
"hdfs/hdfs.build.elastic.co@${realm}",
|
||||
"${keytabPath}"
|
||||
}
|
||||
|
||||
boolean fixtureSupported = false;
|
||||
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
|
||||
// hdfs fixture will not start without hadoop native libraries on windows
|
||||
|
@ -89,12 +146,69 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) {
|
|||
|
||||
if (fixtureSupported) {
|
||||
integTestCluster.dependsOn hdfsFixture
|
||||
integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository'
|
||||
} else {
|
||||
logger.warn("hdfsFixture unsupported, please set HADOOP_HOME and put HADOOP_HOME\\bin in PATH")
|
||||
// just tests that the plugin loads
|
||||
integTestRunner.systemProperty 'tests.rest.suite', 'hdfs_repository/10_basic'
|
||||
}
|
||||
|
||||
boolean secureFixtureSupported = false;
|
||||
if (fixtureSupported) {
|
||||
// Only do secure fixture support if the regular fixture is supported,
|
||||
// and if vagrant is installed. The ignoreExitValue on exec only matters
|
||||
// in cases where the command can be found and successfully started. In
|
||||
// situations where the vagrant command isn't able to be started at all
|
||||
// (it's not installed) then Gradle still throws ExecException.
|
||||
ByteArrayOutputStream pipe = new ByteArrayOutputStream()
|
||||
try {
|
||||
ExecResult runResult = exec {
|
||||
commandLine 'vagrant', '--version'
|
||||
standardOutput pipe
|
||||
ignoreExitValue true
|
||||
}
|
||||
String output = pipe.toString().trim()
|
||||
if (runResult.exitValue == 0) {
|
||||
secureFixtureSupported = (output ==~ /Vagrant 1\.(8\.[6-9]|9\.[0-9])+/)
|
||||
} else {
|
||||
logger.warn("Could not read installed vagrant version:\n" + output)
|
||||
}
|
||||
} catch (org.gradle.process.internal.ExecException e) {
|
||||
logger.warn("Could not find vagrant: " + e.message)
|
||||
// Swallow error. Vagrant isn't installed. Leave secure fixture support off.
|
||||
}
|
||||
}
|
||||
|
||||
// Create an integration test suite just for security-based tests
|
||||
if (secureFixtureSupported) {
|
||||
// This must execute before the afterEvaluate block from integTestSecure
|
||||
project.afterEvaluate {
|
||||
Path elasticsearchKT = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("elasticsearch.keytab").toAbsolutePath()
|
||||
Path krb5conf = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf").toAbsolutePath()
|
||||
|
||||
project.integTestSecureCluster.dependsOn(project.bundlePlugin)
|
||||
project.integTestSecure.clusterConfig.plugin(project.path)
|
||||
project.integTestSecure.clusterConfig.extraConfigFile("repository-hdfs/krb5.keytab", "${elasticsearchKT}")
|
||||
project.integTestSecure.clusterConfig.jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') +
|
||||
" " + "-Xmx" + System.getProperty('tests.heap.size', '512m') +
|
||||
" " + "-Djava.security.krb5.conf=${krb5conf}" +
|
||||
" " + System.getProperty('tests.jvm.argline', '')
|
||||
}
|
||||
|
||||
RestIntegTestTask integTestSecure = project.tasks.create('integTestSecure', RestIntegTestTask.class) {
|
||||
description = "Runs rest tests against an elasticsearch cluster with HDFS secured by MIT Kerberos."
|
||||
}
|
||||
|
||||
integTestSecure.mustRunAfter(project.integTest)
|
||||
project.check.dependsOn(integTestSecure)
|
||||
|
||||
// Fixture dependencies
|
||||
integTestSecureCluster.dependsOn secureHdfsFixture, krb5kdcFixture
|
||||
integTestSecureRunner.systemProperty 'tests.rest.suite', 'secure_hdfs_repository'
|
||||
} else {
|
||||
logger.warn("secured hdfsFixture is unsupported, please install Vagrant 1.8.6+ to enable")
|
||||
}
|
||||
|
||||
thirdPartyAudit.excludes = [
|
||||
// classes are missing, because we added hadoop jars one by one until tests pass.
|
||||
'com.google.gson.stream.JsonReader',
|
||||
|
|
|
@ -0,0 +1,30 @@
|
|||
# Integration tests for HDFS Repository plugin
|
||||
#
|
||||
# Check plugin is installed
|
||||
#
|
||||
"Plugin loaded":
|
||||
- do:
|
||||
cluster.state: {}
|
||||
|
||||
# Get master node id
|
||||
- set: { master_node: master }
|
||||
|
||||
- do:
|
||||
nodes.info: {}
|
||||
|
||||
- match: { nodes.$master.plugins.0.name: repository-hdfs }
|
||||
---
|
||||
#
|
||||
# Check that we can't use file:// repositories or anything like that
|
||||
# We only test this plugin against hdfs://
|
||||
#
|
||||
"HDFS only":
|
||||
- do:
|
||||
catch: /Invalid scheme/
|
||||
snapshot.create_repository:
|
||||
repository: misconfigured_repository
|
||||
body:
|
||||
type: hdfs
|
||||
settings:
|
||||
uri: "file://bogus"
|
||||
path: "foo/bar"
|
|
@ -0,0 +1,29 @@
|
|||
# Integration tests for HDFS Repository plugin
|
||||
#
|
||||
# Tests creating a repository
|
||||
#
|
||||
"HDFS Repository Creation":
|
||||
# Create repository
|
||||
- do:
|
||||
snapshot.create_repository:
|
||||
repository: test_repository_create
|
||||
body:
|
||||
type: hdfs
|
||||
settings:
|
||||
uri: "hdfs://localhost:9998"
|
||||
path: "/user/elasticsearch/test/repository_create"
|
||||
security:
|
||||
principal: "elasticsearch@BUILD.ELASTIC.CO"
|
||||
|
||||
# Get repository
|
||||
- do:
|
||||
snapshot.get_repository:
|
||||
repository: test_repository_create
|
||||
|
||||
- is_true: test_repository_create
|
||||
- match: {test_repository_create.settings.path : "/user/elasticsearch/test/repository_create"}
|
||||
|
||||
# Remove our repository
|
||||
- do:
|
||||
snapshot.delete_repository:
|
||||
repository: test_repository_create
|
|
@ -0,0 +1,54 @@
|
|||
# Integration tests for HDFS Repository plugin
|
||||
#
|
||||
# Tests creating a repository, then deleting it and creating it again.
|
||||
#
|
||||
"HDFS Delete Repository":
|
||||
# Create repository
|
||||
- do:
|
||||
snapshot.create_repository:
|
||||
repository: test_repo_hdfs_1
|
||||
body:
|
||||
type: hdfs
|
||||
settings:
|
||||
uri: "hdfs://localhost:9998"
|
||||
path: "/user/elasticsearch/foo/bar"
|
||||
security:
|
||||
principal: "elasticsearch@BUILD.ELASTIC.CO"
|
||||
|
||||
# Get repository
|
||||
- do:
|
||||
snapshot.get_repository:
|
||||
repository: test_repo_hdfs_1
|
||||
|
||||
- is_true: test_repo_hdfs_1
|
||||
- match: {test_repo_hdfs_1.settings.path : "/user/elasticsearch/foo/bar"}
|
||||
|
||||
# Delete repository
|
||||
- do:
|
||||
snapshot.delete_repository:
|
||||
repository: test_repo_hdfs_1
|
||||
|
||||
# Get repository: It should be gone
|
||||
- do:
|
||||
catch: /repository_missing_exception/
|
||||
snapshot.get_repository:
|
||||
repository: test_repo_hdfs_1
|
||||
|
||||
# Create it again
|
||||
- do:
|
||||
snapshot.create_repository:
|
||||
repository: test_repo_hdfs_1
|
||||
body:
|
||||
type: hdfs
|
||||
settings:
|
||||
uri: "hdfs://localhost:9998"
|
||||
path: "/user/elasticsearch/foo/bar"
|
||||
security:
|
||||
principal: "elasticsearch@BUILD.ELASTIC.CO"
|
||||
|
||||
# Get repository again
|
||||
- do:
|
||||
snapshot.get_repository:
|
||||
repository: test_repo_hdfs_1
|
||||
|
||||
- is_true: test_repo_hdfs_1
|
|
@ -0,0 +1,25 @@
|
|||
# Integration tests for HDFS Repository plugin
|
||||
#
|
||||
# Tests explicit verify
|
||||
#
|
||||
"HDFS Repository Verify":
|
||||
- do:
|
||||
snapshot.create_repository:
|
||||
repository: test_repository_verify
|
||||
body:
|
||||
type: hdfs
|
||||
settings:
|
||||
uri: "hdfs://localhost:9998"
|
||||
path: "/user/elasticsearch/test/repository_verify"
|
||||
security:
|
||||
principal: "elasticsearch@BUILD.ELASTIC.CO"
|
||||
|
||||
# Verify repository
|
||||
- do:
|
||||
snapshot.verify_repository:
|
||||
repository: test_repository_verify
|
||||
|
||||
# Remove our repository
|
||||
- do:
|
||||
snapshot.delete_repository:
|
||||
repository: test_repository_verify
|
|
@ -0,0 +1,50 @@
|
|||
# Integration tests for HDFS Repository plugin
|
||||
#
|
||||
# Actually perform a snapshot to hdfs
|
||||
#
|
||||
---
|
||||
"take snapshot":
|
||||
# Create repository
|
||||
- do:
|
||||
snapshot.create_repository:
|
||||
repository: test_snapshot_repository
|
||||
body:
|
||||
type: hdfs
|
||||
settings:
|
||||
uri: "hdfs://localhost:9998"
|
||||
path: "/user/elasticsearch/test/snapshot"
|
||||
security:
|
||||
principal: "elasticsearch@BUILD.ELASTIC.CO"
|
||||
|
||||
# Create index
|
||||
- do:
|
||||
indices.create:
|
||||
index: test_index
|
||||
body:
|
||||
settings:
|
||||
number_of_shards: 1
|
||||
number_of_replicas: 1
|
||||
|
||||
# Create snapshot
|
||||
- do:
|
||||
snapshot.create:
|
||||
repository: test_snapshot_repository
|
||||
snapshot: test_snapshot
|
||||
wait_for_completion: true
|
||||
|
||||
- match: { snapshot.snapshot: test_snapshot }
|
||||
- match: { snapshot.state : SUCCESS }
|
||||
- match: { snapshot.shards.successful: 1 }
|
||||
- match: { snapshot.shards.failed : 0 }
|
||||
|
||||
# Remove our snapshot
|
||||
- do:
|
||||
snapshot.delete:
|
||||
repository: test_snapshot_repository
|
||||
snapshot: test_snapshot
|
||||
|
||||
# Remove our repository
|
||||
- do:
|
||||
snapshot.delete_repository:
|
||||
repository: test_snapshot_repository
|
||||
|
|
@ -0,0 +1,72 @@
|
|||
# Integration tests for HDFS Repository plugin
|
||||
#
|
||||
# Tests retrieving information about snapshot
|
||||
#
|
||||
---
|
||||
"Get a snapshot":
|
||||
# Create repository
|
||||
- do:
|
||||
snapshot.create_repository:
|
||||
repository: test_snapshot_get_repository
|
||||
body:
|
||||
type: hdfs
|
||||
settings:
|
||||
uri: "hdfs://localhost:9998"
|
||||
path: "/user/elasticsearch/test/snapshot_get"
|
||||
security:
|
||||
principal: "elasticsearch@BUILD.ELASTIC.CO"
|
||||
|
||||
# Create index
|
||||
- do:
|
||||
indices.create:
|
||||
index: test_index
|
||||
body:
|
||||
settings:
|
||||
number_of_shards: 1
|
||||
number_of_replicas: 0
|
||||
|
||||
# Wait for green
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: green
|
||||
|
||||
# Create snapshot
|
||||
- do:
|
||||
snapshot.create:
|
||||
repository: test_snapshot_get_repository
|
||||
snapshot: test_snapshot_get
|
||||
wait_for_completion: true
|
||||
|
||||
- match: { snapshot.snapshot: test_snapshot_get }
|
||||
- match: { snapshot.state : SUCCESS }
|
||||
- match: { snapshot.shards.successful: 1 }
|
||||
- match: { snapshot.shards.failed : 0 }
|
||||
|
||||
# Get snapshot info
|
||||
- do:
|
||||
snapshot.get:
|
||||
repository: test_snapshot_get_repository
|
||||
snapshot: test_snapshot_get
|
||||
|
||||
- length: { snapshots: 1 }
|
||||
- match: { snapshots.0.snapshot : test_snapshot_get }
|
||||
|
||||
# List snapshot info
|
||||
- do:
|
||||
snapshot.get:
|
||||
repository: test_snapshot_get_repository
|
||||
snapshot: "*"
|
||||
|
||||
- length: { snapshots: 1 }
|
||||
- match: { snapshots.0.snapshot : test_snapshot_get }
|
||||
|
||||
# Remove our snapshot
|
||||
- do:
|
||||
snapshot.delete:
|
||||
repository: test_snapshot_get_repository
|
||||
snapshot: test_snapshot_get
|
||||
|
||||
# Remove our repository
|
||||
- do:
|
||||
snapshot.delete_repository:
|
||||
repository: test_snapshot_get_repository
|
|
@ -0,0 +1,81 @@
|
|||
# Integration tests for HDFS Repository plugin
|
||||
#
|
||||
# Actually perform a snapshot to hdfs, then restore it
|
||||
#
|
||||
---
|
||||
"Create a snapshot and then restore it":
|
||||
|
||||
# Create repository
|
||||
- do:
|
||||
snapshot.create_repository:
|
||||
repository: test_restore_repository
|
||||
body:
|
||||
type: hdfs
|
||||
settings:
|
||||
uri: "hdfs://localhost:9998"
|
||||
path: "/user/elasticsearch/test/restore"
|
||||
security:
|
||||
principal: "elasticsearch@BUILD.ELASTIC.CO"
|
||||
|
||||
# Create index
|
||||
- do:
|
||||
indices.create:
|
||||
index: test_index
|
||||
body:
|
||||
settings:
|
||||
number_of_shards: 1
|
||||
number_of_replicas: 0
|
||||
|
||||
# Wait for green
|
||||
- do:
|
||||
cluster.health:
|
||||
wait_for_status: green
|
||||
|
||||
# Take snapshot
|
||||
- do:
|
||||
snapshot.create:
|
||||
repository: test_restore_repository
|
||||
snapshot: test_restore
|
||||
wait_for_completion: true
|
||||
|
||||
- match: { snapshot.snapshot: test_restore }
|
||||
- match: { snapshot.state : SUCCESS }
|
||||
- match: { snapshot.shards.successful: 1 }
|
||||
- match: { snapshot.shards.failed : 0 }
|
||||
- is_true: snapshot.version
|
||||
- gt: { snapshot.version_id: 0}
|
||||
|
||||
# Close index
|
||||
- do:
|
||||
indices.close:
|
||||
index : test_index
|
||||
|
||||
# Restore index
|
||||
- do:
|
||||
snapshot.restore:
|
||||
repository: test_restore_repository
|
||||
snapshot: test_restore
|
||||
wait_for_completion: true
|
||||
|
||||
# Check recovery stats
|
||||
- do:
|
||||
indices.recovery:
|
||||
index: test_index
|
||||
|
||||
- match: { test_index.shards.0.type: SNAPSHOT }
|
||||
- match: { test_index.shards.0.stage: DONE }
|
||||
- match: { test_index.shards.0.index.files.recovered: 0}
|
||||
- match: { test_index.shards.0.index.size.recovered_in_bytes: 0}
|
||||
- match: { test_index.shards.0.index.files.reused: 1}
|
||||
- gt: { test_index.shards.0.index.size.reused_in_bytes: 0}
|
||||
|
||||
# Remove our snapshot
|
||||
- do:
|
||||
snapshot.delete:
|
||||
repository: test_restore_repository
|
||||
snapshot: test_restore
|
||||
|
||||
# Remove our repository
|
||||
- do:
|
||||
snapshot.delete_repository:
|
||||
repository: test_restore_repository
|
|
@ -24,27 +24,20 @@ import java.util.Map;
|
|||
import java.util.function.Function;
|
||||
|
||||
import com.amazonaws.ClientConfiguration;
|
||||
import com.amazonaws.Protocol;
|
||||
import com.amazonaws.auth.AWSCredentials;
|
||||
import com.amazonaws.auth.AWSCredentialsProvider;
|
||||
import com.amazonaws.auth.BasicAWSCredentials;
|
||||
import com.amazonaws.auth.InstanceProfileCredentialsProvider;
|
||||
import com.amazonaws.http.IdleConnectionReaper;
|
||||
import com.amazonaws.internal.StaticCredentialsProvider;
|
||||
import com.amazonaws.services.s3.AmazonS3;
|
||||
import com.amazonaws.services.s3.AmazonS3Client;
|
||||
import com.amazonaws.services.s3.S3ClientOptions;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.logging.DeprecationLogger;
|
||||
import org.elasticsearch.common.settings.SecureString;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
||||
import static org.elasticsearch.repositories.s3.S3Repository.getValue;
|
||||
|
||||
class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Service {
|
||||
|
||||
|
@ -70,33 +63,17 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se
|
|||
|
||||
S3ClientSettings clientSettings = clientsSettings.get(clientName);
|
||||
if (clientSettings == null) {
|
||||
throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. " +
|
||||
"Existing client configs: " +
|
||||
throw new IllegalArgumentException("Unknown s3 client name [" + clientName + "]. Existing client configs: " +
|
||||
Strings.collectionToDelimitedString(clientsSettings.keySet(), ","));
|
||||
}
|
||||
|
||||
// If the user defined a path style access setting, we rely on it,
|
||||
// otherwise we use the default value set by the SDK
|
||||
Boolean pathStyleAccess = null;
|
||||
if (S3Repository.Repository.PATH_STYLE_ACCESS_SETTING.exists(repositorySettings) ||
|
||||
S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING.exists(settings)) {
|
||||
pathStyleAccess = getValue(repositorySettings, settings,
|
||||
S3Repository.Repository.PATH_STYLE_ACCESS_SETTING,
|
||||
S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING);
|
||||
}
|
||||
|
||||
logger.debug("creating S3 client with client_name [{}], endpoint [{}], path_style_access [{}]",
|
||||
clientName, clientSettings.endpoint, pathStyleAccess);
|
||||
logger.debug("creating S3 client with client_name [{}], endpoint [{}]", clientName, clientSettings.endpoint);
|
||||
|
||||
AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings);
|
||||
ClientConfiguration configuration = buildConfiguration(clientSettings, repositorySettings);
|
||||
|
||||
client = new AmazonS3Client(credentials, configuration);
|
||||
|
||||
if (pathStyleAccess != null) {
|
||||
client.setS3ClientOptions(new S3ClientOptions().withPathStyleAccess(pathStyleAccess));
|
||||
}
|
||||
|
||||
if (Strings.hasText(clientSettings.endpoint)) {
|
||||
client.setEndpoint(clientSettings.endpoint);
|
||||
}
|
||||
|
@ -121,14 +98,8 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se
|
|||
clientConfiguration.setProxyPassword(clientSettings.proxyPassword);
|
||||
}
|
||||
|
||||
Integer maxRetries = getRepoValue(repositorySettings, S3Repository.Repository.MAX_RETRIES_SETTING, clientSettings.maxRetries);
|
||||
if (maxRetries != null) {
|
||||
// If not explicitly set, default to 3 with exponential backoff policy
|
||||
clientConfiguration.setMaxErrorRetry(maxRetries);
|
||||
}
|
||||
boolean useThrottleRetries = getRepoValue(repositorySettings,
|
||||
S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING, clientSettings.throttleRetries);
|
||||
clientConfiguration.setUseThrottleRetries(useThrottleRetries);
|
||||
clientConfiguration.setMaxErrorRetry(clientSettings.maxRetries);
|
||||
clientConfiguration.setUseThrottleRetries(clientSettings.throttleRetries);
|
||||
clientConfiguration.setSocketTimeout(clientSettings.readTimeoutMillis);
|
||||
|
||||
return clientConfiguration;
|
||||
|
@ -145,14 +116,6 @@ class InternalAwsS3Service extends AbstractLifecycleComponent implements AwsS3Se
|
|||
}
|
||||
}
|
||||
|
||||
/** Returns the value for a given setting from the repository, or returns the fallback value. */
|
||||
private static <T> T getRepoValue(Settings repositorySettings, Setting<T> repositorySetting, T fallback) {
|
||||
if (repositorySetting.exists(repositorySettings)) {
|
||||
return repositorySetting.get(repositorySettings);
|
||||
}
|
||||
return fallback;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStart() throws ElasticsearchException {
|
||||
}
|
||||
|
|
|
@ -81,11 +81,11 @@ class S3ClientSettings {
|
|||
|
||||
/** The number of retries to use when an s3 request fails. */
|
||||
static final Setting.AffixSetting<Integer> MAX_RETRIES_SETTING = Setting.affixKeySetting(PREFIX, "max_retries",
|
||||
key -> Setting.intSetting(key, S3Repository.Repositories.MAX_RETRIES_SETTING, 0, Property.NodeScope));
|
||||
key -> Setting.intSetting(key, ClientConfiguration.DEFAULT_RETRY_POLICY.getMaxErrorRetry(), 0, Property.NodeScope));
|
||||
|
||||
/** Whether retries should be throttled (ie use backoff). */
|
||||
static final Setting.AffixSetting<Boolean> USE_THROTTLE_RETRIES_SETTING = Setting.affixKeySetting(PREFIX, "use_throttle_retries",
|
||||
key -> Setting.boolSetting(key, S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING, Property.NodeScope));
|
||||
key -> Setting.boolSetting(key, ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope));
|
||||
|
||||
/** Credentials to authenticate with s3. */
|
||||
final BasicAWSCredentials credentials;
|
||||
|
|
|
@ -51,29 +51,13 @@ import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
|
|||
*/
|
||||
class S3Repository extends BlobStoreRepository {
|
||||
|
||||
public static final String TYPE = "s3";
|
||||
|
||||
/**
|
||||
* Global S3 repositories settings. Starting with: repositories.s3
|
||||
* NOTE: These are legacy settings. Use the named client config settings above.
|
||||
*/
|
||||
public interface Repositories {
|
||||
/**
|
||||
* repositories.s3.bucket: The name of the bucket to be used for snapshots.
|
||||
*/
|
||||
Setting<String> BUCKET_SETTING = Setting.simpleString("repositories.s3.bucket", Property.NodeScope, Property.Deprecated);
|
||||
/**
|
||||
* repositories.s3.server_side_encryption: When set to true files are encrypted on server side using AES256 algorithm.
|
||||
* Defaults to false.
|
||||
*/
|
||||
Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING =
|
||||
Setting.boolSetting("repositories.s3.server_side_encryption", false, Property.NodeScope, Property.Deprecated);
|
||||
static final String TYPE = "s3";
|
||||
|
||||
/**
|
||||
* Default is to use 100MB (S3 defaults) for heaps above 2GB and 5% of
|
||||
* the available memory for smaller heaps.
|
||||
*/
|
||||
ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(
|
||||
private static final ByteSizeValue DEFAULT_BUFFER_SIZE = new ByteSizeValue(
|
||||
Math.max(
|
||||
ByteSizeUnit.MB.toBytes(5), // minimum value
|
||||
Math.min(
|
||||
|
@ -81,126 +65,52 @@ class S3Repository extends BlobStoreRepository {
|
|||
JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() / 20)),
|
||||
ByteSizeUnit.BYTES);
|
||||
|
||||
|
||||
static final Setting<String> BUCKET_SETTING = Setting.simpleString("bucket");
|
||||
|
||||
/**
|
||||
* repositories.s3.buffer_size: Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold,
|
||||
* When set to true files are encrypted on server side using AES256 algorithm.
|
||||
* Defaults to false.
|
||||
*/
|
||||
static final Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false);
|
||||
|
||||
/**
|
||||
* Minimum threshold below which the chunk is uploaded using a single request. Beyond this threshold,
|
||||
* the S3 repository will use the AWS Multipart Upload API to split the chunk into several parts, each of buffer_size length, and
|
||||
* to upload each part in its own request. Note that setting a buffer size lower than 5mb is not allowed since it will prevent the
|
||||
* use of the Multipart API and may result in upload errors. Defaults to the minimum between 100MB and 5% of the heap size.
|
||||
*/
|
||||
Setting<ByteSizeValue> BUFFER_SIZE_SETTING =
|
||||
Setting.byteSizeSetting("repositories.s3.buffer_size", DEFAULT_BUFFER_SIZE,
|
||||
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB), Property.NodeScope, Property.Deprecated);
|
||||
static final Setting<ByteSizeValue> BUFFER_SIZE_SETTING = Setting.byteSizeSetting("buffer_size", DEFAULT_BUFFER_SIZE,
|
||||
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));
|
||||
|
||||
/**
|
||||
* repositories.s3.max_retries: Number of retries in case of S3 errors. Defaults to 3.
|
||||
* Big files can be broken down into chunks during snapshotting if needed. Defaults to 1g.
|
||||
*/
|
||||
Setting<Integer> MAX_RETRIES_SETTING = Setting.intSetting("repositories.s3.max_retries", 3, Property.NodeScope, Property.Deprecated);
|
||||
static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = Setting.byteSizeSetting("chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB),
|
||||
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));
|
||||
|
||||
/**
|
||||
* repositories.s3.use_throttle_retries: Set to `true` if you want to throttle retries. Defaults to AWS SDK default value (`false`).
|
||||
*/
|
||||
Setting<Boolean> USE_THROTTLE_RETRIES_SETTING = Setting.boolSetting("repositories.s3.use_throttle_retries",
|
||||
ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.NodeScope, Property.Deprecated);
|
||||
/**
|
||||
* repositories.s3.chunk_size: Big files can be broken down into chunks during snapshotting if needed. Defaults to 1g.
|
||||
*/
|
||||
Setting<ByteSizeValue> CHUNK_SIZE_SETTING =
|
||||
Setting.byteSizeSetting("repositories.s3.chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB),
|
||||
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB), Property.NodeScope, Property.Deprecated);
|
||||
/**
|
||||
* repositories.s3.compress: When set to true metadata files are stored in compressed format. This setting doesn’t affect index
|
||||
* When set to true metadata files are stored in compressed format. This setting doesn’t affect index
|
||||
* files that are already compressed by default. Defaults to false.
|
||||
*/
|
||||
Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("repositories.s3.compress", false, Property.NodeScope, Property.Deprecated);
|
||||
static final Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false);
|
||||
|
||||
/**
|
||||
* repositories.s3.storage_class: Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy,
|
||||
* Sets the S3 storage class type for the backup files. Values may be standard, reduced_redundancy,
|
||||
* standard_ia. Defaults to standard.
|
||||
*/
|
||||
Setting<String> STORAGE_CLASS_SETTING = Setting.simpleString("repositories.s3.storage_class", Property.NodeScope, Property.Deprecated);
|
||||
static final Setting<String> STORAGE_CLASS_SETTING = Setting.simpleString("storage_class");
|
||||
|
||||
/**
|
||||
* repositories.s3.canned_acl: The S3 repository supports all S3 canned ACLs : private, public-read, public-read-write,
|
||||
* The S3 repository supports all S3 canned ACLs : private, public-read, public-read-write,
|
||||
* authenticated-read, log-delivery-write, bucket-owner-read, bucket-owner-full-control. Defaults to private.
|
||||
*/
|
||||
Setting<String> CANNED_ACL_SETTING = Setting.simpleString("repositories.s3.canned_acl", Property.NodeScope, Property.Deprecated);
|
||||
/**
|
||||
* repositories.s3.base_path: Specifies the path within bucket to repository data. Defaults to root directory.
|
||||
*/
|
||||
Setting<String> BASE_PATH_SETTING = Setting.simpleString("repositories.s3.base_path", Property.NodeScope, Property.Deprecated);
|
||||
/**
|
||||
* repositories.s3.path_style_access: When set to true configures the client to use path-style access for all requests.
|
||||
Amazon S3 supports virtual-hosted-style and path-style access in all Regions. The path-style syntax, however,
|
||||
requires that you use the region-specific endpoint when attempting to access a bucket.
|
||||
The default behaviour is to detect which access style to use based on the configured endpoint (an IP will result
|
||||
in path-style access) and the bucket being accessed (some buckets are not valid DNS names). Setting this flag
|
||||
will result in path-style access being used for all requests.
|
||||
*/
|
||||
Setting<Boolean> PATH_STYLE_ACCESS_SETTING = Setting.boolSetting("repositories.s3.path_style_access", false,
|
||||
Property.NodeScope, Property.Deprecated);
|
||||
}
|
||||
static final Setting<String> CANNED_ACL_SETTING = Setting.simpleString("canned_acl");
|
||||
|
||||
/**
|
||||
* Per S3 repository specific settings. Same settings as Repositories settings but without the repositories.s3 prefix.
|
||||
* If undefined, they use the repositories.s3.xxx equivalent setting.
|
||||
* Specifies the path within bucket to repository data. Defaults to root directory.
|
||||
*/
|
||||
public interface Repository {
|
||||
|
||||
Setting<String> BUCKET_SETTING = Setting.simpleString("bucket");
|
||||
|
||||
/**
|
||||
* server_side_encryption
|
||||
* @see Repositories#SERVER_SIDE_ENCRYPTION_SETTING
|
||||
*/
|
||||
Setting<Boolean> SERVER_SIDE_ENCRYPTION_SETTING = Setting.boolSetting("server_side_encryption", false);
|
||||
|
||||
/**
|
||||
* buffer_size
|
||||
* @see Repositories#BUFFER_SIZE_SETTING
|
||||
*/
|
||||
Setting<ByteSizeValue> BUFFER_SIZE_SETTING =
|
||||
Setting.byteSizeSetting("buffer_size", Repositories.DEFAULT_BUFFER_SIZE,
|
||||
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));
|
||||
/**
|
||||
* max_retries
|
||||
* @see Repositories#MAX_RETRIES_SETTING
|
||||
*/
|
||||
Setting<Integer> MAX_RETRIES_SETTING = Setting.intSetting("max_retries", 3, Property.Deprecated);
|
||||
/**
|
||||
* use_throttle_retries
|
||||
* @see Repositories#USE_THROTTLE_RETRIES_SETTING
|
||||
*/
|
||||
Setting<Boolean> USE_THROTTLE_RETRIES_SETTING = Setting.boolSetting("use_throttle_retries",
|
||||
ClientConfiguration.DEFAULT_THROTTLE_RETRIES, Property.Deprecated);
|
||||
/**
|
||||
* chunk_size
|
||||
* @see Repositories#CHUNK_SIZE_SETTING
|
||||
*/
|
||||
Setting<ByteSizeValue> CHUNK_SIZE_SETTING =
|
||||
Setting.byteSizeSetting("chunk_size", new ByteSizeValue(1, ByteSizeUnit.GB),
|
||||
new ByteSizeValue(5, ByteSizeUnit.MB), new ByteSizeValue(5, ByteSizeUnit.TB));
|
||||
/**
|
||||
* compress
|
||||
* @see Repositories#COMPRESS_SETTING
|
||||
*/
|
||||
Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false);
|
||||
/**
|
||||
* storage_class
|
||||
* @see Repositories#STORAGE_CLASS_SETTING
|
||||
*/
|
||||
Setting<String> STORAGE_CLASS_SETTING = Setting.simpleString("storage_class");
|
||||
/**
|
||||
* canned_acl
|
||||
* @see Repositories#CANNED_ACL_SETTING
|
||||
*/
|
||||
Setting<String> CANNED_ACL_SETTING = Setting.simpleString("canned_acl");
|
||||
/**
|
||||
* base_path
|
||||
* @see Repositories#BASE_PATH_SETTING
|
||||
*/
|
||||
Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path");
|
||||
/**
|
||||
* path_style_access
|
||||
* @see Repositories#PATH_STYLE_ACCESS_SETTING
|
||||
*/
|
||||
Setting<Boolean> PATH_STYLE_ACCESS_SETTING = Setting.boolSetting("path_style_access", false, Property.Deprecated);
|
||||
}
|
||||
static final Setting<String> BASE_PATH_SETTING = Setting.simpleString("base_path");
|
||||
|
||||
private final S3BlobStore blobStore;
|
||||
|
||||
|
@ -217,25 +127,25 @@ class S3Repository extends BlobStoreRepository {
|
|||
NamedXContentRegistry namedXContentRegistry, AwsS3Service s3Service) throws IOException {
|
||||
super(metadata, settings, namedXContentRegistry);
|
||||
|
||||
String bucket = getValue(metadata.settings(), settings, Repository.BUCKET_SETTING, Repositories.BUCKET_SETTING);
|
||||
String bucket = BUCKET_SETTING.get(metadata.settings());
|
||||
if (bucket == null) {
|
||||
throw new RepositoryException(metadata.name(), "No bucket defined for s3 gateway");
|
||||
}
|
||||
|
||||
boolean serverSideEncryption = getValue(metadata.settings(), settings, Repository.SERVER_SIDE_ENCRYPTION_SETTING, Repositories.SERVER_SIDE_ENCRYPTION_SETTING);
|
||||
ByteSizeValue bufferSize = getValue(metadata.settings(), settings, Repository.BUFFER_SIZE_SETTING, Repositories.BUFFER_SIZE_SETTING);
|
||||
this.chunkSize = getValue(metadata.settings(), settings, Repository.CHUNK_SIZE_SETTING, Repositories.CHUNK_SIZE_SETTING);
|
||||
this.compress = getValue(metadata.settings(), settings, Repository.COMPRESS_SETTING, Repositories.COMPRESS_SETTING);
|
||||
boolean serverSideEncryption = SERVER_SIDE_ENCRYPTION_SETTING.get(metadata.settings());
|
||||
ByteSizeValue bufferSize = BUFFER_SIZE_SETTING.get(metadata.settings());
|
||||
this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
|
||||
this.compress = COMPRESS_SETTING.get(metadata.settings());
|
||||
|
||||
// We make sure that chunkSize is greater than or equal to bufferSize
|
||||
if (this.chunkSize.getBytes() < bufferSize.getBytes()) {
|
||||
throw new RepositoryException(metadata.name(), Repository.CHUNK_SIZE_SETTING.getKey() + " (" + this.chunkSize +
|
||||
") can't be lower than " + Repository.BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ").");
|
||||
throw new RepositoryException(metadata.name(), CHUNK_SIZE_SETTING.getKey() + " (" + this.chunkSize +
|
||||
") can't be lower than " + BUFFER_SIZE_SETTING.getKey() + " (" + bufferSize + ").");
|
||||
}
|
||||
|
||||
// Parse and validate the user's S3 Storage Class setting
|
||||
String storageClass = getValue(metadata.settings(), settings, Repository.STORAGE_CLASS_SETTING, Repositories.STORAGE_CLASS_SETTING);
|
||||
String cannedACL = getValue(metadata.settings(), settings, Repository.CANNED_ACL_SETTING, Repositories.CANNED_ACL_SETTING);
|
||||
String storageClass = STORAGE_CLASS_SETTING.get(metadata.settings());
|
||||
String cannedACL = CANNED_ACL_SETTING.get(metadata.settings());
|
||||
|
||||
logger.debug("using bucket [{}], chunk_size [{}], server_side_encryption [{}], " +
|
||||
"buffer_size [{}], cannedACL [{}], storageClass [{}]",
|
||||
|
@ -244,13 +154,8 @@ class S3Repository extends BlobStoreRepository {
|
|||
AmazonS3 client = s3Service.client(metadata.settings());
|
||||
blobStore = new S3BlobStore(settings, client, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass);
|
||||
|
||||
String basePath = getValue(metadata.settings(), settings, Repository.BASE_PATH_SETTING, Repositories.BASE_PATH_SETTING);
|
||||
String basePath = BASE_PATH_SETTING.get(metadata.settings());
|
||||
if (Strings.hasLength(basePath)) {
|
||||
if (basePath.startsWith("/")) {
|
||||
basePath = basePath.substring(1);
|
||||
deprecationLogger.deprecated("S3 repository base_path trimming the leading `/`, and " +
|
||||
"leading `/` will not be supported for the S3 repository in future releases");
|
||||
}
|
||||
this.basePath = new BlobPath().add(basePath);
|
||||
} else {
|
||||
this.basePath = BlobPath.cleanPath();
|
||||
|
@ -276,15 +181,4 @@ class S3Repository extends BlobStoreRepository {
|
|||
protected ByteSizeValue chunkSize() {
|
||||
return chunkSize;
|
||||
}
|
||||
|
||||
public static <T> T getValue(Settings repositorySettings,
|
||||
Settings globalSettings,
|
||||
Setting<T> repositorySetting,
|
||||
Setting<T> repositoriesSetting) {
|
||||
if (repositorySetting.exists(repositorySettings)) {
|
||||
return repositorySetting.get(repositorySettings);
|
||||
} else {
|
||||
return repositoriesSetting.get(globalSettings);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -79,7 +79,6 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin {
|
|||
@Override
|
||||
public List<Setting<?>> getSettings() {
|
||||
return Arrays.asList(
|
||||
|
||||
// named s3 client configuration settings
|
||||
S3ClientSettings.ACCESS_KEY_SETTING,
|
||||
S3ClientSettings.SECRET_KEY_SETTING,
|
||||
|
@ -91,19 +90,6 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin {
|
|||
S3ClientSettings.PROXY_PASSWORD_SETTING,
|
||||
S3ClientSettings.READ_TIMEOUT_SETTING,
|
||||
S3ClientSettings.MAX_RETRIES_SETTING,
|
||||
S3ClientSettings.USE_THROTTLE_RETRIES_SETTING,
|
||||
|
||||
// Register S3 repositories settings: repositories.s3
|
||||
S3Repository.Repositories.BUCKET_SETTING,
|
||||
S3Repository.Repositories.SERVER_SIDE_ENCRYPTION_SETTING,
|
||||
S3Repository.Repositories.BUFFER_SIZE_SETTING,
|
||||
S3Repository.Repositories.MAX_RETRIES_SETTING,
|
||||
S3Repository.Repositories.CHUNK_SIZE_SETTING,
|
||||
S3Repository.Repositories.COMPRESS_SETTING,
|
||||
S3Repository.Repositories.STORAGE_CLASS_SETTING,
|
||||
S3Repository.Repositories.CANNED_ACL_SETTING,
|
||||
S3Repository.Repositories.BASE_PATH_SETTING,
|
||||
S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING,
|
||||
S3Repository.Repositories.PATH_STYLE_ACCESS_SETTING);
|
||||
S3ClientSettings.USE_THROTTLE_RETRIES_SETTING);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -52,43 +52,29 @@ import static org.hamcrest.Matchers.notNullValue;
|
|||
@ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0, transportClientRatio = 0.0)
|
||||
public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {
|
||||
|
||||
@Override
|
||||
public Settings nodeSettings(int nodeOrdinal) {
|
||||
// nodeSettings is called before `wipeBefore()` so we need to define basePath here
|
||||
globalBasePath = "repo-" + randomInt();
|
||||
return Settings.builder().put(super.nodeSettings(nodeOrdinal))
|
||||
.put(S3Repository.Repositories.BASE_PATH_SETTING.getKey(), globalBasePath)
|
||||
.build();
|
||||
}
|
||||
|
||||
private String basePath;
|
||||
private String globalBasePath;
|
||||
|
||||
@Before
|
||||
public final void wipeBefore() {
|
||||
wipeRepositories();
|
||||
basePath = "repo-" + randomInt();
|
||||
cleanRepositoryFiles(basePath);
|
||||
cleanRepositoryFiles(globalBasePath);
|
||||
}
|
||||
|
||||
@After
|
||||
public final void wipeAfter() {
|
||||
wipeRepositories();
|
||||
cleanRepositoryFiles(basePath);
|
||||
cleanRepositoryFiles(globalBasePath);
|
||||
}
|
||||
|
||||
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211")
|
||||
public void testSimpleWorkflow() {
|
||||
Client client = client();
|
||||
Settings.Builder settings = Settings.builder()
|
||||
.put(S3Repository.Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000));
|
||||
.put(S3Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000));
|
||||
|
||||
// We sometime test getting the base_path from node settings using repositories.s3.base_path
|
||||
if (usually()) {
|
||||
settings.put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath);
|
||||
}
|
||||
settings.put(S3Repository.BASE_PATH_SETTING.getKey(), basePath);
|
||||
|
||||
logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
|
@ -163,10 +149,9 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
|
|||
logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);
|
||||
|
||||
Settings repositorySettings = Settings.builder()
|
||||
.put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000))
|
||||
.put(S3Repository.Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true)
|
||||
.put(S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING.getKey(), randomBoolean())
|
||||
.put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000))
|
||||
.put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true)
|
||||
.build();
|
||||
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
|
@ -257,8 +242,8 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
|
|||
try {
|
||||
client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("s3").setSettings(Settings.builder()
|
||||
.put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
|
||||
.put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
|
||||
).get();
|
||||
fail("repository verification should have raise an exception!");
|
||||
} catch (RepositoryVerificationException e) {
|
||||
|
@ -269,7 +254,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
|
|||
Client client = client();
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("s3").setSettings(Settings.builder()
|
||||
.put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
).get();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
|
@ -282,8 +267,8 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
|
|||
logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath);
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("s3").setSettings(Settings.builder()
|
||||
.put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
|
||||
.put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
|
||||
).get();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
|
@ -297,8 +282,8 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
|
|||
logger.info("--> creating s3 repostoriy with endpoint [{}], bucket[{}] and path [{}]", bucketSettings.get("endpoint"), bucketSettings.get("bucket"), basePath);
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("s3").setSettings(Settings.builder()
|
||||
.put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
|
||||
.put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
|
||||
.put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
).get();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
assertRepositoryIsOperational(client, "test-repo");
|
||||
|
@ -315,8 +300,8 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
|
|||
try {
|
||||
client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("s3").setSettings(Settings.builder()
|
||||
.put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
|
||||
.put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
|
||||
// Below setting intentionally omitted to assert bucket is not available in default region.
|
||||
// .put("region", privateBucketSettings.get("region"))
|
||||
).get();
|
||||
|
@ -333,8 +318,8 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
|
|||
logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath);
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("s3").setSettings(Settings.builder()
|
||||
.put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
|
||||
.put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
|
||||
).get();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
|
@ -349,7 +334,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
|
|||
logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);
|
||||
PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
|
||||
.setType("s3").setSettings(Settings.builder()
|
||||
.put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
).get();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
|
@ -370,7 +355,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
|
|||
logger.info("--> creating s3 repository without any path");
|
||||
PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo")
|
||||
.setType("s3").setSettings(Settings.builder()
|
||||
.put(S3Repository.Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
.put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
|
||||
).get();
|
||||
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
|
@ -460,8 +445,7 @@ public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase
|
|||
// We check that the settings have been set in the elasticsearch.yml integration test file
|
||||
// as described in README
|
||||
assertThat("Your settings in elasticsearch.yml are incorrect. Check README file.", bucketName, notNullValue());
|
||||
AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(
|
||||
Settings.builder().put(S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING.getKey(), randomBoolean()).build());
|
||||
AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(Settings.EMPTY);
|
||||
try {
|
||||
ObjectListing prevListing = null;
|
||||
//From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html
|
||||
|
|
|
@ -87,17 +87,6 @@ public class AwsS3ServiceImplTests extends ESTestCase {
|
|||
"aws_proxy_password", 3, false, 10000);
|
||||
}
|
||||
|
||||
public void testGlobalMaxRetriesBackcompat() {
|
||||
Settings settings = Settings.builder()
|
||||
.put(S3Repository.Repositories.MAX_RETRIES_SETTING.getKey(), 10)
|
||||
.build();
|
||||
launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTPS, null, -1, null,
|
||||
null, 10, false, 50000);
|
||||
assertSettingDeprecationsAndWarnings(new Setting<?>[]{
|
||||
S3Repository.Repositories.MAX_RETRIES_SETTING
|
||||
});
|
||||
}
|
||||
|
||||
public void testRepositoryMaxRetries() {
|
||||
Settings settings = Settings.builder()
|
||||
.put("s3.client.default.max_retries", 5)
|
||||
|
@ -106,31 +95,6 @@ public class AwsS3ServiceImplTests extends ESTestCase {
            null, 5, false, 50000);
    }

    public void testRepositoryMaxRetriesBackcompat() {
        Settings repositorySettings = Settings.builder()
            .put(S3Repository.Repository.MAX_RETRIES_SETTING.getKey(), 20).build();
        Settings settings = Settings.builder()
            .put(S3Repository.Repositories.MAX_RETRIES_SETTING.getKey(), 10)
            .build();
        launchAWSConfigurationTest(settings, repositorySettings, Protocol.HTTPS, null, -1, null,
            null, 20, false, 50000);
        assertSettingDeprecationsAndWarnings(new Setting<?>[]{
            S3Repository.Repositories.MAX_RETRIES_SETTING,
            S3Repository.Repository.MAX_RETRIES_SETTING
        });
    }

    public void testGlobalThrottleRetriesBackcompat() {
        Settings settings = Settings.builder()
            .put(S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING.getKey(), true)
            .build();
        launchAWSConfigurationTest(settings, Settings.EMPTY, Protocol.HTTPS, null, -1, null,
            null, 3, true, 50000);
        assertSettingDeprecationsAndWarnings(new Setting<?>[]{
            S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING
        });
    }

    public void testRepositoryThrottleRetries() {
        Settings settings = Settings.builder()
            .put("s3.client.default.use_throttle_retries", true)
@ -139,20 +103,6 @@ public class AwsS3ServiceImplTests extends ESTestCase {
            null, 3, true, 50000);
    }

    public void testRepositoryThrottleRetriesBackcompat() {
        Settings repositorySettings = Settings.builder()
            .put(S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING.getKey(), true).build();
        Settings settings = Settings.builder()
            .put(S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING.getKey(), false)
            .build();
        launchAWSConfigurationTest(settings, repositorySettings, Protocol.HTTPS, null, -1, null,
            null, 3, true, 50000);
        assertSettingDeprecationsAndWarnings(new Setting<?>[]{
            S3Repository.Repositories.USE_THROTTLE_RETRIES_SETTING,
            S3Repository.Repository.USE_THROTTLE_RETRIES_SETTING
        });
    }

    private void launchAWSConfigurationTest(Settings settings,
                                            Settings singleRepositorySettings,
                                            Protocol expectedProtocol,
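The backcompat tests removed above exercised the deprecated S3Repository.Repository/Repositories settings; the surviving tests drive client-scoped keys instead. A minimal, illustrative sketch of building node settings with those keys (the key names are taken from the test bodies above; the class and method are hypothetical and not part of this change set):

    import org.elasticsearch.common.settings.Settings;

    // Illustrative only: client-scoped S3 settings that replace the deprecated
    // repository/global max_retries and use_throttle_retries settings.
    public class S3ClientSettingsExample {
        public static Settings nodeSettings() {
            return Settings.builder()
                    .put("s3.client.default.max_retries", 5)              // retry count for the default S3 client
                    .put("s3.client.default.use_throttle_retries", true)  // honour S3 throttled-retry backoff
                    .build();
        }
    }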
@ -19,12 +19,12 @@
package org.elasticsearch.repositories.s3;

import java.io.IOException;

import com.amazonaws.services.s3.AbstractAmazonS3;
import com.amazonaws.services.s3.AmazonS3;
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
@ -33,11 +33,6 @@ import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;

import java.io.IOException;

import static org.elasticsearch.repositories.s3.S3Repository.Repositories;
import static org.elasticsearch.repositories.s3.S3Repository.Repository;
import static org.elasticsearch.repositories.s3.S3Repository.getValue;
import static org.hamcrest.Matchers.containsString;

public class S3RepositoryTests extends ESTestCase {
@ -82,15 +77,15 @@ public class S3RepositoryTests extends ESTestCase {

    private void assertValidBuffer(long bufferMB, long chunkMB) throws IOException {
        RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
            .put(Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB))
            .put(Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB)).build());
            .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB))
            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB)).build());
        new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service());
    }

    private void assertInvalidBuffer(int bufferMB, int chunkMB, Class<? extends Exception> clazz, String msg) throws IOException {
        RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
            .put(Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB))
            .put(Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB)).build());
            .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), new ByteSizeValue(bufferMB, ByteSizeUnit.MB))
            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), new ByteSizeValue(chunkMB, ByteSizeUnit.MB)).build());

        Exception e = expectThrows(clazz, () -> new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY,
            new DummyS3Service()));
@ -99,26 +94,14 @@ public class S3RepositoryTests extends ESTestCase {

    public void testBasePathSetting() throws IOException {
        RepositoryMetaData metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.builder()
            .put(Repository.BASE_PATH_SETTING.getKey(), "/foo/bar").build());
            .put(S3Repository.BASE_PATH_SETTING.getKey(), "foo/bar").build());
        S3Repository s3repo = new S3Repository(metadata, Settings.EMPTY, NamedXContentRegistry.EMPTY, new DummyS3Service());
        assertEquals("foo/bar/", s3repo.basePath().buildAsString()); // make sure leading `/` is removed and trailing is added
        assertWarnings("S3 repository base_path" +
            " trimming the leading `/`, and leading `/` will not be supported for the S3 repository in future releases");
        metadata = new RepositoryMetaData("dummy-repo", "mock", Settings.EMPTY);
        Settings settings = Settings.builder().put(Repositories.BASE_PATH_SETTING.getKey(), "/foo/bar").build();
        s3repo = new S3Repository(metadata, settings, NamedXContentRegistry.EMPTY, new DummyS3Service());
        assertEquals("foo/bar/", s3repo.basePath().buildAsString()); // make sure leading `/` is removed and trailing is added
        assertSettingDeprecationsAndWarnings(new Setting<?>[] { Repositories.BASE_PATH_SETTING },
            "S3 repository base_path" +
            " trimming the leading `/`, and leading `/` will not be supported for the S3 repository in future releases");
        assertEquals("foo/bar/", s3repo.basePath().buildAsString());
    }

    public void testDefaultBufferSize() {
        ByteSizeValue defaultBufferSize = S3Repository.Repository.BUFFER_SIZE_SETTING.get(Settings.EMPTY);
        ByteSizeValue defaultBufferSize = S3Repository.BUFFER_SIZE_SETTING.get(Settings.EMPTY);
        assertThat(defaultBufferSize, Matchers.lessThanOrEqualTo(new ByteSizeValue(100, ByteSizeUnit.MB)));
        assertThat(defaultBufferSize, Matchers.greaterThanOrEqualTo(new ByteSizeValue(5, ByteSizeUnit.MB)));

        ByteSizeValue defaultNodeBufferSize = S3Repository.Repositories.BUFFER_SIZE_SETTING.get(Settings.EMPTY);
        assertEquals(defaultBufferSize, defaultNodeBufferSize);
    }
}
@ -25,6 +25,10 @@
      "ignore_unavailable": {
        "type": "boolean",
        "description": "Whether to ignore unavailable snapshots, defaults to false which means a SnapshotMissingException is thrown"
      },
      "verbose": {
        "type": "boolean",
        "description": "Whether to show verbose snapshot info or only show the basic info found in the repository index blob"
      }
    }
  },
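For completeness, a hedged Java-client counterpart to the new REST parameter: the sketch below assumes a repository named "my_backup" and a setVerbose(...) method on the get-snapshots request builder mirroring the request-level verbose flag; it is illustrative only, not part of this change set.

    import java.util.List;

    import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.snapshots.SnapshotInfo;

    // Illustrative only: list snapshot names and states without reading each
    // per-snapshot metadata blob from the repository.
    public class NonVerboseSnapshotListing {
        public static void printSummaries(Client client) {
            GetSnapshotsResponse response = client.admin().cluster()
                    .prepareGetSnapshots("my_backup")   // placeholder repository name
                    .setSnapshots("_all")
                    .setVerbose(false)                  // assumed builder setter for the verbose flag
                    .get();
            List<SnapshotInfo> snapshots = response.getSnapshots();
            for (SnapshotInfo info : snapshots) {
                System.out.println(info.snapshotId().getName() + " -> " + info.state());
            }
        }
    }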
@ -33,6 +33,11 @@ setup:

  - is_true: snapshots

  - do:
      snapshot.delete:
        repository: test_repo_get_1
        snapshot: test_snapshot

---
"Get missing snapshot info throws an exception":

@ -52,3 +57,39 @@ setup:
        ignore_unavailable: true

  - is_true: snapshots

---
"Get snapshot info when verbose is false":
  - skip:
      version: " - 5.99.99"
      reason: verbose mode was introduced in 6.0

  - do:
      indices.create:
        index: test_index
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 0

  - do:
      snapshot.create:
        repository: test_repo_get_1
        snapshot: test_snapshot
        wait_for_completion: true

  - do:
      snapshot.get:
        repository: test_repo_get_1
        snapshot: test_snapshot
        verbose: false

  - is_true: snapshots
  - match: { snapshots.0.snapshot: test_snapshot }
  - match: { snapshots.0.state: SUCCESS }
  - is_false: snapshots.0.version

  - do:
      snapshot.delete:
        repository: test_repo_get_1
        snapshot: test_snapshot
@ -25,6 +25,7 @@ List projects = [
  'test:framework',
  'test:fixtures:example-fixture',
  'test:fixtures:hdfs-fixture',
  'test:fixtures:krb5kdc-fixture',
  'test:logger-usage',
  'modules:aggs-matrix-stats',
  'modules:analysis-common',
@ -19,19 +19,24 @@

package hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;

import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.Locale;
import java.util.ArrayList;
import java.util.List;

import java.lang.management.ManagementFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;

/**
 * MiniHDFS test fixture. There is a CLI tool, but here we can
@ -43,9 +48,11 @@ public class MiniHDFS {
    private static String PID_FILE_NAME = "pid";

    public static void main(String[] args) throws Exception {
        if (args.length != 1) {
            throw new IllegalArgumentException("MiniHDFS <baseDirectory>");
        if (args.length != 1 && args.length != 3) {
            throw new IllegalArgumentException("MiniHDFS <baseDirectory> [<kerberosPrincipal> <kerberosKeytab>]");
        }
        boolean secure = args.length == 3;

        // configure Paths
        Path baseDir = Paths.get(args[0]);
        // hadoop-home/, so logs will not complain
@ -57,13 +64,49 @@ public class MiniHDFS {
        // hdfs-data/, where any data is going
        Path hdfsHome = baseDir.resolve("hdfs-data");

        // start cluster
        // configure cluster
        Configuration cfg = new Configuration();
        cfg.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsHome.toAbsolutePath().toString());
        // lower default permission: TODO: needed?
        cfg.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY, "766");

        // optionally configure security
        if (secure) {
            String kerberosPrincipal = args[1];
            String keytabFile = args[2];

            cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
            cfg.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, "true");
            cfg.set(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
            cfg.set(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
            cfg.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, kerberosPrincipal);
            cfg.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, keytabFile);
            cfg.set(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY, keytabFile);
            cfg.set(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, "true");
            cfg.set(DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY, "true");
        }

        UserGroupInformation.setConfiguration(cfg);

        // TODO: remove hardcoded port!
        MiniDFSCluster dfs = new MiniDFSCluster.Builder(cfg).nameNodePort(9999).build();
        MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg);
        if (secure) {
            builder.nameNodePort(9998);
        } else {
            builder.nameNodePort(9999);
        }
        MiniDFSCluster dfs = builder.build();

        // Set the elasticsearch user directory up
        if (UserGroupInformation.isSecurityEnabled()) {
            FileSystem fs = dfs.getFileSystem();
            org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
            fs.mkdirs(esUserPath);
            List<AclEntry> acls = new ArrayList<>();
            acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch").setPermission(FsAction.ALL).build());
            fs.modifyAclEntries(esUserPath, acls);
            fs.close();
        }

        // write our PID file
        Path tmp = Files.createTempFile(baseDir, null, null);
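As a rough sketch of how a caller might reach the secured fixture configured above: the 9998 port and the /user/elasticsearch directory come from the MiniHDFS code, while the krb5.conf path, principal name and keytab location are placeholders for whatever the surrounding build renders; additional client-side Kerberos settings may be needed in practice.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    // Illustrative only: connect to the secure MiniHDFS fixture as the elasticsearch principal.
    public class SecureMiniHdfsClientSketch {
        public static void main(String[] args) throws Exception {
            System.setProperty("java.security.krb5.conf", "build/conf/krb5.conf"); // placeholder path
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:9998"); // secure fixture port from MiniHDFS above
            conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
            UserGroupInformation.setConfiguration(conf);
            UserGroupInformation.loginUserFromKeytab(
                    "elasticsearch@BUILD.ELASTIC.CO",      // principal name is a placeholder
                    "build/keytabs/elasticsearch.keytab"); // keytab path is a placeholder
            try (FileSystem fs = FileSystem.get(conf)) {
                System.out.println(fs.exists(new Path("/user/elasticsearch")));
            }
        }
    }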
@ -0,0 +1,53 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# This Vagrantfile exists to define a virtual machine running MIT's Kerberos 5
# for usage as a testing fixture for the build process.
#
# In order to connect to the KDC process on this virtual machine, find and use
# the rendered krb5.conf file in the build output directory (build/conf).
#
# In order to provision principals in the KDC, use the provided addprinc.sh
# script with vagrant's ssh facility:
#
#    vagrant ssh -c /vagrant/src/main/resources/provision/addprinc.sh principal
#
# You will find the newly created principal's keytab file in the build output
# directory (build/keytabs). Principal creation is idempotent, and will recopy
# existing user keytabs from the KDC if they already exist.

Vagrant.configure("2") do |config|

  config.vm.define "krb5kdc" do |config|
    config.vm.box = "elastic/ubuntu-14.04-x86_64"
  end

  config.vm.hostname = "kerberos.build.elastic.co"

  if Vagrant.has_plugin?("vagrant-cachier")
    config.cache.scope = :box
  end

  config.vm.network "forwarded_port", guest: 88, host: 60088, protocol: "tcp"
  config.vm.network "forwarded_port", guest: 88, host: 60088, protocol: "udp"

  config.vm.provision "shell", path: "src/main/resources/provision/installkdc.sh"

end
@ -0,0 +1,84 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

apply plugin: 'elasticsearch.build'

Map<String, String> vagrantEnvVars = [
    'VAGRANT_CWD' : "${project.projectDir.absolutePath}",
    'VAGRANT_VAGRANTFILE' : 'Vagrantfile',
    'VAGRANT_PROJECT_DIR' : "${project.projectDir.absolutePath}"
]

String box = "krb5kdc"

List<String> defaultPrincipals = [ "elasticsearch" ]

task update(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
    command 'box'
    subcommand 'update'
    boxName box
    environmentVars vagrantEnvVars
}

task up(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
    command 'up'
    args '--provision', '--provider', 'virtualbox'
    boxName box
    environmentVars vagrantEnvVars
    dependsOn update
}

task addDefaultPrincipals {
    dependsOn up
}

for (String principal : defaultPrincipals) {
    Task addTask = project.tasks.create("addPrincipal#${principal}", org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
        command 'ssh'
        args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $principal"
        boxName box
        environmentVars vagrantEnvVars
        dependsOn up
    }
    addDefaultPrincipals.dependsOn(addTask)
}

task halt(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
    command 'halt'
    boxName box
    environmentVars vagrantEnvVars
}

task destroy(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) {
    command 'destroy'
    args '-f'
    boxName box
    environmentVars vagrantEnvVars
    dependsOn halt
}

thirdPartyAudit.enabled = false
licenseHeaders.enabled = false
test.enabled = false

// installKDC uses tabs in it for the Kerberos ACL file.
// Ignore it for pattern checking.
forbiddenPatterns {
    exclude "**/installkdc.sh"
}
@ -0,0 +1,20 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

realm=BUILD.ELASTIC.CO
kdc=kerberos.build.elastic.co
zone=build.elastic.co
@ -0,0 +1,58 @@
#!/bin/bash

# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

set -e

if [[ $# -lt 1 ]]; then
  echo 'Usage: addprinc.sh <principalNameNoRealm>'
  exit 1
fi

PRINC="$1"
USER=$(echo $PRINC | tr "/" "_")

VDIR=/vagrant
RESOURCES=$VDIR/src/main/resources
PROV_DIR=$RESOURCES/provision
ENVPROP_FILE=$RESOURCES/env.properties
BUILD_DIR=$VDIR/build
CONF_DIR=$BUILD_DIR/conf
KEYTAB_DIR=$BUILD_DIR/keytabs
LOCALSTATEDIR=/etc
LOGDIR=/var/log/krb5

mkdir -p $KEYTAB_DIR

REALM=$(cat $ENVPROP_FILE | grep realm= | head -n 1 | cut -d '=' -f 2)

ADMIN_PRIN=admin/admin@$REALM
ADMIN_KTAB=$LOCALSTATEDIR/admin.keytab

USER_PRIN=$PRINC@$REALM
USER_KTAB=$LOCALSTATEDIR/$USER.keytab

if [ -f $USER_KTAB ]; then
  echo "Principal '${PRINC}@${REALM}' already exists. Re-copying keytab..."
else
  echo "Provisioning '${PRINC}@${REALM}' principal and keytab..."
  sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -randkey $USER_PRIN"
  sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "ktadd -k $USER_KTAB $USER_PRIN"
fi

sudo cp $USER_KTAB $KEYTAB_DIR/$USER.keytab
test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh (vendored executable file, 120 lines)
@ -0,0 +1,120 @@
#!/bin/bash

# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

set -e

# KDC installation steps and considerations based on https://web.mit.edu/kerberos/krb5-latest/doc/admin/install_kdc.html
# and helpful input from https://help.ubuntu.com/community/Kerberos

VDIR=/vagrant
RESOURCES=$VDIR/src/main/resources
PROV_DIR=$RESOURCES/provision
ENVPROP_FILE=$RESOURCES/env.properties
BUILD_DIR=$VDIR/build
CONF_DIR=$BUILD_DIR/conf
KEYTAB_DIR=$BUILD_DIR/keytabs
LOCALSTATEDIR=/etc
LOGDIR=/var/log/krb5

MARKER_FILE=/etc/marker

# Output location for our rendered configuration files and keytabs
mkdir -p $BUILD_DIR
rm -rf $BUILD_DIR/*
mkdir -p $CONF_DIR
mkdir -p $KEYTAB_DIR

if [ -f $MARKER_FILE ]; then
  echo "Already provisioned..."
  echo "Recopying configuration files..."
  cp $LOCALSTATEDIR/krb5.conf $CONF_DIR/krb5.conf
  cp $LOCALSTATEDIR/krb5kdc/kdc.conf $CONF_DIR/kdc.conf
  exit 0;
fi

# Pull environment information
REALM_NAME=$(cat $ENVPROP_FILE | grep realm= | cut -d '=' -f 2)
KDC_NAME=$(cat $ENVPROP_FILE | grep kdc= | cut -d '=' -f 2)
BUILD_ZONE=$(cat $ENVPROP_FILE | grep zone= | cut -d '=' -f 2)
ELASTIC_ZONE=$(echo $BUILD_ZONE | cut -d '.' -f 1,2)

# Transfer and interpolate krb5.conf
cp $PROV_DIR/krb5.conf.template $LOCALSTATEDIR/krb5.conf
sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5.conf
sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5.conf
sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5.conf
sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf
cp $LOCALSTATEDIR/krb5.conf $CONF_DIR/krb5.conf

# Transfer and interpolate the kdc.conf
mkdir -p $LOCALSTATEDIR/krb5kdc
cp $PROV_DIR/kdc.conf.template $LOCALSTATEDIR/krb5kdc/kdc.conf
sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf
sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf
sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf
sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf
cp $LOCALSTATEDIR/krb5kdc/kdc.conf $CONF_DIR/kdc.conf

# Touch logging locations
mkdir -p $LOGDIR
touch $LOGDIR/kadmin.log
touch $LOGDIR/krb5kdc.log
touch $LOGDIR/krb5lib.log

# Update package manager
apt-get update -qqy

# Installation asks a bunch of questions via debconf. Set the answers ahead of time
debconf-set-selections <<< "krb5-config krb5-config/read_conf boolean true"
debconf-set-selections <<< "krb5-config krb5-config/kerberos_servers string $KDC_NAME"
debconf-set-selections <<< "krb5-config krb5-config/add_servers boolean true"
debconf-set-selections <<< "krb5-config krb5-config/admin_server string $KDC_NAME"
debconf-set-selections <<< "krb5-config krb5-config/add_servers_realm string $REALM_NAME"
debconf-set-selections <<< "krb5-config krb5-config/default_realm string $REALM_NAME"
debconf-set-selections <<< "krb5-admin-server krb5-admin-server/kadmind boolean true"
debconf-set-selections <<< "krb5-admin-server krb5-admin-server/newrealm note"
debconf-set-selections <<< "krb5-kdc krb5-kdc/debconf boolean true"
debconf-set-selections <<< "krb5-kdc krb5-kdc/purge_data_too boolean false"

# Install krb5 packages
apt-get install -qqy krb5-{admin-server,kdc}

# /dev/random produces output very slowly on Ubuntu VM's. Install haveged to increase entropy.
apt-get install -qqy haveged
haveged

# Create kerberos database with stash file and garbage password
kdb5_util create -s -r $REALM_NAME -P zyxwvutsrpqonmlk9876

# Set up admin acls
cat << EOF > /etc/krb5kdc/kadm5.acl
*/admin@$REALM_NAME *
*/*@$REALM_NAME i
EOF

# Create admin principal
kadmin.local -q "addprinc -pw elastic admin/admin@$REALM_NAME"
kadmin.local -q "ktadd -k /etc/admin.keytab admin/admin@$REALM_NAME"

# Start Kerberos Services
krb5kdc
kadmind

# Mark that the vm is already provisioned
touch $MARKER_FILE
test/fixtures/krb5kdc-fixture/src/main/resources/provision/kdc.conf.template (vendored file, 35 lines)
@ -0,0 +1,35 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

[kdcdefaults]
    kdc_listen = 88
    kdc_tcp_listen = 88

[realms]
    ${REALM_NAME} = {
        kadmind_port = 749
        max_life = 12h 0m 0s
        max_renewable_life = 7d 0h 0m 0s
        master_key_type = aes256-cts
        # remove aes256-cts:normal since unlimited strength policy needs installed for java to use it.
        supported_enctypes = aes128-cts:normal des3-hmac-sha1:normal arcfour-hmac:normal des-hmac-sha1:normal des-cbc-md5:normal des-cbc-crc:normal
    }

[logging]
    kdc = FILE:/var/log/krb5kdc.log
    admin_server = FILE:/var/log/kadmin.log
    default = FILE:/var/log/krb5lib.log
test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template (vendored file, 50 lines)
@ -0,0 +1,50 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

[libdefaults]
    default_realm = ${REALM_NAME}
    dns_canonicalize_hostname = false
    dns_lookup_kdc = false
    dns_lookup_realm = false
    dns_uri_lookup = false
    forwardable = true
    ignore_acceptor_hostname = true
    rdns = false
    default_tgs_enctypes = rc4-hmac
    default_tkt_enctypes = rc4-hmac
    permitted_enctypes = rc4-hmac
    # udp_preference_limit = 1
    kdc_timeout = 3000

[realms]
    ${REALM_NAME} = {
        kdc = ${KDC_NAME}:88
        kdc = ${KDC_NAME}:60088
        kdc = localhost:60088
        kdc = localhost:88
        kdc = 127.0.0.1:60088
        kdc = 127.0.0.1:88
        admin_server = ${KDC_NAME}:749
        default_domain = ${BUILD_ZONE}
    }

[domain_realm]
    ${BUILD_ZONE} = ${REALM_NAME}
    .${BUILD_ZONE} = ${REALM_NAME}
    ${ELASTIC_ZONE} = ${REALM_NAME}
    .${ELASTIC_ZONE} = ${REALM_NAME}
@ -350,15 +350,11 @@ public abstract class AnalysisFactoryTestCase extends ESTestCase {
            case LOWERCASE:
                // This has been migrated but has to stick around until PreBuiltTokenizers is removed.
                continue;
            case SNOWBALL:
            case DUTCH_STEM:
            case FRENCH_STEM:
            case RUSSIAN_STEM:
                luceneFactoryClass = SnowballPorterFilterFactory.class;
                break;
            case STEMMER:
                luceneFactoryClass = PorterStemFilterFactory.class;
                break;
            case DELIMITED_PAYLOAD_FILTER:
                luceneFactoryClass = org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory.class;
                break;
@ -172,8 +172,6 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
            // This needs to tie into the ESIntegTestCase#indexSettings() method
            .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), createTempDir().getParent())
            .put("node.name", "node_s_0")
            .put("script.inline", "true")
            .put("script.stored", "true")
            .put(ScriptService.SCRIPT_MAX_COMPILATIONS_PER_MINUTE.getKey(), 1000)
            .put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created
            .put(NetworkModule.HTTP_ENABLED.getKey(), false)
@ -47,14 +47,14 @@ public class VersionUtilsTests extends ESTestCase {

        // sub range
        got = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0,
            Version.V_6_0_0_alpha1_UNRELEASED);
            Version.V_6_0_0_alpha2_UNRELEASED);
        assertTrue(got.onOrAfter(Version.V_5_0_0));
        assertTrue(got.onOrBefore(Version.V_6_0_0_alpha1_UNRELEASED));
        assertTrue(got.onOrBefore(Version.V_6_0_0_alpha2_UNRELEASED));

        // unbounded lower
        got = VersionUtils.randomVersionBetween(random(), null, Version.V_6_0_0_alpha1_UNRELEASED);
        got = VersionUtils.randomVersionBetween(random(), null, Version.V_6_0_0_alpha2_UNRELEASED);
        assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
        assertTrue(got.onOrBefore(Version.V_6_0_0_alpha1_UNRELEASED));
        assertTrue(got.onOrBefore(Version.V_6_0_0_alpha2_UNRELEASED));
        got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.allReleasedVersions().get(0));
        assertTrue(got.onOrAfter(VersionUtils.getFirstVersion()));
        assertTrue(got.onOrBefore(VersionUtils.allReleasedVersions().get(0)));
@ -72,9 +72,9 @@ public class VersionUtilsTests extends ESTestCase {
        assertEquals(got, VersionUtils.getFirstVersion());
        got = VersionUtils.randomVersionBetween(random(), Version.CURRENT, Version.CURRENT);
        assertEquals(got, Version.CURRENT);
        got = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha1_UNRELEASED,
            Version.V_6_0_0_alpha1_UNRELEASED);
        assertEquals(got, Version.V_6_0_0_alpha1_UNRELEASED);
        got = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_alpha2_UNRELEASED,
            Version.V_6_0_0_alpha2_UNRELEASED);
        assertEquals(got, Version.V_6_0_0_alpha2_UNRELEASED);

        // implicit range of one
        got = VersionUtils.randomVersionBetween(random(), null, VersionUtils.getFirstVersion());