Remove 2.0 prerelease version constants (#22004)

* Remove 2.0 prerelease version constants

This is a start to addressing #21887. This removes:
* pre 2.0 snapshot format support
* automatic units addition to cluster settings
* bwc check for delete by query in pre 2.0 indexes
Ryan Ernst 2016-12-08 21:48:35 -08:00 committed by GitHub
parent 7f79c99e9a
commit b1cef5fdf8
11 changed files with 27 additions and 385 deletions

View File

@@ -35,12 +35,6 @@ public class Version {
* values below 25 are for alpha builds (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99
* indicating a release; the (internal) format of the id is there so we can easily do after/before checks on the id
*/
public static final int V_2_0_0_beta1_ID = 2000001;
public static final Version V_2_0_0_beta1 = new Version(V_2_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_0_beta2_ID = 2000002;
public static final Version V_2_0_0_beta2 = new Version(V_2_0_0_beta2_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_0_rc1_ID = 2000051;
public static final Version V_2_0_0_rc1 = new Version(V_2_0_0_rc1_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_0_ID = 2000099;
public static final Version V_2_0_0 = new Version(V_2_0_0_ID, org.apache.lucene.util.Version.LUCENE_5_2_1);
public static final int V_2_0_1_ID = 2000199;
@@ -190,12 +184,6 @@ public class Version {
return V_2_0_1;
case V_2_0_0_ID:
return V_2_0_0;
case V_2_0_0_rc1_ID:
return V_2_0_0_rc1;
case V_2_0_0_beta2_ID:
return V_2_0_0_beta2;
case V_2_0_0_beta1_ID:
return V_2_0_0_beta1;
default:
return new Version(id, org.apache.lucene.util.Version.LATEST);
}
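The removed constants follow the id scheme described in the comment above: two digits each for major, minor, and revision, plus a build slot where 99 marks a GA release and lower values mark prereleases (compare V_2_0_0_beta1_ID = 2000001 with V_2_0_0_ID = 2000099). A minimal, hypothetical decoder, not part of this patch, makes that layout explicit:

// Hypothetical helper illustrating the internal id layout; not in the patch.
static String describe(int id) {
    int major = (id / 1_000_000) % 100;  // 2000001 -> 2
    int minor = (id / 10_000) % 100;     // 2000001 -> 0
    int revision = (id / 100) % 100;     // 2000001 -> 0
    int build = id % 100;                // 1 = prerelease slot (beta1 pre-5.0), 99 = GA
    return major + "." + minor + "." + revision + (build == 99 ? "" : "-build" + build);
}
// describe(2000001) -> "2.0.0-build1" (i.e. 2.0.0-beta1); describe(2000099) -> "2.0.0"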

View File

@@ -753,80 +753,6 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
return new Builder(metaData);
}
/** All known byte-sized cluster settings. */
public static final Set<String> CLUSTER_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet(
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()));
/** All known time cluster settings. */
public static final Set<String> CLUSTER_TIME_SETTINGS = unmodifiableSet(newHashSet(
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(),
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(),
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(),
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(),
RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(),
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(),
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(),
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(),
DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(),
ClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey()));
/** As of 2.0 we require units for time and byte-sized settings. This method adds default units to any cluster settings that don't
* specify a unit. */
public static MetaData addDefaultUnitsIfNeeded(Logger logger, MetaData metaData) {
Settings.Builder newPersistentSettings = null;
for(Map.Entry<String,String> ent : metaData.persistentSettings().getAsMap().entrySet()) {
String settingName = ent.getKey();
String settingValue = ent.getValue();
if (CLUSTER_BYTES_SIZE_SETTINGS.contains(settingName)) {
try {
Long.parseLong(settingValue);
} catch (NumberFormatException nfe) {
continue;
}
// It's a naked number that previously would be interpreted as default unit (bytes); now we add it:
logger.warn("byte-sized cluster setting [{}] with value [{}] is missing units; assuming default units (b) but in future versions this will be a hard error", settingName, settingValue);
if (newPersistentSettings == null) {
newPersistentSettings = Settings.builder();
newPersistentSettings.put(metaData.persistentSettings());
}
newPersistentSettings.put(settingName, settingValue + "b");
}
if (CLUSTER_TIME_SETTINGS.contains(settingName)) {
try {
Long.parseLong(settingValue);
} catch (NumberFormatException nfe) {
continue;
}
// It's a naked number that previously would be interpreted as default unit (ms); now we add it:
logger.warn("time cluster setting [{}] with value [{}] is missing units; assuming default units (ms) but in future versions this will be a hard error", settingName, settingValue);
if (newPersistentSettings == null) {
newPersistentSettings = Settings.builder();
newPersistentSettings.put(metaData.persistentSettings());
}
newPersistentSettings.put(settingName, settingValue + "ms");
}
}
if (newPersistentSettings != null) {
return new MetaData(
metaData.clusterUUID(),
metaData.version(),
metaData.transientSettings(),
newPersistentSettings.build(),
metaData.getIndices(),
metaData.getTemplates(),
metaData.getCustoms(),
metaData.getConcreteAllIndices(),
metaData.getConcreteAllOpenIndices(),
metaData.getConcreteAllClosedIndices(),
metaData.getAliasAndIndexLookup());
} else {
// No changes:
return metaData;
}
}
public static class Builder {
private String clusterUUID;
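To make the removed upgrade rule concrete, here is a minimal stand-alone sketch, not from the patch, of what addDefaultUnitsIfNeeded did per setting: a bare number had the historical default unit appended ("b" for byte-sized settings, "ms" for time settings), while a value that already carried a unit failed Long.parseLong and was left untouched.

// Hypothetical distillation of the removed per-setting upgrade rule.
static String addDefaultUnit(String value, String defaultUnit) {
    try {
        Long.parseLong(value);      // parses -> a naked number, unit missing
    } catch (NumberFormatException nfe) {
        return value;               // e.g. "30s" or "512mb": already has a unit
    }
    return value + defaultUnit;     // e.g. "30000" + "ms" -> "30000ms"
}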

View File

@@ -106,14 +106,7 @@ public class MetaStateService extends AbstractComponent {
* Loads the global state, *without* index state; see {@link #loadFullState()} for that.
*/
MetaData loadGlobalState() throws IOException {
MetaData globalState = MetaData.FORMAT.loadLatestState(logger, nodeEnv.nodeDataPaths());
// ES 2.0 now requires units for all time and byte-sized settings, so we add the default unit if it's missing
// TODO: can we somehow only do this for pre-2.0 cluster state?
if (globalState != null) {
return MetaData.addDefaultUnitsIfNeeded(logger, globalState);
} else {
return null;
}
return MetaData.FORMAT.loadLatestState(logger, nodeEnv.nodeDataPaths());
}
/**

View File

@@ -169,8 +169,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
private static final int BUFFER_SIZE = 4096;
private static final String LEGACY_SNAPSHOT_PREFIX = "snapshot-";
private static final String SNAPSHOT_PREFIX = "snap-";
private static final String SNAPSHOT_CODEC = "snapshot";
@@ -185,14 +183,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
private static final String METADATA_NAME_FORMAT = "meta-%s.dat";
private static final String LEGACY_METADATA_NAME_FORMAT = "metadata-%s";
private static final String METADATA_CODEC = "metadata";
private static final String INDEX_METADATA_CODEC = "index-metadata";
protected static final String LEGACY_SNAPSHOT_NAME_FORMAT = LEGACY_SNAPSHOT_PREFIX + "%s";
protected static final String SNAPSHOT_NAME_FORMAT = SNAPSHOT_PREFIX + "%s.dat";
protected static final String SNAPSHOT_INDEX_PREFIX = "index-";
@@ -213,24 +207,16 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
private ChecksumBlobStoreFormat<MetaData> globalMetaDataFormat;
private LegacyBlobStoreFormat<MetaData> globalMetaDataLegacyFormat;
private ChecksumBlobStoreFormat<IndexMetaData> indexMetaDataFormat;
private LegacyBlobStoreFormat<IndexMetaData> indexMetaDataLegacyFormat;
private ChecksumBlobStoreFormat<SnapshotInfo> snapshotFormat;
private LegacyBlobStoreFormat<SnapshotInfo> snapshotLegacyFormat;
private final boolean readOnly;
private final ParseFieldMatcher parseFieldMatcher;
private final ChecksumBlobStoreFormat<BlobStoreIndexShardSnapshot> indexShardSnapshotFormat;
private final LegacyBlobStoreFormat<BlobStoreIndexShardSnapshot> indexShardSnapshotLegacyFormat;
private final ChecksumBlobStoreFormat<BlobStoreIndexShardSnapshots> indexShardSnapshotsFormat;
/**
@@ -247,7 +233,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
restoreRateLimiter = getRateLimiter(metadata.settings(), "max_restore_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB));
readOnly = metadata.settings().getAsBoolean("readonly", false);
indexShardSnapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher, isCompress());
indexShardSnapshotLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_SNAPSHOT_NAME_FORMAT, BlobStoreIndexShardSnapshot.PROTO, parseFieldMatcher);
indexShardSnapshotsFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_INDEX_CODEC, SNAPSHOT_INDEX_NAME_FORMAT, BlobStoreIndexShardSnapshots.PROTO, parseFieldMatcher, isCompress());
}
@@ -255,16 +240,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
@Override
protected void doStart() {
this.snapshotsBlobContainer = blobStore().blobContainer(basePath());
ParseFieldMatcher parseFieldMatcher = new ParseFieldMatcher(settings);
globalMetaDataFormat = new ChecksumBlobStoreFormat<>(METADATA_CODEC, METADATA_NAME_FORMAT, MetaData.PROTO, parseFieldMatcher, isCompress());
globalMetaDataLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_METADATA_NAME_FORMAT, MetaData.PROTO, parseFieldMatcher);
indexMetaDataFormat = new ChecksumBlobStoreFormat<>(INDEX_METADATA_CODEC, METADATA_NAME_FORMAT, IndexMetaData.PROTO, parseFieldMatcher, isCompress());
indexMetaDataLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_SNAPSHOT_NAME_FORMAT, IndexMetaData.PROTO, parseFieldMatcher);
snapshotFormat = new ChecksumBlobStoreFormat<>(SNAPSHOT_CODEC, SNAPSHOT_NAME_FORMAT, SnapshotInfo.PROTO, parseFieldMatcher, isCompress());
snapshotLegacyFormat = new LegacyBlobStoreFormat<>(LEGACY_SNAPSHOT_NAME_FORMAT, SnapshotInfo.PROTO, parseFieldMatcher);
}
@Override
@@ -325,8 +304,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
if (getSnapshots().stream().anyMatch(s -> s.getName().equals(snapshotName))) {
throw new SnapshotCreationException(metadata.name(), snapshotId, "snapshot with the same name already exists");
}
if (snapshotFormat.exists(snapshotsBlobContainer, snapshotId.getUUID()) ||
snapshotLegacyFormat.exists(snapshotsBlobContainer, snapshotName)) {
if (snapshotFormat.exists(snapshotsBlobContainer, snapshotId.getUUID())) {
throw new SnapshotCreationException(metadata.name(), snapshotId, "snapshot with such name already exists");
}
@@ -345,42 +323,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
}
}
// Older repository index files (index-N) only contain snapshot info, not indices info,
// so if the repository data is of the older format, populate it with the indices entries
// so we know which indices of snapshots have blob ids in the older format.
private RepositoryData upgradeRepositoryData(final RepositoryData repositoryData) throws IOException {
final Map<IndexId, Set<SnapshotId>> indexToSnapshots = new HashMap<>();
for (final SnapshotId snapshotId : repositoryData.getSnapshotIds()) {
final SnapshotInfo snapshotInfo;
try {
snapshotInfo = getSnapshotInfo(snapshotId);
} catch (SnapshotException e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] repository is on a pre-5.0 format with an index file that contains snapshot [{}] but " +
"the corresponding snap-{}.dat file cannot be read. The snapshot will no longer be included in " +
"the repository but its data directories will remain.", getMetadata().name(), snapshotId, snapshotId.getUUID()), e);
continue;
}
for (final String indexName : snapshotInfo.indices()) {
final IndexId indexId = new IndexId(indexName, indexName);
if (indexToSnapshots.containsKey(indexId)) {
indexToSnapshots.get(indexId).add(snapshotId);
} else {
indexToSnapshots.put(indexId, Sets.newHashSet(snapshotId));
}
}
}
try {
final RepositoryData updatedRepoData = repositoryData.initIndices(indexToSnapshots);
if (isReadOnly() == false) {
// write the new index gen file with the indices included
writeIndexGen(updatedRepoData);
}
return updatedRepoData;
} catch (IOException e) {
throw new RepositoryException(metadata.name(), "failed to update the repository index blob with indices data on startup", e);
}
}
@Override
public void deleteSnapshot(SnapshotId snapshotId) {
if (isReadOnly()) {
@@ -476,17 +418,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] Unable to delete snapshot file [{}]", snapshotInfo.snapshotId(), blobId), e);
}
} else {
// we don't know the version, first try the current format, then the legacy format
try {
snapshotFormat.delete(snapshotsBlobContainer, blobId);
} catch (IOException e) {
// now try legacy format
try {
snapshotLegacyFormat.delete(snapshotsBlobContainer, blobId);
} catch (IOException e2) {
// neither snapshot file could be deleted, log the error
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e);
}
// snapshot file could not be deleted, log the error
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Unable to delete snapshot file [{}]", blobId), e);
}
}
}
@@ -500,17 +436,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] Unable to delete global metadata file [{}]", snapshotInfo.snapshotId(), blobId), e);
}
} else {
// we don't know the version, first try the current format, then the legacy format
try {
globalMetaDataFormat.delete(snapshotsBlobContainer, blobId);
} catch (IOException e) {
// now try legacy format
try {
globalMetaDataLegacyFormat.delete(snapshotsBlobContainer, blobId);
} catch (IOException e2) {
// neither global metadata file could be deleted, log the error
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e);
}
// global metadata file could not be deleted, log the error
logger.warn((Supplier<?>) () -> new ParameterizedMessage("Unable to delete global metadata file [{}]", blobId), e);
}
}
}
@@ -559,14 +489,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
try {
return snapshotFormat.read(snapshotsBlobContainer, snapshotId.getUUID());
} catch (FileNotFoundException | NoSuchFileException ex) {
// File is missing - let's try legacy format instead
try {
return snapshotLegacyFormat.read(snapshotsBlobContainer, snapshotId.getName());
} catch (FileNotFoundException | NoSuchFileException ex1) {
throw new SnapshotMissingException(metadata.name(), snapshotId, ex);
} catch (IOException | NotXContentException ex1) {
throw new SnapshotException(metadata.name(), snapshotId, "failed to get snapshots", ex1);
}
throw new SnapshotMissingException(metadata.name(), snapshotId, ex);
} catch (IOException | NotXContentException ex) {
throw new SnapshotException(metadata.name(), snapshotId, "failed to get snapshots", ex);
}
@@ -580,8 +503,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
assert ignoreIndexErrors;
if (globalMetaDataFormat.exists(snapshotsBlobContainer, snapshotId.getUUID())) {
snapshotVersion = Version.CURRENT;
} else if (globalMetaDataLegacyFormat.exists(snapshotsBlobContainer, snapshotId.getName())) {
throw new SnapshotException(metadata.name(), snapshotId, "snapshot is too old");
} else {
throw new SnapshotMissingException(metadata.name(), snapshotId);
}
@@ -632,41 +553,21 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
* Returns appropriate global metadata format based on the provided version of the snapshot
*/
private BlobStoreFormat<MetaData> globalMetaDataFormat(Version version) {
if(legacyMetaData(version)) {
return globalMetaDataLegacyFormat;
} else {
return globalMetaDataFormat;
}
return globalMetaDataFormat;
}
/**
* Returns appropriate snapshot format based on the provided version of the snapshot
*/
private BlobStoreFormat<SnapshotInfo> snapshotFormat(Version version) {
if(legacyMetaData(version)) {
return snapshotLegacyFormat;
} else {
return snapshotFormat;
}
}
/**
* In v2.0.0 we changed the metadata file format
* @return true if legacy version should be used false otherwise
*/
public static boolean legacyMetaData(Version version) {
return version.before(Version.V_2_0_0_beta1);
return snapshotFormat;
}
/**
* Returns appropriate index metadata format based on the provided version of the snapshot
*/
private BlobStoreFormat<IndexMetaData> indexMetaDataFormat(Version version) {
if(legacyMetaData(version)) {
return indexMetaDataLegacyFormat;
} else {
return indexMetaDataFormat;
}
return indexMetaDataFormat;
}
@Override
@@ -719,17 +620,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
public RepositoryData getRepositoryData() {
try {
final long indexGen = latestIndexBlobId();
final String snapshotsIndexBlobName;
final boolean legacyFormat;
if (indexGen == -1) {
// index-N file doesn't exist, either it's a fresh repository, or it's in the
// old format, so look for the older index file before returning an empty list
snapshotsIndexBlobName = SNAPSHOTS_FILE;
legacyFormat = true;
} else {
snapshotsIndexBlobName = INDEX_FILE_PREFIX + Long.toString(indexGen);
legacyFormat = false;
}
final String snapshotsIndexBlobName = INDEX_FILE_PREFIX + Long.toString(indexGen);
RepositoryData repositoryData;
try (InputStream blob = snapshotsBlobContainer.readBlob(snapshotsIndexBlobName)) {
@@ -739,10 +630,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
repositoryData = RepositoryData.fromXContent(parser);
}
}
if (legacyFormat) {
// pre 5.0 repository data needs to be updated to include the indices
repositoryData = upgradeRepositoryData(repositoryData);
}
return repositoryData;
} catch (NoSuchFileException nsfe) {
// repository doesn't have an index blob, it's a new blank repo
@@ -968,11 +855,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
}
BlobStoreFormat<BlobStoreIndexShardSnapshot> indexShardSnapshotFormat(Version version) {
if (BlobStoreRepository.legacyMetaData(version)) {
return indexShardSnapshotLegacyFormat;
} else {
return indexShardSnapshotFormat;
}
return indexShardSnapshotFormat;
}
/**
@@ -1170,8 +1053,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
BlobStoreIndexShardSnapshot snapshot = null;
if (name.startsWith(SNAPSHOT_PREFIX)) {
snapshot = indexShardSnapshotFormat.readBlob(blobContainer, name);
} else if (name.startsWith(LEGACY_SNAPSHOT_PREFIX)) {
snapshot = indexShardSnapshotLegacyFormat.readBlob(blobContainer, name);
}
if (snapshot != null) {
snapshots.add(new SnapshotFiles(snapshot.snapshot(), snapshot.indexFiles()));
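With the legacy formats gone, snapshot-related blobs follow a single naming scheme, built from the constants retained above. A hedged sketch of the mapping (the UUID is made up):

// Illustrative only; the format strings come from the constants kept above.
String uuid = "Xy12abCdEf";                               // hypothetical snapshot UUID
String snapshotBlob = String.format("snap-%s.dat", uuid); // SNAPSHOT_NAME_FORMAT
String metaDataBlob = String.format("meta-%s.dat", uuid); // METADATA_NAME_FORMAT
String indexBlob = "index-" + 5;                          // SNAPSHOT_INDEX_PREFIX + generation from latestIndexBlobId()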

View File

@@ -1,60 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.repositories.blobstore;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.xcontent.FromXContentBuilder;
import org.elasticsearch.common.xcontent.ToXContent;
import java.io.IOException;
import java.io.InputStream;
/**
* Snapshot metadata file format used before v2.0
*/
public class LegacyBlobStoreFormat<T extends ToXContent> extends BlobStoreFormat<T> {
/**
* @param blobNameFormat format of the blobname in {@link String#format} format
* @param reader the prototype object that can deserialize objects with type T
*/
public LegacyBlobStoreFormat(String blobNameFormat, FromXContentBuilder<T> reader, ParseFieldMatcher parseFieldMatcher) {
super(blobNameFormat, reader, parseFieldMatcher);
}
/**
* Reads and parses the blob with given name.
*
* If required the checksum of the blob will be verified.
*
* @param blobContainer blob container
* @param blobName blob name
* @return parsed blob object
*/
public T readBlob(BlobContainer blobContainer, String blobName) throws IOException {
try (InputStream inputStream = blobContainer.readBlob(blobName)) {
BytesStreamOutput out = new BytesStreamOutput();
Streams.copy(inputStream, out);
return read(out.bytes());
}
}
}
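Before its removal, BlobStoreRepository wired this class up with the legacy name patterns, e.g. "snapshot-%s" for snapshot blobs. A hedged usage sketch (the blob name is illustrative):

// Hypothetical usage mirroring the removed wiring shown further up.
LegacyBlobStoreFormat<SnapshotInfo> legacy =
    new LegacyBlobStoreFormat<>("snapshot-%s", SnapshotInfo.PROTO, parseFieldMatcher);
SnapshotInfo info = legacy.readBlob(blobContainer, "snapshot-my_snapshot"); // parses raw x-content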

View File

@@ -185,16 +185,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotId);
final Snapshot snapshot = new Snapshot(request.repositoryName, snapshotId);
List<String> filteredIndices = SnapshotUtils.filterIndices(snapshotInfo.indices(), request.indices(), request.indicesOptions());
MetaData metaDataIn = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(filteredIndices));
final MetaData metaData;
if (snapshotInfo.version().before(Version.V_2_0_0_beta1)) {
// ES 2.0 now requires units for all time and byte-sized settings, so we add the default unit if it's missing in this snapshot:
metaData = MetaData.addDefaultUnitsIfNeeded(logger, metaDataIn);
} else {
// Units are already enforced:
metaData = metaDataIn;
}
MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(filteredIndices));
// Make sure that we can restore from this snapshot
validateSnapshotRestorable(request.repositoryName, snapshotInfo);

View File

@@ -153,11 +153,12 @@ public class VersionTests extends ESTestCase {
}
public void testMinCompatVersion() {
assertThat(Version.V_2_0_0_beta1.minimumCompatibilityVersion(), equalTo(Version.V_2_0_0_beta1));
assertThat(Version.V_2_1_0.minimumCompatibilityVersion(), equalTo(Version.V_2_0_0));
assertThat(Version.V_2_2_0.minimumCompatibilityVersion(), equalTo(Version.V_2_0_0));
assertThat(Version.V_2_3_0.minimumCompatibilityVersion(), equalTo(Version.V_2_0_0));
assertThat(Version.V_5_0_0_alpha1.minimumCompatibilityVersion(), equalTo(Version.V_5_0_0_alpha1));
Version prerelease = VersionUtils.getFirstVersion();
assertThat(prerelease.minimumCompatibilityVersion(), equalTo(prerelease));
Version major = Version.fromString("2.0.0");
assertThat(Version.fromString("2.0.0").minimumCompatibilityVersion(), equalTo(major));
assertThat(Version.fromString("2.2.0").minimumCompatibilityVersion(), equalTo(major));
assertThat(Version.fromString("2.3.0").minimumCompatibilityVersion(), equalTo(major));
// from 6.0 on we are supporting the latest minor of the previous major... this might fail once we add a new version, i.e. once 5.x is
// released, since we need to bump the supported minor in Version#minimumCompatibilityVersion()
Version lastVersion = VersionUtils.getPreviousVersion(Version.V_6_0_0_alpha1_UNRELEASED);
@@ -169,9 +170,9 @@ public class VersionTests extends ESTestCase {
public void testToString() {
// with 2.0.beta we lowercase
assertEquals("2.0.0-beta1", Version.V_2_0_0_beta1.toString());
assertEquals("2.0.0-beta1", Version.fromString("2.0.0-beta1").toString());
assertEquals("5.0.0-alpha1", Version.V_5_0_0_alpha1.toString());
assertEquals("2.3.0", Version.V_2_3_0.toString());
assertEquals("2.3.0", Version.fromString("2.3.0").toString());
assertEquals("0.90.0.Beta1", Version.fromString("0.90.0.Beta1").toString());
assertEquals("1.0.0.Beta1", Version.fromString("1.0.0.Beta1").toString());
assertEquals("2.0.0-beta1", Version.fromString("2.0.0-beta1").toString());
@@ -180,7 +181,7 @@ public class VersionTests extends ESTestCase {
}
public void testIsBeta() {
assertTrue(Version.V_2_0_0_beta1.isBeta());
assertTrue(Version.fromString("2.0.0-beta1").isBeta());
assertTrue(Version.fromString("1.0.0.Beta1").isBeta());
assertTrue(Version.fromString("0.90.0.Beta1").isBeta());
}

View File

@@ -240,7 +240,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
assertRealtimeGetWorks(indexName);
assertNewReplicasWork(indexName);
assertUpgradeWorks(client(), indexName, version);
assertDeleteByQueryWorked(indexName, version);
assertPositionIncrementGapDefaults(indexName, version);
assertAliasWithBadName(indexName, version);
unloadIndex(indexName);
@@ -409,17 +408,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase {
// TODO: do something with the replicas! query? index?
}
// #10067: create-bwc-index.py deleted any doc with long_sort:[10-20]
void assertDeleteByQueryWorked(String indexName, Version version) throws Exception {
if (version.onOrAfter(Version.V_2_0_0_beta1)) {
// TODO: remove this once #10262 is fixed
return;
}
// these documents are supposed to be deleted by a delete by query operation in the translog
SearchRequestBuilder searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.queryStringQuery("long_sort:[10 TO 20]"));
assertEquals(0, searchReq.get().getHits().getTotalHits());
}
void assertPositionIncrementGapDefaults(String indexName, Version version) throws Exception {
client().prepareIndex(indexName, "doc", "position_gap_test").setSource("string", Arrays.asList("one", "two three"))
.setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();

View File

@@ -28,22 +28,17 @@ import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.blobstore.fs.FsBlobStore;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.compress.CompressorFactory;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.FromXContentBuilder;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.translog.BufferedChecksumStreamOutput;
import org.elasticsearch.repositories.blobstore.ChecksumBlobStoreFormat;
import org.elasticsearch.repositories.blobstore.LegacyBlobStoreFormat;
import java.io.EOFException;
import java.io.IOException;
@@ -116,67 +111,17 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
}
}
/**
* Extends legacy format with writing functionality. It's used to simulate legacy file formats in tests.
*/
private static final class LegacyEmulationBlobStoreFormat<T extends ToXContent> extends LegacyBlobStoreFormat<T> {
protected final XContentType xContentType;
protected final boolean compress;
public LegacyEmulationBlobStoreFormat(String blobNameFormat, FromXContentBuilder<T> reader, ParseFieldMatcher parseFieldMatcher, boolean compress, XContentType xContentType) {
super(blobNameFormat, reader, parseFieldMatcher);
this.xContentType = xContentType;
this.compress = compress;
}
public void write(T obj, BlobContainer blobContainer, String blobName) throws IOException {
BytesReference bytes = write(obj);
try (StreamInput stream = bytes.streamInput()) {
blobContainer.writeBlob(blobName, stream, bytes.length());
}
}
private BytesReference write(T obj) throws IOException {
try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) {
if (compress) {
try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) {
write(obj, compressedStreamOutput);
}
} else {
write(obj, bytesStreamOutput);
}
return bytesStreamOutput.bytes();
}
}
private void write(T obj, StreamOutput streamOutput) throws IOException {
XContentBuilder builder = XContentFactory.contentBuilder(xContentType, streamOutput);
builder.startObject();
obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS);
builder.endObject();
builder.close();
}
}
public void testBlobStoreOperations() throws IOException {
BlobStore blobStore = createTestBlobStore();
BlobContainer blobContainer = blobStore.blobContainer(BlobPath.cleanPath());
ChecksumBlobStoreFormat<BlobObj> checksumJSON = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, false, XContentType.JSON);
ChecksumBlobStoreFormat<BlobObj> checksumSMILE = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, false, XContentType.SMILE);
ChecksumBlobStoreFormat<BlobObj> checksumSMILECompressed = new ChecksumBlobStoreFormat<>(BLOB_CODEC, "%s", BlobObj.PROTO, parseFieldMatcher, true, XContentType.SMILE);
LegacyEmulationBlobStoreFormat<BlobObj> legacyJSON = new LegacyEmulationBlobStoreFormat<>("%s", BlobObj.PROTO, parseFieldMatcher, false, XContentType.JSON);
LegacyEmulationBlobStoreFormat<BlobObj> legacySMILE = new LegacyEmulationBlobStoreFormat<>("%s", BlobObj.PROTO, parseFieldMatcher, false, XContentType.SMILE);
LegacyEmulationBlobStoreFormat<BlobObj> legacySMILECompressed = new LegacyEmulationBlobStoreFormat<>("%s", BlobObj.PROTO, parseFieldMatcher, true, XContentType.SMILE);
// Write blobs in different formats
checksumJSON.write(new BlobObj("checksum json"), blobContainer, "check-json");
checksumSMILE.write(new BlobObj("checksum smile"), blobContainer, "check-smile");
checksumSMILECompressed.write(new BlobObj("checksum smile compressed"), blobContainer, "check-smile-comp");
legacyJSON.write(new BlobObj("legacy json"), blobContainer, "legacy-json");
legacySMILE.write(new BlobObj("legacy smile"), blobContainer, "legacy-smile");
legacySMILECompressed.write(new BlobObj("legacy smile compressed"), blobContainer, "legacy-smile-comp");
// Assert that all checksum blobs can be read by all formats
assertEquals(checksumJSON.read(blobContainer, "check-json").getText(), "checksum json");
@@ -185,14 +130,6 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
assertEquals(checksumSMILE.read(blobContainer, "check-smile").getText(), "checksum smile");
assertEquals(checksumJSON.read(blobContainer, "check-smile-comp").getText(), "checksum smile compressed");
assertEquals(checksumSMILE.read(blobContainer, "check-smile-comp").getText(), "checksum smile compressed");
// Assert that all legacy blobs can be read by all formats
assertEquals(legacyJSON.read(blobContainer, "legacy-json").getText(), "legacy json");
assertEquals(legacySMILE.read(blobContainer, "legacy-json").getText(), "legacy json");
assertEquals(legacyJSON.read(blobContainer, "legacy-smile").getText(), "legacy smile");
assertEquals(legacySMILE.read(blobContainer, "legacy-smile").getText(), "legacy smile");
assertEquals(legacyJSON.read(blobContainer, "legacy-smile-comp").getText(), "legacy smile compressed");
assertEquals(legacySMILE.read(blobContainer, "legacy-smile-comp").getText(), "legacy smile compressed");
}
public void testCompressionIsApplied() throws IOException {

View File

@@ -28,6 +28,7 @@ import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
import java.io.InputStreamReader;
@@ -113,7 +114,7 @@ public class RemoteRequestBuildersTests extends ESTestCase {
SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
// Test request without any fields
Version remoteVersion = Version.fromId(between(Version.V_2_0_0_beta1_ID, Version.CURRENT.id));
Version remoteVersion = VersionUtils.randomVersion(random());
assertThat(initialSearchParams(searchRequest, remoteVersion),
not(either(hasKey("stored_fields")).or(hasKey("fields"))));
@@ -121,16 +122,12 @@
searchRequest.source().storedField("_source").storedField("_id");
// Test stored_fields for versions that support it
remoteVersion = Version.fromId(between(Version.V_5_0_0_alpha4_ID, Version.CURRENT.id));
remoteVersion = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_alpha4, null);
assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("stored_fields", "_source,_id"));
// Test fields for versions that support it
remoteVersion = Version.fromId(between(Version.V_2_0_0_beta1_ID, Version.V_5_0_0_alpha4_ID - 1));
remoteVersion = VersionUtils.randomVersionBetween(random(), null, Version.V_5_0_0_alpha3);
assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("fields", "_source,_id"));
// Test extra fields for versions that need it
remoteVersion = Version.fromId(between(0, Version.V_2_0_0_beta1_ID - 1));
assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("fields", "_source,_id,_parent,_routing,_ttl"));
}
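One note on the VersionUtils helpers used in the rewrite: a null bound to randomVersionBetween is open-ended, falling back to the oldest known version (null min) or the newest (null max). The semantics assumed above, sketched:

// Semantics assumed by the rewritten assertions.
Version v5up = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0_alpha4, null);  // [5.0.0-alpha4 .. CURRENT]
Version pre54 = VersionUtils.randomVersionBetween(random(), null, Version.V_5_0_0_alpha3); // [oldest .. 5.0.0-alpha3]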
public void testInitialSearchParamsMisc() {

View File

@@ -184,7 +184,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
public static void beforeClass() {
// we have to prefer CURRENT since, with the range of versions we support, it's rather unlikely to actually get the current version.
Version indexVersionCreated = randomBoolean() ? Version.CURRENT
: VersionUtils.randomVersionBetween(random(), Version.V_2_0_0_beta1, Version.CURRENT);
: VersionUtils.randomVersionBetween(random(), null, Version.CURRENT);
nodeSettings = Settings.builder()
.put("node.name", AbstractQueryTestCase.class.toString())
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())