Add 7.1 version constant to 7.x branch (#38513)
This commit adds the 7.1 version constant to the 7.x branch.

Co-authored-by: Andy Bristol <andy.bristol@elastic.co>
Co-authored-by: Tim Brooks <tim@uncontended.net>
Co-authored-by: Christoph Büscher <cbuescher@posteo.de>
Co-authored-by: Luca Cavanna <javanna@users.noreply.github.com>
Co-authored-by: markharwood <markharwood@gmail.com>
Co-authored-by: Ioannis Kakavas <ioannis@elastic.co>
Co-authored-by: Nhat Nguyen <nhat.nguyen@elastic.co>
Co-authored-by: David Roberts <dave.roberts@elastic.co>
Co-authored-by: Jason Tedor <jason@tedor.me>
Co-authored-by: Alpar Torok <torokalpar@gmail.com>
Co-authored-by: David Turner <david.turner@elastic.co>
Co-authored-by: Martijn van Groningen <martijn.v.groningen@gmail.com>
Co-authored-by: Tim Vernum <tim@adjective.org>
Co-authored-by: Albert Zaharovits <albert.zaharovits@gmail.com>
parent 70956f6f34
commit fdf6b3f23f
@@ -171,30 +171,38 @@ public class VersionCollection {
     }

     public void forPreviousUnreleased(Consumer<UnreleasedVersionInfo> consumer) {
-        getUnreleased().stream()
+        List<UnreleasedVersionInfo> collect = getUnreleased().stream()
             .filter(version -> version.equals(currentVersion) == false)
-            .forEach(version -> consumer.accept(
-                new UnreleasedVersionInfo(
+            .map(version -> new UnreleasedVersionInfo(
                 version,
                 getBranchFor(version),
                 getGradleProjectNameFor(version)
-            )
-            ));
+                )
+            )
+            .collect(Collectors.toList());
+
+        collect.forEach(uvi -> consumer.accept(uvi));
     }

     private String getGradleProjectNameFor(Version version) {
         if (version.equals(currentVersion)) {
             throw new IllegalArgumentException("The Gradle project to build " + version + " is the current build.");
         }

         Map<Integer, List<Version>> releasedMajorGroupedByMinor = getReleasedMajorGroupedByMinor();

         if (version.getRevision() == 0) {
-            if (releasedMajorGroupedByMinor
-                .get(releasedMajorGroupedByMinor.keySet().stream().max(Integer::compareTo).orElse(0))
-                .contains(version)) {
-                return "minor";
+            List<Version> unreleasedStagedOrMinor = getUnreleased().stream()
+                .filter(v -> v.getRevision() == 0)
+                .collect(Collectors.toList());
+            if (unreleasedStagedOrMinor.size() > 2) {
+                if (unreleasedStagedOrMinor.get(unreleasedStagedOrMinor.size() - 2).equals(version)) {
+                    return "minor";
+                } else {
+                    return "staged";
+                }
             } else {
-                return "staged";
+                return "minor";
             }
         } else {
             if (releasedMajorGroupedByMinor
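The forPreviousUnreleased change above materializes the stream into a list before handing versions to the consumer, so a consumer that mutates build state cannot interfere with the ongoing stream iteration. A minimal sketch of the pattern (names hypothetical, not the build code itself):

    import java.util.List;
    import java.util.function.Consumer;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    class CollectThenConsume {
        // Collect first: the terminal operation finishes before any consumer side effects run.
        static void forPrevious(Stream<String> versions, Consumer<String> consumer) {
            List<String> collected = versions
                .filter(v -> v.equals("current") == false)
                .collect(Collectors.toList());
            collected.forEach(consumer);
        }
    }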
@@ -239,8 +247,10 @@ public class VersionCollection {
         unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 1));
         if (groupByMinor.getOrDefault(greatestMinor - 1, emptyList()).size() == 1) {
             // we found that the previous minor is staged but not yet released
-            // in this case, the minor before that has a bugfix
-            unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2));
+            // in this case, the minor before that has a bugfix, should there be such a minor
+            if (greatestMinor >= 2) {
+                unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2));
+            }
         }
     }
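The added greatestMinor >= 2 guard matters at the start of a new major, where greatestMinor - 2 would be negative and no such minor can exist in the grouping. A reduced illustration with hypothetical names:

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    class UnreleasedGuard {
        static void addBugfixOfEarlierMinor(Map<Integer, List<String>> groupByMinor, int greatestMinor, List<String> unreleased) {
            if (groupByMinor.getOrDefault(greatestMinor - 1, Collections.emptyList()).size() == 1) {
                // previous minor is staged but unreleased; its bugfix lives two minors back, if that minor exists
                if (greatestMinor >= 2) {
                    List<String> older = groupByMinor.getOrDefault(greatestMinor - 2, Collections.emptyList());
                    if (older.isEmpty() == false) {
                        unreleased.add(older.get(older.size() - 1));
                    }
                }
            }
        }
    }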
@@ -81,6 +81,9 @@ public class VersionCollectionTests extends GradleUnitTestCase {
             "6_0_0", "6_0_1", "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", "6_2_0", "6_2_1", "6_2_2", "6_2_3",
             "6_2_4", "6_3_0", "6_3_1", "6_3_2", "6_4_0", "6_4_1", "6_4_2"
         ));
+        sampleVersions.put("7.1.0", asList(
+            "7_1_0", "7_0_0", "6_7_0", "6_6_1", "6_6_0"
+        ));
     }

     @Test(expected = IllegalArgumentException.class)
@@ -145,6 +148,11 @@ public class VersionCollectionTests extends GradleUnitTestCase {
             singletonList("7.3.0"),
             getVersionCollection("8.0.0").getWireCompatible()
         );
+        assertVersionsEquals(
+            asList("6.7.0", "7.0.0"),
+            getVersionCollection("7.1.0").getWireCompatible()
+        );
+
     }

     public void testWireCompatibleUnreleased() {
@@ -171,6 +179,10 @@ public class VersionCollectionTests extends GradleUnitTestCase {
             singletonList("7.3.0"),
             getVersionCollection("8.0.0").getUnreleasedWireCompatible()
         );
+        assertVersionsEquals(
+            asList("6.7.0", "7.0.0"),
+            getVersionCollection("7.1.0").getWireCompatible()
+        );
     }

     public void testIndexCompatible() {
@@ -286,7 +298,7 @@ public class VersionCollectionTests extends GradleUnitTestCase {
             getVersionCollection("6.4.2")
         );
         assertUnreleasedBranchNames(
-            asList("5.6", "6.4", "6.5"),
+            asList("5.6", "6.4", "6.x"),
             getVersionCollection("6.6.0")
         );
         assertUnreleasedBranchNames(
@@ -309,13 +321,17 @@ public class VersionCollectionTests extends GradleUnitTestCase {
             getVersionCollection("6.4.2")
         );
         assertUnreleasedGradleProjectNames(
-            asList("maintenance", "bugfix", "staged"),
+            asList("maintenance", "bugfix", "minor"),
             getVersionCollection("6.6.0")
         );
         assertUnreleasedGradleProjectNames(
             asList("bugfix", "staged", "minor"),
             getVersionCollection("8.0.0")
         );
+        assertUnreleasedGradleProjectNames(
+            asList("staged", "minor"),
+            getVersionCollection("7.1.0")
+        );
     }

     public void testCompareToAuthoritative() {
@@ -1,4 +1,4 @@
-elasticsearch = 7.0.0
+elasticsearch = 7.1.0
 lucene = 8.0.0-snapshot-83f9835
 
 # optional dependencies
@@ -141,6 +141,12 @@ bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unre
                     extension += '.gz'
                 }
             }
+            if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('deb')) {
+                classifier = "-amd64"
+            }
+            if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('rpm')) {
+                classifier = "-x86_64"
+            }
             if (bwcVersion.onOrAfter('6.3.0')) {
                 baseDir += projectName.endsWith('zip') || projectName.endsWith('tar') ? '/archives' : '/packages'
                 // add oss variant first
@@ -31,12 +31,12 @@ RUN groupadd -g 1000 elasticsearch && \
 WORKDIR /usr/share/elasticsearch
 
 COPY ${elasticsearch} /opt/
 
 RUN tar zxf /opt/${elasticsearch} --strip-components=1
 RUN mkdir -p config data logs
 RUN chmod 0775 config data logs
 COPY config/elasticsearch.yml config/log4j2.properties config/
 
 ################################################################################
 # Build stage 1 (the actual elasticsearch image):
 # Copy elasticsearch from stage 0
@@ -1,8 +1,8 @@
-:version: 7.0.0-alpha2
+:version: 7.1.0
 :major-version: 7.x
 :lucene_version: 8.0.0
 :lucene_version_path: 8_0_0
-:branch: master
+:branch: 7.x
 :jdk: 1.8.0_131
 :jdk_major: 8
 :build_flavor: default
@@ -38,6 +38,7 @@ import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
 import org.elasticsearch.rest.action.document.RestBulkAction;
 import org.elasticsearch.rest.action.document.RestGetAction;
+import org.elasticsearch.rest.action.document.RestIndexAction;
 import org.elasticsearch.rest.action.document.RestUpdateAction;
 import org.elasticsearch.rest.action.search.RestExplainAction;
 import org.elasticsearch.test.NotEqualMessageBuilder;
@@ -80,15 +81,20 @@ import static org.hamcrest.Matchers.startsWith;
  */
 public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
     private final boolean supportsLenientBooleans = getOldClusterVersion().before(Version.V_6_0_0_alpha1);
-    private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0");
 
     private String index;
+    private String type;
 
     @Before
     public void setIndex() throws IOException {
         index = getTestName().toLowerCase(Locale.ROOT);
     }
 
+    @Before
+    public void setType() {
+        type = getOldClusterVersion().before(Version.V_6_7_0) ? "doc" : "_doc";
+    }
+
     public void testSearch() throws Exception {
         int count;
         if (isRunningAgainstOldCluster()) {
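Since 6.7 deprecates custom mapping types, the tests now pick the type name once per run from the old cluster's version. A standalone sketch of that decision (hypothetical helper, not the test's API):

    class TypeChoice {
        // Custom type "doc" for clusters older than 6.7, the reserved "_doc" afterwards.
        static String typeFor(int major, int minor) {
            boolean before670 = major < 6 || (major == 6 && minor < 7);
            return before670 ? "doc" : "_doc";
        }
    }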
@@ -102,7 +108,9 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         }
         {
             mappingsAndSettings.startObject("mappings");
-            mappingsAndSettings.startObject("doc");
+            if (isRunningAgainstAncientCluster()) {
+                mappingsAndSettings.startObject(type);
+            }
             mappingsAndSettings.startObject("properties");
             {
                 mappingsAndSettings.startObject("string");
@@ -121,7 +129,9 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
                 mappingsAndSettings.endObject();
             }
             mappingsAndSettings.endObject();
-            mappingsAndSettings.endObject();
+            if (isRunningAgainstAncientCluster()) {
+                mappingsAndSettings.endObject();
+            }
             mappingsAndSettings.endObject();
         }
         mappingsAndSettings.endObject();
@@ -135,17 +145,20 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             count = randomIntBetween(2000, 3000);
             byte[] randomByteArray = new byte[16];
             random().nextBytes(randomByteArray);
-            indexRandomDocuments(count, true, true, i -> {
-                return JsonXContent.contentBuilder().startObject()
-                    .field("string", randomAlphaOfLength(10))
-                    .field("int", randomInt(100))
-                    .field("float", randomFloat())
-                    // be sure to create a "proper" boolean (True, False) for the first document so that automapping is correct
-                    .field("bool", i > 0 && supportsLenientBooleans ? randomLenientBoolean() : randomBoolean())
-                    .field("field.with.dots", randomAlphaOfLength(10))
-                    .field("binary", Base64.getEncoder().encodeToString(randomByteArray))
-                    .endObject();
-            });
+            indexRandomDocuments(
+                count,
+                true,
+                true,
+                i -> JsonXContent.contentBuilder().startObject()
+                    .field("string", randomAlphaOfLength(10))
+                    .field("int", randomInt(100))
+                    .field("float", randomFloat())
+                    // be sure to create a "proper" boolean (True, False) for the first document so that automapping is correct
+                    .field("bool", i > 0 && supportsLenientBooleans ? randomLenientBoolean() : randomBoolean())
+                    .field("field.with.dots", randomAlphaOfLength(10))
+                    .field("binary", Base64.getEncoder().encodeToString(randomByteArray))
+                    .endObject()
+            );
             refresh();
         } else {
             count = countOfIndexedRandomDocuments();
@@ -155,7 +168,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         assertBasicSearchWorks(count);
         assertAllSearchWorks(count);
         assertBasicAggregationWorks();
-        assertRealtimeGetWorks();
+        assertRealtimeGetWorks(type);
         assertStoredBinaryFields(count);
     }
 
@@ -171,7 +184,9 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         }
         {
             mappingsAndSettings.startObject("mappings");
-            mappingsAndSettings.startObject("doc");
+            if (isRunningAgainstAncientCluster()) {
+                mappingsAndSettings.startObject(type);
+            }
             mappingsAndSettings.startObject("properties");
             {
                 mappingsAndSettings.startObject("field");
@@ -179,7 +194,9 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
                 mappingsAndSettings.endObject();
             }
             mappingsAndSettings.endObject();
-            mappingsAndSettings.endObject();
+            if (isRunningAgainstAncientCluster()) {
+                mappingsAndSettings.endObject();
+            }
             mappingsAndSettings.endObject();
         }
         mappingsAndSettings.endObject();
@@ -191,11 +208,8 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             client().performRequest(createIndex);
 
             int numDocs = randomIntBetween(2000, 3000);
-            indexRandomDocuments(numDocs, true, false, i -> {
-                return JsonXContent.contentBuilder().startObject()
-                    .field("field", "value")
-                    .endObject();
-            });
+            indexRandomDocuments(
+                numDocs, true, false, i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject());
             logger.info("Refreshing [{}]", index);
             client().performRequest(new Request("POST", "/" + index + "/_refresh"));
         } else {
@@ -225,76 +239,6 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         }
     }
 
-    /**
-     * Search on an alias that contains illegal characters that would prevent it from being created after 5.1.0. It should still be
-     * search-able though.
-     */
-    public void testAliasWithBadName() throws Exception {
-        assumeTrue("Can only test bad alias name if old cluster is on 5.1.0 or before",
-            getOldClusterVersion().before(VERSION_5_1_0_UNRELEASED));
-
-        int count;
-        if (isRunningAgainstOldCluster()) {
-            XContentBuilder mappingsAndSettings = jsonBuilder();
-            mappingsAndSettings.startObject();
-            {
-                mappingsAndSettings.startObject("settings");
-                mappingsAndSettings.field("number_of_shards", 1);
-                mappingsAndSettings.field("number_of_replicas", 0);
-                mappingsAndSettings.endObject();
-            }
-            {
-                mappingsAndSettings.startObject("mappings");
-                mappingsAndSettings.startObject("doc");
-                mappingsAndSettings.startObject("properties");
-                {
-                    mappingsAndSettings.startObject("key");
-                    mappingsAndSettings.field("type", "keyword");
-                    mappingsAndSettings.endObject();
-                }
-                mappingsAndSettings.endObject();
-                mappingsAndSettings.endObject();
-                mappingsAndSettings.endObject();
-            }
-            mappingsAndSettings.endObject();
-            Request createIndex = new Request("PUT", "/" + index);
-            createIndex.setJsonEntity(Strings.toString(mappingsAndSettings));
-            client().performRequest(createIndex);
-
-            String aliasName = "%23" + index; // %23 == #
-            client().performRequest(new Request("PUT", "/" + index + "/_alias/" + aliasName));
-            Response response = client().performRequest(new Request("HEAD", "/" + index + "/_alias/" + aliasName));
-            assertEquals(200, response.getStatusLine().getStatusCode());
-
-            count = randomIntBetween(32, 128);
-            indexRandomDocuments(count, true, true, i -> {
-                return JsonXContent.contentBuilder().startObject()
-                    .field("key", "value")
-                    .endObject();
-            });
-            refresh();
-        } else {
-            count = countOfIndexedRandomDocuments();
-        }
-
-        Request request = new Request("GET", "/_cluster/state");
-        request.addParameter("metric", "metadata");
-        logger.error("clusterState=" + entityAsMap(client().performRequest(request)));
-        // We can read from the alias just like we can read from the index.
-        String aliasName = "%23" + index; // %23 == #
-        Map<String, Object> searchRsp = entityAsMap(client().performRequest(new Request("GET", "/" + aliasName + "/_search")));
-        int totalHits = extractTotalHits(searchRsp);
-        assertEquals(count, totalHits);
-        if (isRunningAgainstOldCluster() == false) {
-            // We can remove the alias.
-            Response response = client().performRequest(new Request("DELETE", "/" + index + "/_alias/" + aliasName));
-            assertEquals(200, response.getStatusLine().getStatusCode());
-            // and check that it is gone:
-            response = client().performRequest(new Request("HEAD", "/" + index + "/_alias/" + aliasName));
-            assertEquals(404, response.getStatusLine().getStatusCode());
-        }
-    }
-
     public void testClusterState() throws Exception {
         if (isRunningAgainstOldCluster()) {
             XContentBuilder mappingsAndSettings = jsonBuilder();
@@ -348,31 +292,45 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             mappingsAndSettings.startObject();
             {
                 mappingsAndSettings.startObject("mappings");
-                mappingsAndSettings.startObject("doc");
-                mappingsAndSettings.startObject("properties");
-                {
-                    mappingsAndSettings.startObject("field");
-                    mappingsAndSettings.field("type", "text");
+                if (isRunningAgainstAncientCluster()) {
+                    mappingsAndSettings.startObject(type);
+                }
+                mappingsAndSettings.startObject("properties");
+                {
+                    mappingsAndSettings.startObject("field");
+                    {
+                        mappingsAndSettings.field("type", "text");
+                    }
                     mappingsAndSettings.endObject();
                 }
                 mappingsAndSettings.endObject();
+                if (isRunningAgainstAncientCluster()) {
+                    mappingsAndSettings.endObject();
+                }
             }
             mappingsAndSettings.endObject();
-            mappingsAndSettings.endObject();
-            mappingsAndSettings.endObject();
+            if (isRunningAgainstAncientCluster() == false) {
+                // the default number of shards is now one so we have to set the number of shards to be more than one explicitly
+                mappingsAndSettings.startObject("settings");
+                {
+                    mappingsAndSettings.field("index.number_of_shards", 5);
+                }
+                mappingsAndSettings.endObject();
+            }
         }
         mappingsAndSettings.endObject();
         Request createIndex = new Request("PUT", "/" + index);
         createIndex.setJsonEntity(Strings.toString(mappingsAndSettings));
         RequestOptions.Builder options = createIndex.getOptions().toBuilder();
         options.setWarningsHandler(WarningsHandler.PERMISSIVE);
         expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE);
         createIndex.setOptions(options);
         client().performRequest(createIndex);
 
         numDocs = randomIntBetween(512, 1024);
-        indexRandomDocuments(numDocs, true, true, i -> {
-            return JsonXContent.contentBuilder().startObject()
-                .field("field", "value")
-                .endObject();
-        });
+        indexRandomDocuments(
+            numDocs, true, true, i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject());
 
         ensureGreen(index); // wait for source index to be available on both nodes before starting shrink
 
@@ -381,7 +339,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         client().performRequest(updateSettingsRequest);
 
         Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex);
-        if (getOldClusterVersion().onOrAfter(Version.V_6_4_0)) {
+        if (getOldClusterVersion().onOrAfter(Version.V_6_4_0) && getOldClusterVersion().before(Version.V_7_0_0)) {
             shrinkIndexRequest.addParameter("copy_settings", "true");
         }
         shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}");
@@ -419,16 +377,30 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         mappingsAndSettings.startObject();
         {
             mappingsAndSettings.startObject("mappings");
-            mappingsAndSettings.startObject("doc");
-            mappingsAndSettings.startObject("properties");
-            {
-                mappingsAndSettings.startObject("field");
-                mappingsAndSettings.field("type", "text");
+            if (isRunningAgainstAncientCluster()) {
+                mappingsAndSettings.startObject(type);
+            }
+            mappingsAndSettings.startObject("properties");
+            {
+                mappingsAndSettings.startObject("field");
+                {
+                    mappingsAndSettings.field("type", "text");
+                }
                 mappingsAndSettings.endObject();
             }
             mappingsAndSettings.endObject();
+            if (isRunningAgainstAncientCluster()) {
+                mappingsAndSettings.endObject();
+            }
         }
         mappingsAndSettings.endObject();
-        mappingsAndSettings.endObject();
-        mappingsAndSettings.endObject();
+        if (isRunningAgainstAncientCluster() == false) {
+            // the default number of shards is now one so we have to set the number of shards to be more than one explicitly
+            mappingsAndSettings.startObject("settings");
+            mappingsAndSettings.field("index.number_of_shards", 5);
+            mappingsAndSettings.endObject();
+        }
     }
     mappingsAndSettings.endObject();
     Request createIndex = new Request("PUT", "/" + index);
@@ -439,11 +411,12 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             client().performRequest(createIndex);
 
             numDocs = randomIntBetween(512, 1024);
-            indexRandomDocuments(numDocs, true, true, i -> {
-                return JsonXContent.contentBuilder().startObject()
-                    .field("field", "value")
-                    .endObject();
-            });
+            indexRandomDocuments(
+                numDocs,
+                true,
+                true,
+                i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject()
+            );
         } else {
             ensureGreen(index); // wait for source index to be available on both nodes before starting shrink
 
@@ -510,7 +483,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             bulk.append("{\"index\":{}}\n");
             bulk.append("{\"test\":\"test\"}\n");
         }
-        Request bulkRequest = new Request("POST", "/" + index + "_write/doc/_bulk");
+        Request bulkRequest = new Request("POST", "/" + index + "_write/" + type + "/_bulk");
         bulkRequest.setJsonEntity(bulk.toString());
         bulkRequest.addParameter("refresh", "");
         bulkRequest.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE));
@@ -633,7 +606,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         assertTotalHits(termsCount, boolTerms);
     }
 
-    void assertRealtimeGetWorks() throws IOException {
+    void assertRealtimeGetWorks(final String typeName) throws IOException {
         Request disableAutoRefresh = new Request("PUT", "/" + index + "/_settings");
         disableAutoRefresh.setJsonEntity("{ \"index\": { \"refresh_interval\" : -1 }}");
         client().performRequest(disableAutoRefresh);
@@ -644,13 +617,15 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         Map<?, ?> hit = (Map<?, ?>) ((List<?>)(XContentMapValues.extractValue("hits.hits", searchResponse))).get(0);
         String docId = (String) hit.get("_id");
 
-        Request updateRequest = new Request("POST", "/" + index + "/doc/" + docId + "/_update");
+        Request updateRequest = new Request("POST", "/" + index + "/" + typeName + "/" + docId + "/_update");
         updateRequest.setOptions(expectWarnings(RestUpdateAction.TYPES_DEPRECATION_MESSAGE));
         updateRequest.setJsonEntity("{ \"doc\" : { \"foo\": \"bar\"}}");
         client().performRequest(updateRequest);
 
-        Request getRequest = new Request("GET", "/" + index + "/doc/" + docId);
-        getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE));
+        Request getRequest = new Request("GET", "/" + index + "/" + typeName + "/" + docId);
+        if (getOldClusterVersion().before(Version.V_6_7_0)) {
+            getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE));
+        }
         Map<String, Object> getRsp = entityAsMap(client().performRequest(getRequest));
         Map<?, ?> source = (Map<?, ?>) getRsp.get("_source");
         assertTrue("doc does not contain 'foo' key: " + source, source.containsKey("foo"));
@@ -689,7 +664,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
     void assertTotalHits(int expectedTotalHits, Map<?, ?> response) {
         int actualTotalHits = extractTotalHits(response);
-        assertEquals(expectedTotalHits, actualTotalHits);
+        assertEquals(response.toString(), expectedTotalHits, actualTotalHits);
     }
 
     int extractTotalHits(Map<?, ?> response) {
@@ -704,7 +679,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
      * Tests that a single document survives. Super basic smoke test.
      */
     public void testSingleDoc() throws IOException {
-        String docLocation = "/" + index + "/doc/1";
+        String docLocation = "/" + index + "/" + type + "/1";
         String doc = "{\"test\": \"test\"}";
 
         if (isRunningAgainstOldCluster()) {
@@ -715,7 +690,9 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
 
         Request request = new Request("GET", docLocation);
-        request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE));
+        if (getOldClusterVersion().before(Version.V_6_7_0)) {
+            request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE));
+        }
         assertThat(toStr(client().performRequest(request)), containsString(doc));
     }
 
@@ -779,8 +756,12 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         }
         if (shouldHaveTranslog) {
             // Update a few documents so we are sure to have a translog
-            indexRandomDocuments(count / 10, false /* Flushing here would invalidate the whole thing....*/, false,
-                i -> jsonBuilder().startObject().field("field", "value").endObject());
+            indexRandomDocuments(
+                count / 10,
+                false, // flushing here would invalidate the whole thing
+                false,
+                i -> jsonBuilder().startObject().field("field", "value").endObject()
+            );
         }
         saveInfoDocument("should_have_translog", Boolean.toString(shouldHaveTranslog));
     } else {
@@ -791,6 +772,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         // Count the documents in the index to make sure we have as many as we put there
         Request countRequest = new Request("GET", "/" + index + "/_search");
         countRequest.addParameter("size", "0");
+        refresh();
         Map<String, Object> countResponse = entityAsMap(client().performRequest(countRequest));
         assertTotalHits(count, countResponse);
 
@@ -863,7 +845,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
      */
     public void testSnapshotRestore() throws IOException {
         int count;
-        if (isRunningAgainstOldCluster()) {
+        if (isRunningAgainstOldCluster() && getOldClusterVersion().major < 8) {
             // Create the index
             count = between(200, 300);
             indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject());
@@ -894,13 +876,19 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             }
             templateBuilder.endObject();
             templateBuilder.startObject("mappings"); {
-                templateBuilder.startObject("doc"); {
-                    templateBuilder.startObject("_source"); {
+                if (isRunningAgainstAncientCluster()) {
+                    templateBuilder.startObject(type);
+                }
+                {
+                    templateBuilder.startObject("_source");
+                    {
                         templateBuilder.field("enabled", true);
                     }
                     templateBuilder.endObject();
                 }
-                templateBuilder.endObject();
+                if (isRunningAgainstAncientCluster()) {
+                    templateBuilder.endObject();
+                }
             }
             templateBuilder.endObject();
             templateBuilder.startObject("aliases"); {
@@ -922,7 +910,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
         // In 7.0, type names are no longer expected by default in put index template requests.
         // We therefore use the deprecated typed APIs when running against the current version.
-        if (isRunningAgainstOldCluster() == false) {
+        if (isRunningAgainstAncientCluster()) {
             createTemplateRequest.addParameter(INCLUDE_TYPE_NAME_PARAMETER, "true");
         }
         createTemplateRequest.setOptions(allowTypeRemovalWarnings());
@@ -1016,12 +1004,13 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         int numDocs = between(10, 100);
         for (int i = 0; i < numDocs; i++) {
             String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject());
-            Request request = new Request("POST", "/" + index + "/doc/" + i);
+            Request request = new Request("POST", "/" + index + "/" + type + "/" + i);
+            if (isRunningAgainstAncientCluster() == false) {
+                request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE));
+            }
             request.setJsonEntity(doc);
             client().performRequest(request);
-            if (rarely()) {
-                refresh();
-            }
+            refresh();
         }
         client().performRequest(new Request("POST", "/" + index + "/_flush"));
         int liveDocs = numDocs;
@@ -1029,11 +1018,11 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         for (int i = 0; i < numDocs; i++) {
             if (randomBoolean()) {
                 String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v2").endObject());
-                Request request = new Request("POST", "/" + index + "/doc/" + i);
+                Request request = new Request("POST", "/" + index + "/" + type + "/" + i);
                 request.setJsonEntity(doc);
                 client().performRequest(request);
             } else if (randomBoolean()) {
-                client().performRequest(new Request("DELETE", "/" + index + "/doc/" + i));
+                client().performRequest(new Request("DELETE", "/" + index + "/" + type + "/" + i));
                 liveDocs--;
             }
         }
@@ -1046,7 +1035,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         }
     }
 
-    private void checkSnapshot(String snapshotName, int count, Version tookOnVersion) throws IOException {
+    private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException {
         // Check the snapshot metadata, especially the version
         Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName);
         Map<String, Object> listSnapshotResponse = entityAsMap(client().performRequest(listSnapshotRequest));
@@ -1103,7 +1092,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             bulk.append("{\"index\":{\"_id\":\"").append(count + i).append("\"}}\n");
             bulk.append("{\"test\":\"test\"}\n");
         }
-        Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/doc/_bulk");
+        Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/" + type + "/_bulk");
         writeToRestoredRequest.addParameter("refresh", "true");
         writeToRestoredRequest.setJsonEntity(bulk.toString());
         writeToRestoredRequest.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE));
@@ -1132,7 +1121,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
         // In 7.0, type names are no longer returned by default in get index template requests.
         // We therefore use the deprecated typed APIs when running against the current version.
-        if (isRunningAgainstOldCluster() == false) {
+        if (isRunningAgainstAncientCluster() == false) {
             getTemplateRequest.addParameter(INCLUDE_TYPE_NAME_PARAMETER, "true");
         }
         getTemplateRequest.setOptions(allowTypeRemovalWarnings());
@@ -1145,7 +1134,14 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             expectedTemplate.put("index_patterns", singletonList("evil_*"));
         }
         expectedTemplate.put("settings", singletonMap("index", singletonMap("number_of_shards", "1")));
-        expectedTemplate.put("mappings", singletonMap("doc", singletonMap("_source", singletonMap("enabled", true))));
+        // We don't have the type in the response starting with 7.0, but we won't have it on old cluster after upgrade
+        // either so look at the response to figure out the correct assertions
+        if (isTypeInTemplateResponse(getTemplateResponse)) {
+            expectedTemplate.put("mappings", singletonMap(type, singletonMap("_source", singletonMap("enabled", true))));
+        } else {
+            expectedTemplate.put("mappings", singletonMap("_source", singletonMap("enabled", true)));
+        }
+
         expectedTemplate.put("order", 0);
         Map<String, Object> aliases = new HashMap<>();
         aliases.put("alias1", emptyMap());
@@ -1155,18 +1151,33 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         if (false == expectedTemplate.equals(getTemplateResponse)) {
             NotEqualMessageBuilder builder = new NotEqualMessageBuilder();
             builder.compareMaps(getTemplateResponse, expectedTemplate);
+            logger.info("expected: {}\nactual:{}", expectedTemplate, getTemplateResponse);
             fail("template doesn't match:\n" + builder.toString());
         }
     }
 
+    @SuppressWarnings("unchecked")
+    private boolean isTypeInTemplateResponse(Map<String, Object> getTemplateResponse) {
+        return ( (Map<String, Object>) (
+            (Map<String, Object>) getTemplateResponse.getOrDefault("test_template", emptyMap())
+        ).get("mappings")).get("_source") == null;
+    }
+
     // TODO tests for upgrades after shrink. We've had trouble with shrink in the past.
 
-    private void indexRandomDocuments(int count, boolean flushAllowed, boolean saveInfo,
-                                      CheckedFunction<Integer, XContentBuilder, IOException> docSupplier) throws IOException {
+    private void indexRandomDocuments(
+        final int count,
+        final boolean flushAllowed,
+        final boolean saveInfo,
+        final CheckedFunction<Integer, XContentBuilder, IOException> docSupplier)
+        throws IOException {
         logger.info("Indexing {} random documents", count);
         for (int i = 0; i < count; i++) {
             logger.debug("Indexing document [{}]", i);
-            Request createDocument = new Request("POST", "/" + index + "/doc/" + i);
+            Request createDocument = new Request("POST", "/" + index + "/" + type + "/" + i);
+            if (isRunningAgainstAncientCluster() == false) {
+                createDocument.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE));
+            }
             createDocument.setJsonEntity(Strings.toString(docSupplier.apply(i)));
             client().performRequest(createDocument);
             if (rarely()) {
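Before 7.0 a get-template response nests the mapping under the type name ({"mappings":{"doc":{"_source":...}}}), while 7.0+ responses drop that level ({"mappings":{"_source":...}}). The helper above keys off whether "_source" sits directly under "mappings". A self-contained version of that probe, assuming the same response shape:

    import java.util.Map;
    import static java.util.Collections.emptyMap;

    class TemplateShape {
        @SuppressWarnings("unchecked")
        static boolean typeInResponse(Map<String, Object> getTemplateResponse) {
            Map<String, Object> template = (Map<String, Object>) getTemplateResponse.getOrDefault("test_template", emptyMap());
            Map<String, Object> mappings = (Map<String, Object>) template.get("mappings");
            // With a type level present, "_source" is nested one level deeper and is absent here.
            return mappings.get("_source") == null;
        }
    }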
@@ -1191,16 +1202,21 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         infoDoc.field("value", value);
         infoDoc.endObject();
         // Only create the first version so we know how many documents are created when the index is first created
-        Request request = new Request("PUT", "/info/doc/" + index + "_" + type);
+        Request request = new Request("PUT", "/info/" + this.type + "/" + index + "_" + type);
         request.addParameter("op_type", "create");
         request.setJsonEntity(Strings.toString(infoDoc));
+        if (isRunningAgainstAncientCluster() == false) {
+            request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE));
+        }
         client().performRequest(request);
     }
 
     private String loadInfoDocument(String type) throws IOException {
-        Request request = new Request("GET", "/info/doc/" + index + "_" + type);
+        Request request = new Request("GET", "/info/" + this.type + "/" + index + "_" + type);
         request.addParameter("filter_path", "_source");
-        request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE));
+        if (isRunningAgainstAncientCluster()) {
+            request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE));
+        }
         String doc = toStr(client().performRequest(request));
         Matcher m = Pattern.compile("\"value\":\"(.+)\"").matcher(doc);
         assertTrue(doc, m.find());
@@ -20,6 +20,7 @@
 package org.elasticsearch.upgrades;
 
 import org.apache.http.util.EntityUtils;
+import org.elasticsearch.Version;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
@@ -145,6 +146,7 @@ public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase {
     }
 
     public void testQueryBuilderBWC() throws Exception {
+        final String type = getOldClusterVersion().before(Version.V_7_0_0) ? "doc" : "_doc";
         String index = "queries";
         if (isRunningAgainstOldCluster()) {
             XContentBuilder mappingsAndSettings = jsonBuilder();
@@ -157,7 +159,9 @@ public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase {
         }
         {
             mappingsAndSettings.startObject("mappings");
-            mappingsAndSettings.startObject("doc");
+            if (isRunningAgainstAncientCluster()) {
+                mappingsAndSettings.startObject(type);
+            }
             mappingsAndSettings.startObject("properties");
             {
                 mappingsAndSettings.startObject("query");
@@ -176,7 +180,9 @@ public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase {
             }
             mappingsAndSettings.endObject();
             mappingsAndSettings.endObject();
-            mappingsAndSettings.endObject();
+            if (isRunningAgainstAncientCluster()) {
+                mappingsAndSettings.endObject();
+            }
         }
         mappingsAndSettings.endObject();
         Request request = new Request("PUT", "/" + index);
@@ -188,7 +194,7 @@ public class QueryBuilderBWCIT extends AbstractFullClusterRestartTestCase {
         assertEquals(200, rsp.getStatusLine().getStatusCode());
 
         for (int i = 0; i < CANDIDATES.size(); i++) {
-            request = new Request("PUT", "/" + index + "/doc/" + Integer.toString(i));
+            request = new Request("PUT", "/" + index + "/" + type + "/" + Integer.toString(i));
             request.setJsonEntity((String) CANDIDATES.get(i)[0]);
             rsp = client().performRequest(request);
             assertEquals(201, rsp.getStatusLine().getStatusCode());
@@ -300,7 +300,11 @@ public class RecoveryIT extends AbstractRollingTestCase {
             if (randomBoolean()) {
                 indexDocs(index, i, 1); // update
             } else if (randomBoolean()) {
-                client().performRequest(new Request("DELETE", index + "/test/" + i));
+                if (getNodeId(v -> v.onOrAfter(Version.V_7_0_0)) == null) {
+                    client().performRequest(new Request("DELETE", index + "/test/" + i));
+                } else {
+                    client().performRequest(new Request("DELETE", index + "/_doc/" + i));
+                }
             }
         }
     }
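During a rolling upgrade the delete has to use whichever endpoint the upgraded nodes accept: the typed /test/{id} form only works while every node predates 7.0. A compact sketch of the routing decision (hypothetical helper):

    class DeleteRouting {
        // Route deletes by the most recent node version present in the mixed cluster.
        static String deletePath(String index, int docId, boolean cluster7xNodePresent) {
            return cluster7xNodePresent ? index + "/_doc/" + docId : index + "/test/" + docId;
        }
    }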
@@ -67,8 +67,3 @@
   - match: { hits.total: 1 }
   - match: { hits.hits.0._id: q3 }
 
----
-"Index with _all is available":
-  - do:
-      indices.get:
-        index: all-index
@@ -200,19 +200,3 @@
         wait_for_completion: true
         task_id: $task
 
----
-"Create an index with _all explicitly disabled":
-  - skip:
-      features: warnings
-  - do:
-      warnings:
-        - "[_all] is deprecated in 6.0+ and will be removed in 7.0. As a replacement, you can use [copy_to] on mapping fields to create your own catch all field."
-      indices.create:
-        index: all-index
-        body:
-          mappings:
-            _all:
-              enabled: false
-            properties:
-              field:
-                type: text
@@ -125,17 +125,4 @@
         task_id: $task_id
   - match: { task.headers.X-Opaque-Id: "Reindexing Again" }
 
----
-"Index with _all is available":
-  - do:
-      indices.get:
-        index: all-index
-
-  - do:
-      indices.get_mapping:
-        include_type_name: false
-        index: all-index
-
-  - is_true: all-index.mappings._all
-  - match: { all-index.mappings._all.enabled: false}
@@ -124,7 +124,9 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0);
     public static final int V_7_0_0_ID = 7000099;
     public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
-    public static final Version CURRENT = V_7_0_0;
+    public static final int V_7_1_0_ID = 7010099;
+    public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0);
+    public static final Version CURRENT = V_7_1_0;
 
 
     static {
@@ -138,6 +140,8 @@ public class Version implements Comparable<Version>, ToXContentFragment {
 
     public static Version fromId(int id) {
         switch (id) {
+            case V_7_1_0_ID:
+                return V_7_1_0;
             case V_7_0_0_ID:
                 return V_7_0_0;
             case V_6_7_0_ID:
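The numeric id encodes the version as major * 1,000,000 + minor * 10,000 + revision * 100 + build, with 99 used for releases, which is why 7.1.0 gets id 7010099. A quick check of that scheme:

    class VersionId {
        static int id(int major, int minor, int revision, int build) {
            return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
        }

        public static void main(String[] args) {
            System.out.println(id(7, 1, 0, 99)); // 7010099, matches V_7_1_0_ID
            System.out.println(id(7, 0, 0, 99)); // 7000099, matches V_7_0_0_ID
        }
    }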
@@ -367,10 +367,9 @@ public class VersionTests extends ESTestCase {
             () -> new IllegalStateException("expected previous minor version for [" + currentOrNextMajorVersion + "]"));
         final Version previousMinorVersion = VersionUtils.getPreviousMinorVersion();
 
-        assert previousMinorVersion.major == currentOrNextMajorVersion.major
-            || previousMinorVersion.major == lastMinorFromPreviousMajor.major;
-        boolean isCompatible = previousMinorVersion.major == currentOrNextMajorVersion.major
-            || previousMinorVersion.minor == lastMinorFromPreviousMajor.minor;
+        boolean isCompatible =
+            previousMinorVersion.major == currentOrNextMajorVersion.major
+                || previousMinorVersion.minor == lastMinorFromPreviousMajor.minor;
 
         final String message = String.format(
             Locale.ROOT,
@@ -3637,6 +3637,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     }
 
     @TestLogging("org.elasticsearch.snapshots:TRACE")
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38489")
     public void testAbortedSnapshotDuringInitDoesNotStart() throws Exception {
         final Client client = client();
 
@@ -52,7 +52,7 @@ public class VersionUtils {
         // this breaks b/c 5.x is still in version list but master doesn't care about it!
         //assert majorVersions.size() == 2;
         // TODO: remove oldVersions, we should only ever have 2 majors in Version
-        List<Version> oldVersions = majorVersions.getOrDefault((int)current.major - 2, Collections.emptyList());
+        List<List<Version>> oldVersions = splitByMinor(majorVersions.getOrDefault((int)current.major - 2, Collections.emptyList()));
         List<List<Version>> previousMajor = splitByMinor(majorVersions.get((int)current.major - 1));
         List<List<Version>> currentMajor = splitByMinor(majorVersions.get((int)current.major));
 
@@ -78,12 +78,21 @@ public class VersionUtils {
                 moveLastToUnreleased(stableVersions, unreleasedVersions);
             }
             // remove the next bugfix
-            moveLastToUnreleased(stableVersions, unreleasedVersions);
+            if (stableVersions.isEmpty() == false) {
+                moveLastToUnreleased(stableVersions, unreleasedVersions);
+            }
         }
 
-        List<Version> releasedVersions = Stream.concat(oldVersions.stream(),
-            Stream.concat(previousMajor.stream(), currentMajor.stream()).flatMap(List::stream))
-            .collect(Collectors.toList());
+        // If none of the previous major was released, then the last minor and bugfix of the old version was not released either.
+        if (previousMajor.isEmpty()) {
+            assert currentMajor.isEmpty() : currentMajor;
+            // minor of the old version is being staged
+            moveLastToUnreleased(oldVersions, unreleasedVersions);
+            // bugfix of the old version is also being staged
+            moveLastToUnreleased(oldVersions, unreleasedVersions);
+        }
+        List<Version> releasedVersions = Stream.of(oldVersions, previousMajor, currentMajor)
+            .flatMap(List::stream).flatMap(List::stream).collect(Collectors.toList());
         Collections.sort(unreleasedVersions); // we add unreleased out of order, so need to sort here
         return new Tuple<>(Collections.unmodifiableList(releasedVersions), Collections.unmodifiableList(unreleasedVersions));
     }
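The isEmpty() guard covers the new 7.1.0 layout, where the stable list can already be drained by the earlier moves. A reduced sketch of the guarded move:

    import java.util.List;

    class MoveGuard {
        // Pop the highest remaining version into the unreleased bucket, but only if any remain.
        static void moveLastToUnreleasedIfAny(List<String> stable, List<String> unreleased) {
            if (stable.isEmpty() == false) {
                unreleased.add(stable.remove(stable.size() - 1));
            }
        }
    }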
@@ -63,6 +63,10 @@ public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase
 
     private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version"));
 
+    public final boolean isRunningAgainstAncientCluster() {
+        return isRunningAgainstOldCluster() && oldClusterVersion.before(Version.V_7_0_0);
+    }
+
     public final Version getOldClusterVersion() {
         return oldClusterVersion;
     }
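"Ancient" bundles the two checks the restart tests keep repeating: the test is in its old-cluster phase and that cluster predates 7.0. The same predicate in isolation (hypothetical signature, not the base class API):

    class AncientCheck {
        // Ancient = old-cluster phase AND the old cluster is pre-7.0.
        static boolean isAncient(boolean oldClusterPhase, int oldClusterMajor) {
            return oldClusterPhase && oldClusterMajor < 7;
        }
    }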
@@ -18,6 +18,7 @@ import org.elasticsearch.xpack.core.ml.job.config.RuleCondition;
 
 import java.io.IOException;
 import java.time.Clock;
+import java.time.Instant;
 import java.time.ZoneOffset;
 import java.time.ZonedDateTime;
 import java.util.EnumSet;
@@ -28,7 +29,7 @@ import static org.hamcrest.Matchers.containsString;
 public class ScheduledEventTests extends AbstractSerializingTestCase<ScheduledEvent> {
 
     public static ScheduledEvent createScheduledEvent(String calendarId) {
-        ZonedDateTime start = Clock.systemUTC().instant().atZone(ZoneOffset.UTC);
+        ZonedDateTime start = nowWithMillisResolution();
         return new ScheduledEvent(randomAlphaOfLength(10), start, start.plusSeconds(randomIntBetween(1, 10000)),
             calendarId, null);
     }
@@ -119,4 +120,8 @@ public class ScheduledEventTests extends AbstractSerializingTestCase<ScheduledEv
             ScheduledEvent.LENIENT_PARSER.apply(parser, null);
         }
     }
+
+    private static ZonedDateTime nowWithMillisResolution() {
+        return Instant.ofEpochMilli(Clock.systemUTC().millis()).atZone(ZoneOffset.UTC);
+    }
 }
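ZonedDateTime values taken straight from the clock carry sub-millisecond precision, but a serialization round trip keeps only milliseconds, so equality-based round-trip tests can fail intermittently; truncating up front makes the expected and parsed instants identical. The helper, consolidated into a runnable form:

    import java.time.Clock;
    import java.time.Instant;
    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;

    class MillisNow {
        // Drop sub-millisecond precision so a serialize/deserialize round trip compares equal.
        static ZonedDateTime nowWithMillisResolution() {
            return Instant.ofEpochMilli(Clock.systemUTC().millis()).atZone(ZoneOffset.UTC);
        }
    }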
@@ -32,7 +32,6 @@ import org.elasticsearch.xpack.watcher.watch.WatchStoreUtils;
 
 import java.io.IOException;
 import java.time.Clock;
-import java.time.Instant;
 import java.time.ZoneOffset;
 import java.time.ZonedDateTime;
 import java.util.ArrayList;
@@ -102,7 +101,7 @@ final class WatcherIndexingListener implements IndexingOperationListener, Cluste
     @Override
     public Engine.Index preIndex(ShardId shardId, Engine.Index operation) {
         if (isWatchDocument(shardId.getIndexName(), operation.type())) {
-            ZonedDateTime now = Instant.ofEpochMilli(clock.millis()).atZone(ZoneOffset.UTC);
+            ZonedDateTime now = clock.instant().atZone(ZoneOffset.UTC);
             try {
                 Watch watch = parser.parseWithSecrets(operation.id(), true, operation.source(), now, XContentType.JSON,
                     operation.getIfSeqNo(), operation.getIfPrimaryTerm());
@@ -36,6 +36,7 @@ import org.mockito.ArgumentCaptor;
 
 import java.time.ZoneOffset;
 import java.time.ZonedDateTime;
+import java.time.format.DateTimeFormatter;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -275,6 +276,7 @@ public class IndexActionTests extends ESTestCase {
             fieldName + "] or [ctx.payload._doc." + fieldName + "]"));
     }
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38581")
     public void testIndexActionExecuteSingleDoc() throws Exception {
         boolean customId = randomBoolean();
         boolean docIdAsParam = customId && randomBoolean();
@@ -324,8 +326,9 @@ public class IndexActionTests extends ESTestCase {
         assertThat(indexRequest.getRefreshPolicy(), is(expectedRefreshPolicy));
 
         if (timestampField != null) {
+            final DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME;
             assertThat(indexRequest.sourceAsMap().keySet(), is(hasSize(2)));
-            assertThat(indexRequest.sourceAsMap(), hasEntry(timestampField, executionTime.toString()));
+            assertThat(indexRequest.sourceAsMap(), hasEntry(timestampField, formatter.format(executionTime)));
         } else {
             assertThat(indexRequest.sourceAsMap().keySet(), is(hasSize(1)));
         }
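ZonedDateTime.toString() can render the same instant differently from DateTimeFormatter.ISO_DATE_TIME (for example in how trailing zeros and zone information appear), so the assertion now formats the expectation the same way the indexed timestamp is formatted, per this test change. A small demonstration of the difference:

    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;
    import java.time.format.DateTimeFormatter;

    class TimestampFormats {
        public static void main(String[] args) {
            ZonedDateTime t = ZonedDateTime.now(ZoneOffset.UTC);
            System.out.println(t.toString());                               // default rendering
            System.out.println(DateTimeFormatter.ISO_DATE_TIME.format(t));  // rendering asserted by the test
        }
    }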
@@ -32,6 +32,7 @@ import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateReque
 import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule;
 import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger;
 import org.hamcrest.Matcher;
+import org.junit.Before;
 
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
@@ -60,6 +61,13 @@ import static org.hamcrest.Matchers.startsWith;
 
 public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
+    private String type;
+
+    @Before
+    public void setType() {
+        type = getOldClusterVersion().before(Version.V_6_7_0) ? "doc" : "_doc";
+    }
+
     @Override
     protected Settings restClientSettings() {
         String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8));
@@ -76,7 +84,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
      * Tests that a single document survives. Super basic smoke test.
      */
     public void testSingleDoc() throws IOException {
-        String docLocation = "/testsingledoc/doc/1";
+        String docLocation = "/testsingledoc/" + type + "/1";
         String doc = "{\"test\": \"test\"}";
 
         if (isRunningAgainstOldCluster()) {
@@ -87,7 +95,9 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         }
 
         Request getRequest = new Request("GET", docLocation);
-        getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE));
+        if (getOldClusterVersion().before(Version.V_6_7_0)) {
+            getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE));
+        }
         assertThat(toStr(client().performRequest(getRequest)), containsString(doc));
     }
 
@@ -148,17 +158,21 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
     public void testWatcher() throws Exception {
         if (isRunningAgainstOldCluster()) {
             logger.info("Adding a watch on old cluster {}", getOldClusterVersion());
-            Request createBwcWatch = new Request("PUT", "/_xpack/watcher/watch/bwc_watch");
+            Request createBwcWatch = new Request("PUT", getWatcherEndpoint() + "/watch/bwc_watch");
+            Request createBwcThrottlePeriod = new Request("PUT", getWatcherEndpoint() + "/watch/bwc_throttle_period");
+            if (getOldClusterVersion().onOrAfter(Version.V_7_0_0)) {
+                createBwcWatch.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE));
+                createBwcThrottlePeriod.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE));
+            }
             createBwcWatch.setJsonEntity(loadWatch("simple-watch.json"));
             client().performRequest(createBwcWatch);
 
             logger.info("Adding a watch with \"fun\" throttle periods on old cluster");
-            Request createBwcThrottlePeriod = new Request("PUT", "_xpack/watcher/watch/bwc_throttle_period");
             createBwcThrottlePeriod.setJsonEntity(loadWatch("throttle-period-watch.json"));
             client().performRequest(createBwcThrottlePeriod);
 
             logger.info("Adding a watch with \"fun\" read timeout on old cluster");
-            Request createFunnyTimeout = new Request("PUT", "_xpack/watcher/watch/bwc_funny_timeout");
+            Request createFunnyTimeout = new Request("PUT", getWatcherEndpoint() + "/watch/bwc_funny_timeout");
             createFunnyTimeout.setJsonEntity(loadWatch("funny-timeout-watch.json"));
             client().performRequest(createFunnyTimeout);
 
@@ -246,7 +260,11 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             // index documents for the rollup job
             final StringBuilder bulk = new StringBuilder();
             for (int i = 0; i < numDocs; i++) {
-                bulk.append("{\"index\":{\"_index\":\"rollup-docs\",\"_type\":\"doc\"}}\n");
+                if (getOldClusterVersion().onOrAfter(Version.V_7_0_0)) {
+                    bulk.append("{\"index\":{\"_index\":\"rollup-docs\"}}\n");
+                } else {
+                    bulk.append("{\"index\":{\"_index\":\"rollup-docs\",\"_type\":\"doc\"}}\n");
+                }
                 String date = String.format(Locale.ROOT, "%04d-01-01T00:%02d:00Z", year, i);
                 bulk.append("{\"timestamp\":\"").append(date).append("\",\"value\":").append(i).append("}\n");
             }
@@ -257,7 +275,8 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             client().performRequest(bulkRequest);
 
             // create the rollup job
-            final Request createRollupJobRequest = new Request("PUT", "/_xpack/rollup/job/rollup-job-test");
+            final Request createRollupJobRequest = new Request("PUT", getRollupEndpoint() + "/job/rollup-job-test");
+
             createRollupJobRequest.setJsonEntity("{"
                 + "\"index_pattern\":\"rollup-*\","
                 + "\"rollup_index\":\"results-rollup\","
@@ -278,7 +297,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE));
 
             // start the rollup job
-            final Request startRollupJobRequest = new Request("POST", "/_xpack/rollup/job/rollup-job-test/_start");
+            final Request startRollupJobRequest = new Request("POST", getRollupEndpoint() + "/job/rollup-job-test/_start");
             Map<String, Object> startRollupJobResponse = entityAsMap(client().performRequest(startRollupJobRequest));
             assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE));
 
@@ -304,12 +323,12 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         assumeTrue("Rollup ID scheme changed in 6.4", getOldClusterVersion().before(Version.V_6_4_0));
         if (isRunningAgainstOldCluster()) {
 
-            final Request indexRequest = new Request("POST", "/id-test-rollup/doc/1");
+            final Request indexRequest = new Request("POST", "/id-test-rollup" + type + "/1");
             indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-01T00:00:01\",\"value\":123}");
             client().performRequest(indexRequest);
 
             // create the rollup job
-            final Request createRollupJobRequest = new Request("PUT", "/_xpack/rollup/job/rollup-id-test");
+            final Request createRollupJobRequest = new Request("PUT", getRollupEndpoint() + "/job/rollup-id-test");
             createRollupJobRequest.setJsonEntity("{"
                 + "\"index_pattern\":\"id-test-rollup\","
                 + "\"rollup_index\":\"id-test-results-rollup\","
@@ -337,7 +356,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE));
 
             // start the rollup job
-            final Request startRollupJobRequest = new Request("POST", "/_xpack/rollup/job/rollup-id-test/_start");
+            final Request startRollupJobRequest = new Request("POST", getRollupEndpoint() + "/job/rollup-id-test/_start");
             Map<String, Object> startRollupJobResponse = entityAsMap(client().performRequest(startRollupJobRequest));
             assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE));
 
@@ -365,9 +384,11 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
         } else {
 
-            final Request indexRequest = new Request("POST", "/id-test-rollup/doc/2");
+            final Request indexRequest = new Request("POST", "/id-test-rollup/" + type + "/2");
             indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-02T00:00:01\",\"value\":345}");
-            indexRequest.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE));
+            if (getOldClusterVersion().before(Version.V_6_7_0)) {
+                indexRequest.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE));
+            }
             client().performRequest(indexRequest);
 
             assertRollUpJob("rollup-id-test");
@@ -431,12 +452,8 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
             client().performRequest(doc2);
             return;
         }
-        final Request sqlRequest;
-        if (isRunningAgainstOldCluster()) {
-            sqlRequest = new Request("POST", "/_xpack/sql");
-        } else {
-            sqlRequest = new Request("POST", "/_sql");
-        }
+        final Request sqlRequest = new Request("POST", getSQLEndpoint());
 
         sqlRequest.setJsonEntity("{\"query\":\"SELECT * FROM testsqlfailsonindexwithtwotypes\"}");
         ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(sqlRequest));
         assertEquals(400, e.getResponse().getStatusLine().getStatusCode());
@@ -458,8 +475,21 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
     private void assertWatchIndexContentsWork() throws Exception {
         // Fetch a basic watch
         Request getRequest = new Request("GET", "_watcher/watch/bwc_watch");
-        getRequest.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE,
-            WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE));
+        if (getOldClusterVersion().before(Version.V_7_0_0)) {
+            getRequest.setOptions(
+                expectWarnings(
+                    IndexAction.TYPES_DEPRECATION_MESSAGE,
+                    WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE
+                )
+            );
+        } else {
+            getRequest.setOptions(
+                expectWarnings(
+                    IndexAction.TYPES_DEPRECATION_MESSAGE
+                )
+            );
+        }
 
         Map<String, Object> bwcWatch = entityAsMap(client().performRequest(getRequest));
 
+        logger.error("-----> {}", bwcWatch);
@@ -475,8 +505,20 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
         // Fetch a watch with "fun" throttle periods
         getRequest = new Request("GET", "_watcher/watch/bwc_throttle_period");
-        getRequest.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE,
-            WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE));
+        if (getOldClusterVersion().before(Version.V_7_0_0)) {
+            getRequest.setOptions(
+                expectWarnings(
+                    IndexAction.TYPES_DEPRECATION_MESSAGE,
+                    WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE
+                )
+            );
+        } else {
+            getRequest.setOptions(
+                expectWarnings(
+                    IndexAction.TYPES_DEPRECATION_MESSAGE
+                )
+            );
+        }
         bwcWatch = entityAsMap(client().performRequest(getRequest));
         assertThat(bwcWatch.get("found"), equalTo(true));
         source = (Map<String, Object>) bwcWatch.get("watch");
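The same before-7.0.0 branch now appears twice in a row for the two watcher GETs. A possible consolidation — a sketch only, under the assumption that `expectWarnings` returns a `RequestOptions` as its usage elsewhere in this diff suggests, and not a change this commit makes:

// Hypothetical helper (not part of this commit) collapsing the duplicated
// branches above: watches fetched from a pre-7.0 cluster carry both types
// deprecation warnings, later ones only the IndexAction warning.
private RequestOptions watchWarningOptions() {
    if (getOldClusterVersion().before(Version.V_7_0_0)) {
        return expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE,
            WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE);
    }
    return expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE);
}

Each call site would then shrink to getRequest.setOptions(watchWarningOptions());.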
@@ -560,7 +602,13 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         try {
             Map<String, Object> response = entityAsMap(client().performRequest(request));
             Map<String, Object> hits = (Map<String, Object>) response.get("hits");
-            int total = (int) hits.get("total");
+            logger.info("Hits are: {}", hits);
+            int total;
+            if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) {
+                total = (int) ((Map<String, Object>) hits.get("total")).get("value");
+            } else {
+                total = (int) hits.get("total");
+            }
             assertThat(total, greaterThanOrEqualTo(expectedHits));
         } catch (IOException ioe) {
             if (ioe instanceof ResponseException) {
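7.0 changed the search response format: `hits.total` went from a bare number to an object of the form {"value": N, "relation": "eq"}, which is why the branch above picks a shape per cluster version. An alternative sketch that keys off the payload shape instead of the version — not what this commit does, just one way to avoid consulting the version at all:

// Sketch: detect the hits.total shape at runtime. `response` is the parsed
// map returned by entityAsMap().
//   pre-7.0:  {"hits": {"total": 3, ...}}
//   7.0+:     {"hits": {"total": {"value": 3, "relation": "eq"}, ...}}
@SuppressWarnings("unchecked")
private static int extractTotalHits(Map<String, Object> response) {
    Map<String, Object> hits = (Map<String, Object>) response.get("hits");
    Object total = hits.get("total");
    if (total instanceof Map) {
        return (int) ((Map<String, Object>) total).get("value"); // 7.0+ object form
    }
    return (int) total; // pre-7.0 bare number
}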
@@ -580,12 +628,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
     private void createUser(final boolean oldCluster) throws Exception {
         final String id = oldCluster ? "preupgrade_user" : "postupgrade_user";
-        Request request;
-        if (oldCluster) {
-            request = new Request("PUT", "/_xpack/security/user/" + id);
-        } else {
-            request = new Request("PUT", "/_security/user/" + id);
-        }
+        Request request = new Request("PUT", getSecurityEndpoint() + "/user/" + id);
         request.setJsonEntity(
             "{\n" +
             " \"password\" : \"j@rV1s\",\n" +
@@ -599,12 +642,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
     private void createRole(final boolean oldCluster) throws Exception {
         final String id = oldCluster ? "preupgrade_role" : "postupgrade_role";
-        Request request;
-        if (oldCluster) {
-            request = new Request("PUT", "/_xpack/security/role/" + id);
-        } else {
-            request = new Request("PUT", "/_security/role/" + id);
-        }
+        Request request = new Request("PUT", getSecurityEndpoint() + "/role/" + id);
         request.setJsonEntity(
             "{\n" +
             " \"run_as\": [ \"abc\" ],\n" +
@@ -625,20 +663,59 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
     private void assertUserInfo(final boolean oldCluster) throws Exception {
         final String user = oldCluster ? "preupgrade_user" : "postupgrade_user";
-        Map<String, Object> response = oldCluster ?
-            entityAsMap(client().performRequest(new Request("GET", "/_xpack/security/user/" + user))) :
-            entityAsMap(client().performRequest(new Request("GET", "/_security/user/" + user)));
+        Request request = new Request("GET", getSecurityEndpoint() + "/user/" + user);
+        Map<String, Object> response = entityAsMap(client().performRequest(request));
         @SuppressWarnings("unchecked") Map<String, Object> userInfo = (Map<String, Object>) response.get(user);
         assertEquals(user + "@example.com", userInfo.get("email"));
         assertNotNull(userInfo.get("full_name"));
         assertNotNull(userInfo.get("roles"));
     }
 
+    private String getSecurityEndpoint() {
+        String securityEndpoint;
+        if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) {
+            securityEndpoint = "/_security";
+        } else {
+            securityEndpoint = "/_xpack/security";
+        }
+        return securityEndpoint;
+    }
+
+    private String getSQLEndpoint() {
+        String sqlEndpoint;
+        if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) {
+            sqlEndpoint = "/_sql";
+        } else {
+            sqlEndpoint = "/_xpack/sql";
+        }
+        return sqlEndpoint;
+    }
+
+    private String getRollupEndpoint() {
+        String rollupEndpoint;
+        if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) {
+            rollupEndpoint = "/_rollup";
+        } else {
+            rollupEndpoint = "/_xpack/rollup";
+        }
+        return rollupEndpoint;
+    }
+
+    private String getWatcherEndpoint() {
+        String watcherEndpoint;
+        if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) {
+            watcherEndpoint = "/_watcher";
+        } else {
+            watcherEndpoint = "/_xpack/watcher";
+        }
+        return watcherEndpoint;
+    }
+
     private void assertRoleInfo(final boolean oldCluster) throws Exception {
         final String role = oldCluster ? "preupgrade_role" : "postupgrade_role";
-        @SuppressWarnings("unchecked") Map<String, Object> response = oldCluster ?
-            (Map<String, Object>) entityAsMap(client().performRequest(new Request("GET", "/_xpack/security/role/" + role))).get(role) :
-            (Map<String, Object>) entityAsMap(client().performRequest(new Request("GET", "/_security/role/" + role))).get(role);
+        @SuppressWarnings("unchecked") Map<String, Object> response = (Map<String, Object>) entityAsMap(
+            client().performRequest(new Request("GET", getSecurityEndpoint() + "/role/" + role))
+        ).get(role);
         assertNotNull(response.get("run_as"));
         assertNotNull(response.get("cluster"));
         assertNotNull(response.get("indices"));
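The four helpers above are identical except for the path suffix: each of these APIs moved from the `/_xpack/<feature>` namespace to a top-level `/_<feature>` endpoint in 7.0. A single parameterized helper would cover them all — a sketch of a possible consolidation, not something this commit introduces:

// Hypothetical consolidation of getSecurityEndpoint()/getSQLEndpoint()/
// getRollupEndpoint()/getWatcherEndpoint(): the version test is the same in
// all four, only the feature name differs.
private String getEndpoint(String feature) {
    if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) {
        return "/_" + feature;        // 7.0+ top-level endpoint
    }
    return "/_xpack/" + feature;      // pre-7.0 x-pack namespace
}

Call sites would then read getEndpoint("rollup") + "/job/" + rollupJob, and so on.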
@@ -650,12 +727,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
         waitForRollUpJob(rollupJob, expectedStates);
 
         // check that the rollup job is started using the RollUp API
-        final Request getRollupJobRequest;
-        if (isRunningAgainstOldCluster()) {
-            getRollupJobRequest = new Request("GET", "/_xpack/rollup/job/" + rollupJob);
-        } else {
-            getRollupJobRequest = new Request("GET", "/_rollup/job/" + rollupJob);
-        }
+        final Request getRollupJobRequest = new Request("GET", getRollupEndpoint() + "/job/" + rollupJob);
         Map<String, Object> getRollupJobResponse = entityAsMap(client().performRequest(getRollupJobRequest));
         Map<String, Object> job = getJob(getRollupJobResponse, rollupJob);
         assertNotNull(job);
@@ -700,12 +772,8 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
 
     private void waitForRollUpJob(final String rollupJob, final Matcher<?> expectedStates) throws Exception {
         assertBusy(() -> {
-            final Request getRollupJobRequest;
-            if (isRunningAgainstOldCluster()) {
-                getRollupJobRequest = new Request("GET", "/_xpack/rollup/job/" + rollupJob);
-            } else {
-                getRollupJobRequest = new Request("GET", "/_rollup/job/" + rollupJob);
-            }
+            final Request getRollupJobRequest = new Request("GET", getRollupEndpoint() + "/job/" + rollupJob);
 
             Response getRollupJobResponse = client().performRequest(getRollupJobRequest);
             assertThat(getRollupJobResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
@@ -208,6 +208,7 @@ subprojects {
     Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner")
     oldClusterTestRunner.configure {
         systemProperty 'tests.rest.suite', 'old_cluster'
+        systemProperty 'tests.rest.blacklist', ['old_cluster/40_ml_datafeed_crud/*',].join(',')
     }
 
     Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure getOtherUnicastHostAddresses ->
@@ -244,11 +245,6 @@ subprojects {
             if (version.before('6.0.0')) {
                 keystoreSetting 'xpack.security.authc.token.passphrase', 'token passphrase'
             }
-
-            systemProperty 'tests.rest.blacklist', [
-                'mixed_cluster/30_ml_jobs_crud/*',
-                'mixed_cluster/40_ml_datafeed_crud/*',
-            ].join(',')
         }
     }
 
@@ -16,12 +16,14 @@ import org.elasticsearch.client.RestClient;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.rest.action.document.RestGetAction;
 import org.elasticsearch.rest.action.document.RestIndexAction;
+import org.elasticsearch.test.rest.yaml.ObjectPath;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
+import java.util.function.Predicate;
 
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.equalTo;
@@ -64,7 +66,12 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase {
         assertNotNull(token);
         assertTokenWorks(token);
 
+        // In this test either all nodes are on a specific version or none are,
+        // so checking whether any node is on 7.0+ is sufficient:
+        boolean postSevenDotZeroNodes = getNodeId(v -> v.onOrAfter(Version.V_7_0_0)) != null;
         Request indexRequest1 = new Request("PUT", "token_backwards_compatibility_it/doc/old_cluster_token1");
+        if (postSevenDotZeroNodes) {
+            indexRequest1.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE));
+        }
         indexRequest1.setJsonEntity(
             "{\n" +
             " \"token\": \"" + token + "\"\n" +
@@ -79,6 +86,9 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase {
         assertNotNull(token);
         assertTokenWorks(token);
         Request indexRequest2 = new Request("PUT", "token_backwards_compatibility_it/doc/old_cluster_token2");
+        if (postSevenDotZeroNodes) {
+            indexRequest2.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE));
+        }
         indexRequest2.setJsonEntity(
             "{\n" +
             " \"token\": \"" + token + "\"\n" +
@@ -86,6 +96,19 @@ public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase {
         client().performRequest(indexRequest2);
     }
 
+    private String getNodeId(Predicate<Version> versionPredicate) throws IOException {
+        Response response = client().performRequest(new Request("GET", "_nodes"));
+        ObjectPath objectPath = ObjectPath.createFromResponse(response);
+        Map<String, Object> nodesAsMap = objectPath.evaluate("nodes");
+        for (String id : nodesAsMap.keySet()) {
+            Version version = Version.fromString(objectPath.evaluate("nodes." + id + ".version"));
+            if (versionPredicate.test(version)) {
+                return id;
+            }
+        }
+        return null;
+    }
+
     public void testTokenWorksInMixedOrUpgradedCluster() throws Exception {
         assumeTrue("this test should only run against the mixed or upgraded cluster",
             CLUSTER_TYPE == ClusterType.MIXED || CLUSTER_TYPE == ClusterType.UPGRADED);
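getNodeId returns the id of the first node whose version satisfies the predicate, or null when no node matches; the caller above uses it purely as an existence check. A usage sketch under those semantics:

// Usage sketch: the node id itself is discarded; only "does any node match"
// is of interest. During a rolling upgrade (mixed cluster) both can be true.
boolean hasPreSevenNode = getNodeId(v -> v.before(Version.V_7_0_0)) != null;
boolean hasPostSevenNode = getNodeId(v -> v.onOrAfter(Version.V_7_0_0)) != null;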