Mirror of https://github.com/honeymoose/OpenSearch.git, synced 2025-03-26 01:48:45 +00:00
[7.x] Adds write_index_only option to put mapping API (#59539)

parent 4d7c59bedb · commit e54b4a729f
@@ -195,6 +195,11 @@
         ],
         "default":"open",
         "description":"Whether to expand wildcard expression to concrete indices that are open, closed or both."
+      },
+      "write_index_only":{
+        "type":"boolean",
+        "default":false,
+        "description":"When true, applies mappings only to the write index of an alias or data stream"
       }
     },
     "body":{
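Not part of this commit: a minimal sketch of how a caller could exercise the new REST parameter through the server-side Java request API that the hunks below introduce. The class name, index name "my-data-stream", and the mapping body are illustrative assumptions; the request corresponds to PUT /my-data-stream/_mapping?write_index_only=true.

package org.example.sketch; // hypothetical package for this illustration

import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.xcontent.XContentType;

public class WriteIndexOnlyRequestSketch {
    // Builds a put-mapping request that targets only the write index of
    // "my-data-stream" (an assumed alias or data stream name), mirroring
    // PUT /my-data-stream/_mapping?write_index_only=true
    public static PutMappingRequest writeIndexOnlyMapping() {
        return new PutMappingRequest("my-data-stream")
            .type("_doc")
            .source("{\"properties\":{\"message\":{\"type\":\"text\"}}}", XContentType.JSON)
            .writeIndexOnly(true);
    }
}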
|
@@ -78,6 +78,8 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im

     private Index concreteIndex;

+    private boolean writeIndexOnly;
+
     public PutMappingRequest(StreamInput in) throws IOException {
         super(in);
         indices = in.readStringArray();
@@ -93,6 +95,9 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
         } else {
             origin = null;
         }
+        if (in.getVersion().onOrAfter(Version.V_7_9_0)) {
+            writeIndexOnly = in.readBoolean();
+        }
     }

     public PutMappingRequest() {
@@ -323,6 +328,15 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
         }
     }

+    public PutMappingRequest writeIndexOnly(boolean writeIndexOnly) {
+        this.writeIndexOnly = writeIndexOnly;
+        return this;
+    }
+
+    public boolean writeIndexOnly() {
+        return writeIndexOnly;
+    }
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
@@ -337,6 +351,9 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
         if (out.getVersion().onOrAfter(Version.V_6_7_0)) {
             out.writeOptionalString(origin);
         }
+        if (out.getVersion().onOrAfter(Version.V_7_9_0)) {
+            out.writeBoolean(writeIndexOnly);
+        }
     }

     @Override
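The two version checks above gate the new flag on the wire so mixed-version clusters stay compatible: nodes before 7.9.0 neither write nor expect the extra boolean. A hedged round-trip sketch of that pattern, not part of this commit, using the usual BytesStreamOutput/StreamInput pairing; the class and index names are assumptions.

package org.example.sketch; // hypothetical package for this illustration

import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.xcontent.XContentType;

public class WriteIndexOnlyWireSketch {
    // Serializes the request at 7.9.0 and reads it back: the flag survives.
    // Against an older stream version the boolean is simply not written or read.
    public static boolean roundTrip() throws Exception {
        PutMappingRequest original = new PutMappingRequest("logs")   // "logs" is an assumed index name
            .type("_doc")
            .source("{\"properties\":{\"message\":{\"type\":\"text\"}}}", XContentType.JSON)
            .writeIndexOnly(true);
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.setVersion(Version.V_7_9_0);       // writer side >= 7.9.0
            original.writeTo(out);
            try (StreamInput in = out.bytes().streamInput()) {
                in.setVersion(Version.V_7_9_0);    // reader side >= 7.9.0
                return new PutMappingRequest(in).writeIndexOnly(); // true
            }
        }
    }
}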
|
@@ -42,7 +42,9 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
 import java.util.Objects;
 import java.util.Optional;

@@ -97,9 +99,8 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
     protected void masterOperation(final PutMappingRequest request, final ClusterState state,
                                    final ActionListener<AcknowledgedResponse> listener) {
         try {
-            final Index[] concreteIndices = request.getConcreteIndex() == null ?
-                    indexNameExpressionResolver.concreteIndices(state, request)
-                    : new Index[] {request.getConcreteIndex()};
+            final Index[] concreteIndices = resolveIndices(state, request, indexNameExpressionResolver);
             final Optional<Exception> maybeValidationException = requestValidators.validateRequest(request, state, concreteIndices);
             if (maybeValidationException.isPresent()) {
                 listener.onFailure(maybeValidationException.get());
@@ -113,6 +114,23 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
         }
     }

+    static Index[] resolveIndices(final ClusterState state, PutMappingRequest request, final IndexNameExpressionResolver iner) {
+        if (request.getConcreteIndex() == null) {
+            if (request.writeIndexOnly()) {
+                List<Index> indices = new ArrayList<>();
+                for (String indexExpression : request.indices()) {
+                    indices.add(iner.concreteWriteIndex(state, request.indicesOptions(), indexExpression,
+                        request.indicesOptions().allowNoIndices(), request.includeDataStreams()));
+                }
+                return indices.toArray(Index.EMPTY_ARRAY);
+            } else {
+                return iner.concreteIndices(state, request);
+            }
+        } else {
+            return new Index[]{request.getConcreteIndex()};
+        }
+    }
+
     static void performMappingUpdate(Index[] concreteIndices,
                                      PutMappingRequest request,
                                      ActionListener<AcknowledgedResponse> listener,
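resolveIndices keeps the old behaviour unless the flag is set: a pre-resolved concrete index wins, otherwise each expression either expands to all matching indices or, with write_index_only, collapses to a single write index per alias or data stream. A usage sketch, not part of this commit, placed in the same package because resolveIndices is package-private; the alias name "alias1" and the supplied cluster state are assumptions.

package org.elasticsearch.action.admin.indices.mapping.put; // same package: resolveIndices is package-private

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.index.Index;

public class ResolveIndicesSketch {
    // With write_index_only=true, "alias1" (assumed to exist in the given cluster state)
    // resolves to exactly its write index; without the flag it would expand to every
    // index the alias points to.
    public static Index[] writeIndexFor(ClusterState state) {
        PutMappingRequest request = new PutMappingRequest()
            .indices("alias1")
            .writeIndexOnly(true);
        return TransportPutMappingAction.resolveIndices(state, request, new IndexNameExpressionResolver());
    }
}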
|
@@ -99,6 +99,7 @@ public class RestPutMappingAction extends BaseRestHandler {
         putMappingRequest.timeout(request.paramAsTime("timeout", putMappingRequest.timeout()));
         putMappingRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putMappingRequest.masterNodeTimeout()));
         putMappingRequest.indicesOptions(IndicesOptions.fromRequest(request, putMappingRequest.indicesOptions()));
+        putMappingRequest.writeIndexOnly(request.paramAsBoolean("write_index_only", false));
         return channel -> client.admin().indices().putMapping(putMappingRequest, new RestToXContentListener<>(channel));
     }
 }
|
@@ -20,8 +20,16 @@
 package org.elasticsearch.action.admin.indices.mapping.put;

 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.datastream.DeleteDataStreamRequestTests;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.AliasMetadata;
+import org.elasticsearch.cluster.metadata.IndexAbstraction;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentType;
@@ -31,8 +39,14 @@ import org.elasticsearch.index.RandomCreateIndexGenerator;
 import org.elasticsearch.test.ESTestCase;

 import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;

+import static org.elasticsearch.common.collect.Tuple.tuple;
 import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.containsString;

 public class PutMappingRequestTests extends ESTestCase {

@@ -75,6 +89,7 @@ public class PutMappingRequestTests extends ESTestCase {
         assertEquals("mapping source must be pairs of fieldnames and properties definition.", e.getMessage());
     }

+
     public void testToXContent() throws IOException {
         PutMappingRequest request = new PutMappingRequest("foo");
         request.type("my_type");
@@ -138,4 +153,127 @@

         return request;
     }
+
+    public void testResolveIndicesWithWriteIndexOnlyAndDataStreamsAndWriteAliases() {
+        String[] dataStreamNames = {"foo", "bar", "baz"};
+        List<Tuple<String, Integer>> dsMetadata = org.elasticsearch.common.collect.List.of(
+            tuple(dataStreamNames[0], randomIntBetween(1, 3)),
+            tuple(dataStreamNames[1], randomIntBetween(1, 3)),
+            tuple(dataStreamNames[2], randomIntBetween(1, 3)));
+
+        ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams(dsMetadata,
+            org.elasticsearch.common.collect.List.of("index1", "index2", "index3"));
+        cs = addAliases(cs, org.elasticsearch.common.collect.List.of(
+            tuple("alias1", org.elasticsearch.common.collect.List.of(tuple("index1", false), tuple("index2", true))),
+            tuple("alias2", org.elasticsearch.common.collect.List.of(tuple("index2", false), tuple("index3", true)))
+        ));
+        PutMappingRequest request = new PutMappingRequest().indices("foo", "alias1", "alias2").writeIndexOnly(true);
+        Index[] indices = TransportPutMappingAction.resolveIndices(cs, request, new IndexNameExpressionResolver());
+        List<String> indexNames = Arrays.stream(indices).map(Index::getName).collect(Collectors.toList());
+        IndexAbstraction expectedDs = cs.metadata().getIndicesLookup().get("foo");
+        // should resolve the data stream and each alias to their respective write indices
+        assertThat(indexNames, containsInAnyOrder(expectedDs.getWriteIndex().getIndex().getName(), "index2", "index3"));
+    }
+
+    public void testResolveIndicesWithoutWriteIndexOnlyAndDataStreamsAndWriteAliases() {
+        String[] dataStreamNames = {"foo", "bar", "baz"};
+        List<Tuple<String, Integer>> dsMetadata = org.elasticsearch.common.collect.List.of(
+            tuple(dataStreamNames[0], randomIntBetween(1, 3)),
+            tuple(dataStreamNames[1], randomIntBetween(1, 3)),
+            tuple(dataStreamNames[2], randomIntBetween(1, 3)));
+
+        ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams(dsMetadata,
+            org.elasticsearch.common.collect.List.of("index1", "index2", "index3"));
+        cs = addAliases(cs, org.elasticsearch.common.collect.List.of(
+            tuple("alias1", org.elasticsearch.common.collect.List.of(tuple("index1", false), tuple("index2", true))),
+            tuple("alias2", org.elasticsearch.common.collect.List.of(tuple("index2", false), tuple("index3", true)))
+        ));
+        PutMappingRequest request = new PutMappingRequest().indices("foo", "alias1", "alias2");
+        Index[] indices = TransportPutMappingAction.resolveIndices(cs, request, new IndexNameExpressionResolver());
+        List<String> indexNames = Arrays.stream(indices).map(Index::getName).collect(Collectors.toList());
+        IndexAbstraction expectedDs = cs.metadata().getIndicesLookup().get("foo");
+        List<String> expectedIndices = expectedDs.getIndices().stream().map(im -> im.getIndex().getName()).collect(Collectors.toList());
+        expectedIndices.addAll(org.elasticsearch.common.collect.List.of("index1", "index2", "index3"));
+        // should resolve the data stream and each alias to _all_ their respective indices
+        assertThat(indexNames, containsInAnyOrder(expectedIndices.toArray()));
+    }
+
+    public void testResolveIndicesWithWriteIndexOnlyAndDataStreamAndIndex() {
+        String[] dataStreamNames = {"foo", "bar", "baz"};
+        List<Tuple<String, Integer>> dsMetadata = org.elasticsearch.common.collect.List.of(
+            tuple(dataStreamNames[0], randomIntBetween(1, 3)),
+            tuple(dataStreamNames[1], randomIntBetween(1, 3)),
+            tuple(dataStreamNames[2], randomIntBetween(1, 3)));
+
+        ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams(dsMetadata,
+            org.elasticsearch.common.collect.List.of("index1", "index2", "index3"));
+        cs = addAliases(cs, org.elasticsearch.common.collect.List.of(
+            tuple("alias1", org.elasticsearch.common.collect.List.of(tuple("index1", false), tuple("index2", true))),
+            tuple("alias2", org.elasticsearch.common.collect.List.of(tuple("index2", false), tuple("index3", true)))
+        ));
+        PutMappingRequest request = new PutMappingRequest().indices("foo", "index3").writeIndexOnly(true);
+        Index[] indices = TransportPutMappingAction.resolveIndices(cs, request, new IndexNameExpressionResolver());
+        List<String> indexNames = Arrays.stream(indices).map(Index::getName).collect(Collectors.toList());
+        IndexAbstraction expectedDs = cs.metadata().getIndicesLookup().get("foo");
+        List<String> expectedIndices = expectedDs.getIndices().stream().map(im -> im.getIndex().getName()).collect(Collectors.toList());
+        expectedIndices.addAll(org.elasticsearch.common.collect.List.of("index1", "index2", "index3"));
+        // should resolve the data stream and each alias to _all_ their respective indices
+        assertThat(indexNames, containsInAnyOrder(expectedDs.getWriteIndex().getIndex().getName(), "index3"));
+    }
+
+    public void testResolveIndicesWithWriteIndexOnlyAndNoSingleWriteIndex() {
+        String[] dataStreamNames = {"foo", "bar", "baz"};
+        List<Tuple<String, Integer>> dsMetadata = org.elasticsearch.common.collect.List.of(
+            tuple(dataStreamNames[0], randomIntBetween(1, 3)),
+            tuple(dataStreamNames[1], randomIntBetween(1, 3)),
+            tuple(dataStreamNames[2], randomIntBetween(1, 3)));
+
+        ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams(dsMetadata,
+            org.elasticsearch.common.collect.List.of("index1", "index2", "index3"));
+        final ClusterState cs2 = addAliases(cs, org.elasticsearch.common.collect.List.of(
+            tuple("alias1", org.elasticsearch.common.collect.List.of(tuple("index1", false), tuple("index2", true))),
+            tuple("alias2", org.elasticsearch.common.collect.List.of(tuple("index2", false), tuple("index3", true)))
+        ));
+        PutMappingRequest request = new PutMappingRequest().indices("*").writeIndexOnly(true);
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+            () -> TransportPutMappingAction.resolveIndices(cs2, request, new IndexNameExpressionResolver()));
+        assertThat(e.getMessage(), containsString("The index expression [*] and options provided did not point to a single write-index"));
+    }
+
+    public void testResolveIndicesWithWriteIndexOnlyAndAliasWithoutWriteIndex() {
+        String[] dataStreamNames = {"foo", "bar", "baz"};
+        List<Tuple<String, Integer>> dsMetadata = org.elasticsearch.common.collect.List.of(
+            tuple(dataStreamNames[0], randomIntBetween(1, 3)),
+            tuple(dataStreamNames[1], randomIntBetween(1, 3)),
+            tuple(dataStreamNames[2], randomIntBetween(1, 3)));
+
+        ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams(dsMetadata,
+            org.elasticsearch.common.collect.List.of("index1", "index2", "index3"));
+        final ClusterState cs2 = addAliases(cs, org.elasticsearch.common.collect.List.of(
+            tuple("alias1", org.elasticsearch.common.collect.List.of(tuple("index1", false), tuple("index2", false))),
+            tuple("alias2", org.elasticsearch.common.collect.List.of(tuple("index2", false), tuple("index3", false)))
+        ));
+        PutMappingRequest request = new PutMappingRequest().indices("alias2").writeIndexOnly(true);
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
+            () -> TransportPutMappingAction.resolveIndices(cs2, request, new IndexNameExpressionResolver()));
+        assertThat(e.getMessage(), containsString("no write index is defined for alias [alias2]"));
+    }
+
+    /**
+     * Adds aliases to the supplied ClusterState instance. The aliases parameter takes of list of tuples of aliasName
+     * to the alias's indices. The alias's indices are a tuple of index name and a flag indicating whether the alias
+     * is a write alias for that index. See usage examples above.
+     */
+    private static ClusterState addAliases(ClusterState cs, List<Tuple<String, List<Tuple<String, Boolean>>>> aliases) {
+        Metadata.Builder builder = Metadata.builder(cs.metadata());
+        for (Tuple<String, List<Tuple<String, Boolean>>> alias : aliases) {
+            for (Tuple<String, Boolean> index : alias.v2()) {
+                IndexMetadata im = builder.get(index.v1());
+                AliasMetadata newAliasMd = AliasMetadata.newAliasMetadataBuilder(alias.v1()).writeIndex(index.v2()).build();
+                builder.put(IndexMetadata.builder(im).putAlias(newAliasMd));
+            }
+        }
+        return ClusterState.builder(cs).metadata(builder.build()).build();
+    }
+
 }
|
@@ -320,13 +320,7 @@ teardown:
           name: my-template1
          body:
            index_patterns: [simple*]
-            template:
-              mappings:
-                properties:
-                  '@timestamp':
-                    type: date
-            data_stream:
-              timestamp_field: '@timestamp'
+            data_stream: {}

  - do:
      security.put_role:
|