Merge branch 'master' into ccr

* master:
  Trim down usages of `ShardOperationFailedException` interface (#28312)
  Do not return all indices if a specific alias is requested via get aliases api.
  [Test] Lower bwc version for rank-eval rest tests
  CountedBitSet doesn't need to extend BitSet. (#28239)
  Calculate sum in Kahan summation algorithm in aggregations (#27807) (#27848) (see the sketch after this list)
  Remove the `update_all_types` option. (#28288)
  Add information when master node left to DiscoveryNodes' shortSummary() (#28197)
  Provide explanation of dangling indices, fixes #26008 (#26999)
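
Aside: the Kahan summation change named above trades a little extra arithmetic for much lower floating-point error when accumulating many doubles. A minimal sketch of the technique in plain Java, not the actual aggregation code from #27807:

    public final class KahanSumDemo {
        // Compensated (Kahan) summation: carry the low-order error lost at each add.
        static double kahanSum(double[] values) {
            double sum = 0.0;
            double compensation = 0.0;
            for (double value : values) {
                double corrected = value - compensation;
                double next = sum + corrected;            // low-order bits of corrected are lost here
                compensation = (next - sum) - corrected;  // recover them for the next iteration
                sum = next;
            }
            return sum;
        }

        public static void main(String[] args) {
            double[] values = new double[10_000_000];
            java.util.Arrays.fill(values, 0.1);
            double naive = 0.0;
            for (double v : values) {
                naive += v;
            }
            // The naive loop drifts away from 1,000,000; Kahan stays at the
            // correctly rounded result.
            System.out.println("naive = " + naive + ", kahan = " + kahanSum(values));
        }
    }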
Author: Jason Tedor
Date:   2018-01-22 11:50:16 -05:00
Commit: 437459d2f9
179 changed files with 1097 additions and 881 deletions

View File

@@ -173,7 +173,6 @@ public final class Request {
         parameters.withTimeout(createIndexRequest.timeout());
         parameters.withMasterTimeout(createIndexRequest.masterNodeTimeout());
         parameters.withWaitForActiveShards(createIndexRequest.waitForActiveShards());
-        parameters.withUpdateAllTypes(createIndexRequest.updateAllTypes());

         HttpEntity entity = createEntity(createIndexRequest, REQUEST_BODY_CONTENT_TYPE);
         return new Request(HttpPut.METHOD_NAME, endpoint, parameters.getParams(), entity);
@@ -585,13 +584,6 @@ public final class Request {
             return putParam("timeout", timeout);
         }

-        Params withUpdateAllTypes(boolean updateAllTypes) {
-            if (updateAllTypes) {
-                return putParam("update_all_types", Boolean.TRUE.toString());
-            }
-            return this;
-        }
-
         Params withVersion(long version) {
             if (version != Versions.MATCH_ANY) {
                 return putParam("version", Long.toString(version));

View File

@@ -310,14 +310,6 @@ public class RequestTests extends ESTestCase {
         setRandomMasterTimeout(createIndexRequest, expectedParams);
         setRandomWaitForActiveShards(createIndexRequest::waitForActiveShards, expectedParams);

-        if (randomBoolean()) {
-            boolean updateAllTypes = randomBoolean();
-            createIndexRequest.updateAllTypes(updateAllTypes);
-            if (updateAllTypes) {
-                expectedParams.put("update_all_types", Boolean.TRUE.toString());
-            }
-        }
-
         Request request = Request.createIndex(createIndexRequest);
         assertEquals("/" + indexName, request.getEndpoint());
         assertEquals(expectedParams, request.getParameters());

View File

@@ -14,3 +14,7 @@ The `index_options` field for numeric fields has been deprecated in 6 and has n
 To safeguard against out of memory errors, the number of nested json objects within a single
 document across all fields has been limited to 10000. This default limit can be changed with
 the index setting `index.mapping.nested_objects.limit`.
+
+==== The `update_all_types` option has been removed
+
+This option is useless now that all indices have at most one type.
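
For client code, the practical effect of this removal is a deleted setter, as the later hunks in this commit show. A hedged sketch of the migration (the index name "my-index" is just an example):

    import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;

    public class UpdateAllTypesMigration {
        CreateIndexRequest build() {
            CreateIndexRequest request = new CreateIndexRequest("my-index");
            // request.updateAllTypes(true); // 6.x only: the setter is deleted in this commit
            return request;                  // on 7.0, simply omit the call
        }
    }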

View File

@@ -48,3 +48,12 @@ as long as the following conditions are met:
     Recover as long as this many data nodes have joined the cluster.

 NOTE: These settings only take effect on a full cluster restart.
+
+=== Dangling indices
+
+When a node joins the cluster, any shards stored in its local data directory
+which do not already exist in the cluster will be imported into the cluster.
+This functionality is intended as a best effort to help users who lose all
+master nodes. If a new master node is started which is unaware of the other
+indices in the cluster, adding the old nodes will cause the old indices to be
+imported, instead of being deleted.

View File

@@ -207,8 +207,8 @@ public class ScaledFloatFieldMapper extends FieldMapper {
         }

         @Override
-        public void checkCompatibility(MappedFieldType other, List<String> conflicts, boolean strict) {
-            super.checkCompatibility(other, conflicts, strict);
+        public void checkCompatibility(MappedFieldType other, List<String> conflicts) {
+            super.checkCompatibility(other, conflicts);
             if (scalingFactor != ((ScaledFloatFieldType) other).getScalingFactor()) {
                 conflicts.add("mapper [" + name() + "] has different [scaling_factor] values");
             }
@@ -424,8 +424,8 @@ public class ScaledFloatFieldMapper extends FieldMapper {
         }

         @Override
-        protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-            super.doMerge(mergeWith, updateAllTypes);
+        protected void doMerge(Mapper mergeWith) {
+            super.doMerge(mergeWith);
             ScaledFloatFieldMapper other = (ScaledFloatFieldMapper) mergeWith;
             if (other.ignoreMalformed.explicit()) {
                 this.ignoreMalformed = other.ignoreMalformed;

View File

@@ -202,8 +202,8 @@ public class TokenCountFieldMapper extends FieldMapper {
     }

     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
         this.enablePositionIncrements = ((TokenCountFieldMapper) mergeWith).enablePositionIncrements;
     }

View File

@@ -63,7 +63,7 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject().endObject().string();
         MapperService mapperService = createIndex("test").mapperService();
         DocumentMapper stage1 = mapperService.merge("person",
-                new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
+                new CompressedXContent(stage1Mapping), MapperService.MergeReason.MAPPING_UPDATE);

         String stage2Mapping = XContentFactory.jsonBuilder().startObject()
                 .startObject("person")
@@ -75,7 +75,7 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject()
                 .endObject().endObject().string();
         DocumentMapper stage2 = mapperService.merge("person",
-                new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
+                new CompressedXContent(stage2Mapping), MapperService.MergeReason.MAPPING_UPDATE);

         // previous mapper has not been modified
         assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("keyword"));

View File

@@ -194,8 +194,8 @@ public final class ParentIdFieldMapper extends FieldMapper {

     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         ParentIdFieldMapper parentMergeWith = (ParentIdFieldMapper) mergeWith;
         this.children = parentMergeWith.children;
     }

View File

@@ -316,8 +316,8 @@ public final class ParentJoinFieldMapper extends FieldMapper {
     }

     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         ParentJoinFieldMapper joinMergeWith = (ParentJoinFieldMapper) mergeWith;
         List<String> conflicts = new ArrayList<>();
         for (ParentIdFieldMapper mapper : parentIdFields) {
@@ -347,7 +347,7 @@ public final class ParentJoinFieldMapper extends FieldMapper {
                     conflicts.add("cannot remove child [" + child + "] in join field [" + name() + "]");
                 }
             }
-            ParentIdFieldMapper merged = (ParentIdFieldMapper) self.merge(mergeWithMapper, updateAllTypes);
+            ParentIdFieldMapper merged = (ParentIdFieldMapper) self.merge(mergeWithMapper);
             newParentIdFields.add(merged);
         }
     }
@@ -356,7 +356,7 @@ public final class ParentJoinFieldMapper extends FieldMapper {
         }
         this.eagerGlobalOrdinals = joinMergeWith.eagerGlobalOrdinals;
         this.parentIdFields = Collections.unmodifiableList(newParentIdFields);
-        this.uniqueFieldMapper = (MetaJoinFieldMapper) uniqueFieldMapper.merge(joinMergeWith.uniqueFieldMapper, updateAllTypes);
+        this.uniqueFieldMapper = (MetaJoinFieldMapper) uniqueFieldMapper.merge(joinMergeWith.uniqueFieldMapper);
         uniqueFieldMapper.setFieldMapper(this);
     }

View File

@@ -57,7 +57,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
             .endObject().string();
         IndexService service = createIndex("test");
         DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping),
-            MapperService.MergeReason.MAPPING_UPDATE, false);
+            MapperService.MergeReason.MAPPING_UPDATE);
         assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(service.mapperService()));

         // Doc without join
@@ -106,7 +106,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
             .endObject().string();
         IndexService service = createIndex("test");
         DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping),
-            MapperService.MergeReason.MAPPING_UPDATE, false);
+            MapperService.MergeReason.MAPPING_UPDATE);
         ParsedDocument doc = docMapper.parse(SourceToParse.source("test", "type", "2",
             XContentFactory.jsonBuilder().startObject()
                 .startObject("join_field")
@@ -141,7 +141,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
             .endObject().string();
         IndexService service = createIndex("test");
         DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping),
-            MapperService.MergeReason.MAPPING_UPDATE, false);
+            MapperService.MergeReason.MAPPING_UPDATE);
         assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(service.mapperService()));

         // Doc without join
@@ -221,7 +221,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
             .endObject().endObject().string();
         IndexService indexService = createIndex("test");
         DocumentMapper docMapper = indexService.mapperService().merge("type", new CompressedXContent(mapping),
-            MapperService.MergeReason.MAPPING_UPDATE, false);
+            MapperService.MergeReason.MAPPING_UPDATE);
         assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService()));

         {
@@ -235,7 +235,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject().endObject().string();
             IllegalStateException exc = expectThrows(IllegalStateException.class,
                 () -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping),
-                    MapperService.MergeReason.MAPPING_UPDATE, false));
+                    MapperService.MergeReason.MAPPING_UPDATE));
             assertThat(exc.getMessage(), containsString("cannot remove parent [parent] in join field [join_field]"));
         }
@@ -251,7 +251,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject().endObject().string();
             IllegalStateException exc = expectThrows(IllegalStateException.class,
                 () -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping),
-                    MapperService.MergeReason.MAPPING_UPDATE, false));
+                    MapperService.MergeReason.MAPPING_UPDATE));
             assertThat(exc.getMessage(), containsString("cannot remove child [grand_child2] in join field [join_field]"));
         }
@@ -268,7 +268,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject().endObject().string();
             IllegalStateException exc = expectThrows(IllegalStateException.class,
                 () -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping),
-                    MapperService.MergeReason.MAPPING_UPDATE, false));
+                    MapperService.MergeReason.MAPPING_UPDATE));
             assertThat(exc.getMessage(), containsString("cannot create child [parent] from an existing parent"));
         }
@@ -285,7 +285,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject().endObject().string();
             IllegalStateException exc = expectThrows(IllegalStateException.class,
                 () -> indexService.mapperService().merge("type", new CompressedXContent(updateMapping),
-                    MapperService.MergeReason.MAPPING_UPDATE, false));
+                    MapperService.MergeReason.MAPPING_UPDATE));
             assertThat(exc.getMessage(), containsString("cannot create parent [grand_child2] from an existing child]"));
         }
@@ -300,7 +300,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject()
                 .endObject().endObject().string();
             docMapper = indexService.mapperService().merge("type", new CompressedXContent(updateMapping),
-                MapperService.MergeReason.MAPPING_UPDATE, true);
+                MapperService.MergeReason.MAPPING_UPDATE);
             assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService()));
             ParentJoinFieldMapper mapper = ParentJoinFieldMapper.getMapper(indexService.mapperService());
             assertTrue(mapper.hasChild("child2"));
@@ -321,7 +321,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject()
                 .endObject().endObject().string();
             docMapper = indexService.mapperService().merge("type", new CompressedXContent(updateMapping),
-                MapperService.MergeReason.MAPPING_UPDATE, true);
+                MapperService.MergeReason.MAPPING_UPDATE);
             assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(indexService.mapperService()));
             ParentJoinFieldMapper mapper = ParentJoinFieldMapper.getMapper(indexService.mapperService());
             assertTrue(mapper.hasParent("other"));
@@ -349,7 +349,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
         IndexService indexService = createIndex("test");
         MapperParsingException exc = expectThrows(MapperParsingException.class,
             () -> indexService.mapperService().merge("type", new CompressedXContent(mapping),
-                MapperService.MergeReason.MAPPING_UPDATE, false));
+                MapperService.MergeReason.MAPPING_UPDATE));
         assertThat(exc.getRootCause().getMessage(),
             containsString("join field [object.join_field] cannot be added inside an object or in a multi-field"));
     }
@@ -371,7 +371,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
         IndexService indexService = createIndex("test");
         MapperParsingException exc = expectThrows(MapperParsingException.class,
             () -> indexService.mapperService().merge("type", new CompressedXContent(mapping),
-                MapperService.MergeReason.MAPPING_UPDATE, false));
+                MapperService.MergeReason.MAPPING_UPDATE));
         assertThat(exc.getRootCause().getMessage(),
             containsString("join field [number.join_field] cannot be added inside an object or in a multi-field"));
     }
@@ -397,7 +397,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject()
                 .endObject().string();
             IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("type",
-                new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false));
+                new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE));
             assertThat(exc.getMessage(), containsString("Field [_parent_join] is defined twice in [type]"));
         }
@@ -414,7 +414,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject()
                 .endObject().string();
             indexService.mapperService().merge("type",
-                new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE, false);
+                new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE);
             String updateMapping = XContentFactory.jsonBuilder().startObject()
                 .startObject("properties")
                 .startObject("another_join_field")
@@ -423,7 +423,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject()
                 .endObject().string();
             IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> indexService.mapperService().merge("type",
-                new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE, false));
+                new CompressedXContent(updateMapping), MapperService.MergeReason.MAPPING_UPDATE));
             assertThat(exc.getMessage(), containsString("Field [_parent_join] is defined twice in [type]"));
         }
     }
@@ -442,7 +442,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
             .endObject().string();
         IndexService service = createIndex("test");
         DocumentMapper docMapper = service.mapperService().merge("type", new CompressedXContent(mapping),
-            MapperService.MergeReason.MAPPING_UPDATE, false);
+            MapperService.MergeReason.MAPPING_UPDATE);
         assertTrue(docMapper.mappers().getMapper("join_field") == ParentJoinFieldMapper.getMapper(service.mapperService()));
         assertFalse(service.mapperService().fullName("join_field").eagerGlobalOrdinals());
         assertNotNull(service.mapperService().fullName("join_field#parent"));
@@ -463,7 +463,7 @@ public class ParentJoinFieldMapperTests extends ESSingleNodeTestCase {
                 .endObject()
                 .endObject().string();
         service.mapperService().merge("type", new CompressedXContent(mapping),
-            MapperService.MergeReason.MAPPING_UPDATE, false);
+            MapperService.MergeReason.MAPPING_UPDATE);
         assertFalse(service.mapperService().fullName("join_field").eagerGlobalOrdinals());
         assertNotNull(service.mapperService().fullName("join_field#parent"));
         assertFalse(service.mapperService().fullName("join_field#parent").eagerGlobalOrdinals());

View File

@@ -132,7 +132,7 @@ public class HasChildQueryBuilderTests extends AbstractQueryTestCase<HasChildQue
             .endObject().endObject().endObject();
         mapperService.merge(TYPE,
-            new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE);
     }

     /**

View File

@@ -112,7 +112,7 @@ public class HasParentQueryBuilderTests extends AbstractQueryTestCase<HasParentQ
             .endObject().endObject().endObject();
         mapperService.merge(TYPE,
-            new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE);
     }

     /**

View File

@@ -97,7 +97,7 @@ public class LegacyHasChildQueryBuilderTests extends AbstractQueryTestCase<HasCh
             BOOLEAN_FIELD_NAME, "type=boolean",
             DATE_FIELD_NAME, "type=date",
             OBJECT_FIELD_NAME, "type=object"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
         mapperService.merge(CHILD_TYPE, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE,
             "_parent", "type=" + PARENT_TYPE,
             STRING_FIELD_NAME, "type=text",
@@ -107,7 +107,7 @@ public class LegacyHasChildQueryBuilderTests extends AbstractQueryTestCase<HasCh
             BOOLEAN_FIELD_NAME, "type=boolean",
             DATE_FIELD_NAME, "type=date",
             OBJECT_FIELD_NAME, "type=object"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
     }

     @Override

View File

@@ -88,7 +88,7 @@ public class LegacyHasParentQueryBuilderTests extends AbstractQueryTestCase<HasP
             BOOLEAN_FIELD_NAME, "type=boolean",
             DATE_FIELD_NAME, "type=date",
             OBJECT_FIELD_NAME, "type=object"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
         mapperService.merge(CHILD_TYPE, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE,
             "_parent", "type=" + PARENT_TYPE,
             STRING_FIELD_NAME, "type=text",
@@ -98,9 +98,9 @@ public class LegacyHasParentQueryBuilderTests extends AbstractQueryTestCase<HasP
             BOOLEAN_FIELD_NAME, "type=boolean",
             DATE_FIELD_NAME, "type=date",
             OBJECT_FIELD_NAME, "type=object"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
         mapperService.merge("just_a_type", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("just_a_type"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
     }

     /**

View File

@@ -72,7 +72,7 @@ public class LegacyParentIdQueryBuilderTests extends AbstractQueryTestCase<Paren
             BOOLEAN_FIELD_NAME, "type=boolean",
             DATE_FIELD_NAME, "type=date",
             OBJECT_FIELD_NAME, "type=object"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
         mapperService.merge(CHILD_TYPE, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(CHILD_TYPE,
             "_parent", "type=" + PARENT_TYPE,
             STRING_FIELD_NAME, "type=text",
@@ -81,7 +81,7 @@ public class LegacyParentIdQueryBuilderTests extends AbstractQueryTestCase<Paren
             BOOLEAN_FIELD_NAME, "type=boolean",
             DATE_FIELD_NAME, "type=date",
             OBJECT_FIELD_NAME, "type=object"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
     }

     @Override

View File

@@ -104,7 +104,7 @@ public class ParentIdQueryBuilderTests extends AbstractQueryTestCase<ParentIdQue
             .endObject().endObject().endObject();
         mapperService.merge(TYPE,
-            new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+            new CompressedXContent(mapping.string()), MapperService.MergeReason.MAPPING_UPDATE);
     }

     @Override

View File

@@ -136,13 +136,13 @@ public class CandidateQueryTests extends ESSingleNodeTestCase {
             .startObject("ip_field").field("type", "ip").endObject()
             .startObject("field").field("type", "keyword").endObject()
             .endObject().endObject().endObject().string();
-        documentMapper = mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, true);
+        documentMapper = mapperService.merge("type", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE);

         String queryField = "query_field";
         String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject("type")
             .startObject("properties").startObject(queryField).field("type", "percolator").endObject().endObject()
             .endObject().endObject().string();
-        mapperService.merge("type", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
+        mapperService.merge("type", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);
         fieldMapper = (PercolatorFieldMapper) mapperService.documentMapper("type").mappers().getMapper(queryField);
         fieldType = (PercolatorFieldMapper.FieldType) fieldMapper.fieldType();

View File

@@ -98,10 +98,10 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase<PercolateQ
         String docType = "_doc";
         mapperService.merge(docType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(docType,
             queryField, "type=percolator"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
         mapperService.merge(docType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(docType,
             STRING_FIELD_NAME, "type=text"
-        ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);
+        ).string()), MapperService.MergeReason.MAPPING_UPDATE);
         if (mapperService.getIndexSettings().isSingleType() == false) {
             PercolateQueryBuilderTests.docType = docType;
         }

View File

@@ -156,7 +156,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
             .startObject("number_field7").field("type", "ip").endObject()
             .startObject("date_field").field("type", "date").endObject()
             .endObject().endObject().endObject().string();
-        mapperService.merge("doc", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge("doc", new CompressedXContent(mapper), MapperService.MergeReason.MAPPING_UPDATE);
     }

     private void addQueryFieldMappings() throws Exception {
@@ -164,7 +164,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
         String percolatorMapper = XContentFactory.jsonBuilder().startObject().startObject("doc")
             .startObject("properties").startObject(fieldName).field("type", "percolator").endObject().endObject()
             .endObject().endObject().string();
-        mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, false);
+        mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);
         fieldType = (PercolatorFieldMapper.FieldType) mapperService.fullName(fieldName);
     }
@@ -578,7 +578,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
             .startObject("properties").startObject(fieldName).field("type", "percolator").field("index", "no").endObject().endObject()
             .endObject().endObject().string();
         MapperParsingException e = expectThrows(MapperParsingException.class, () ->
-            mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true));
+            mapperService.merge("doc", new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE));
         assertThat(e.getMessage(), containsString("Mapping definition for [" + fieldName + "] has unsupported parameters: [index : no]"));
     }
@@ -592,7 +592,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
             .startObject("query_field2").field("type", "percolator").endObject()
             .endObject()
             .endObject().endObject().string();
-        mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
+        mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);

         QueryBuilder queryBuilder = matchQuery("field", "value");
         ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1",
@@ -623,7 +623,7 @@ public class PercolatorFieldMapperTests extends ESSingleNodeTestCase {
             .endObject()
             .endObject()
             .endObject().endObject().string();
-        mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE, true);
+        mapperService.merge(typeName, new CompressedXContent(percolatorMapper), MapperService.MergeReason.MAPPING_UPDATE);

         QueryBuilder queryBuilder = matchQuery("field", "value");
         ParsedDocument doc = mapperService.documentMapper(typeName).parse(SourceToParse.source("test", typeName, "1",

View File

@@ -2,8 +2,8 @@
 "Response format":
   - skip:
-      version: " - 6.99.99"
-      reason: the ranking evaluation feature is only available on 7.0
+      version: " - 6.1.99"
+      reason: the ranking evaluation feature is available since 6.2
   - do:
       indices.create:

View File

@@ -2,8 +2,8 @@
 "Response format":
   - skip:
-      version: " - 6.99.99"
-      reason: the ranking evaluation feature is only available on 7.0
+      version: " - 6.1.99"
+      reason: the ranking evaluation feature is available since 6.2
   - do:
       index:

View File

@@ -2,8 +2,8 @@
 "Response format":
   - skip:
-      version: " - 6.99.99"
-      reason: the ranking evaluation feature is only available on 7.0
+      version: " - 6.1.99"
+      reason: the ranking evaluation feature is available since 6.2
   - do:
       index:

View File

@@ -99,8 +99,8 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
         }

         @Override
-        public void checkCompatibility(MappedFieldType otherFT, List<String> conflicts, boolean strict) {
-            super.checkCompatibility(otherFT, conflicts, strict);
+        public void checkCompatibility(MappedFieldType otherFT, List<String> conflicts) {
+            super.checkCompatibility(otherFT, conflicts);
             CollationFieldType other = (CollationFieldType) otherFT;
             if (!Objects.equals(collator, other.collator)) {
                 conflicts.add("mapper [" + name() + "] has different [collator]");
@@ -619,8 +619,8 @@ public class ICUCollationKeywordFieldMapper extends FieldMapper {
         }

         @Override
-        protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-            super.doMerge(mergeWith, updateAllTypes);
+        protected void doMerge(Mapper mergeWith) {
+            super.doMerge(mergeWith);
             List<String> conflicts = new ArrayList<>();
             ICUCollationKeywordFieldMapper icuMergeWith = (ICUCollationKeywordFieldMapper) mergeWith;

View File

@@ -434,7 +434,7 @@ public class ICUCollationKeywordFieldMapperTests extends ESSingleNodeTestCase {
             .field("language", "tr")
             .field("strength", "primary")
             .endObject().endObject().endObject().endObject().string();
-        indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean());
+        indexService.mapperService().merge("type", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE);

         String mapping2 = XContentFactory.jsonBuilder().startObject().startObject("type")
             .startObject("properties").startObject("field")
@@ -443,7 +443,7 @@ public class ICUCollationKeywordFieldMapperTests extends ESSingleNodeTestCase {
             .endObject().endObject().endObject().endObject().string();
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
             () -> indexService.mapperService().merge("type",
-                new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE, randomBoolean()));
+                new CompressedXContent(mapping2), MergeReason.MAPPING_UPDATE));
         assertEquals("Can't merge because of conflicts: [Cannot update language setting for [" + FIELD_TYPE
             + "], Cannot update strength setting for [" + FIELD_TYPE + "]]", e.getMessage());
     }
} }

View File

@@ -183,7 +183,7 @@ public class SizeFieldMapper extends MetadataFieldMapper {
     }

     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+    protected void doMerge(Mapper mergeWith) {
         SizeFieldMapper sizeFieldMapperMergeWith = (SizeFieldMapper) mergeWith;
         if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) {
             this.enabledState = sizeFieldMapperMergeWith.enabledState;

View File

@@ -110,7 +110,7 @@ public class SizeMappingTests extends ESSingleNodeTestCase {
             .startObject("_size").field("enabled", false).endObject()
             .endObject().endObject().string();
         docMapper = service.mapperService().merge("type", new CompressedXContent(disabledMapping),
-            MapperService.MergeReason.MAPPING_UPDATE, false);
+            MapperService.MergeReason.MAPPING_UPDATE);
         assertThat(docMapper.metadataMapper(SizeFieldMapper.class).enabled(), is(false));
     }
} }

View File

@@ -2,8 +2,8 @@
 "Template request":
   - skip:
-      version: " - 6.99.99"
-      reason: the ranking evaluation feature is only available on 7.0
+      version: " - 6.1.99"
+      reason: the ranking evaluation feature is available since 6.2
   - do:
       indices.create:

View File

@@ -24,10 +24,6 @@
       "master_timeout": {
         "type" : "time",
         "description" : "Specify timeout for connection to master"
-      },
-      "update_all_types": {
-        "type": "boolean",
-        "description": "Whether to update the mapping for all fields with the same name across all types or not"
       }
     }
   },

View File

@@ -38,10 +38,6 @@
         "options" : ["open","closed","none","all"],
         "default" : "open",
         "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
-      },
-      "update_all_types": {
-        "type": "boolean",
-        "description": "Whether to update the mapping for all fields with the same name across all types or not"
       }
     }
  },

View File

@@ -62,8 +62,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction<Get
     @Override
     protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) {
         String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
-        @SuppressWarnings("unchecked")
-        ImmutableOpenMap<String, List<AliasMetaData>> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices);
+        ImmutableOpenMap<String, List<AliasMetaData>> result = state.metaData().findAliases(request.aliases(), concreteIndices);
         listener.onResponse(new GetAliasesResponse(result));
     }

View File

@@ -19,7 +19,7 @@
 package org.elasticsearch.action.admin.indices.cache.clear;

-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -38,7 +38,8 @@ public class ClearIndicesCacheResponse extends BroadcastResponse {
     }

-    ClearIndicesCacheResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    ClearIndicesCacheResponse(int totalShards, int successfulShards, int failedShards,
+                              List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
     }

View File

@@ -19,8 +19,8 @@
 package org.elasticsearch.action.admin.indices.cache.clear;

-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -65,7 +65,7 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
     @Override
     protected ClearIndicesCacheResponse newResponse(ClearIndicesCacheRequest request, int totalShards, int successfulShards,
                                                     int failedShards, List<EmptyResult> responses,
-                                                    List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+                                                    List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         return new ClearIndicesCacheResponse(totalShards, successfulShards, failedShards, shardFailures);
     }
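
Why these signatures change all the way down the call chain: Java generics are invariant, so a List of the concrete failure type is not a List of the interface type, and keeping the interface in some signatures forced unchecked casts. A small self-contained demo with stand-in types, not the Elasticsearch classes:

    import java.util.List;

    public class InvarianceDemo {
        interface ShardFailure {}                                    // stand-in for ShardOperationFailedException
        static class DefaultShardFailure implements ShardFailure {}  // stand-in for the concrete class

        static void acceptFailures(List<ShardFailure> failures) {}

        static void acceptFailuresCovariant(List<? extends ShardFailure> failures) {}

        public static void main(String[] args) {
            List<DefaultShardFailure> concrete = List.of(new DefaultShardFailure());
            // acceptFailures(concrete);       // does not compile: generics are invariant
            acceptFailuresCovariant(concrete); // fine: wildcard accepts any subtype's list
        }
    }

Using the concrete type consistently, as #28312 does, avoids both the wildcard noise and the unchecked casts.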

View File

@@ -43,7 +43,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
     private final String cause;
     private final String index;
     private final String providedName;
-    private final boolean updateAllTypes;

     private Index recoverFrom;
     private ResizeType resizeType;
@@ -61,12 +60,10 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
     private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;

-    public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName,
-                                                boolean updateAllTypes) {
+    public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName) {
         this.originalMessage = originalMessage;
         this.cause = cause;
         this.index = index;
-        this.updateAllTypes = updateAllTypes;
         this.providedName = providedName;
     }
@@ -155,11 +152,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
         return recoverFrom;
     }

-    /** True if all fields that span multiple types should be updated, false otherwise */
-    public boolean updateAllTypes() {
-        return updateAllTypes;
-    }
-
     /**
      * The name that was provided by the user. This might contain a date math expression.
      * @see IndexMetaData#SETTING_INDEX_PROVIDED_NAME

View File

@@ -85,8 +85,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
     private final Map<String, IndexMetaData.Custom> customs = new HashMap<>();

-    private boolean updateAllTypes = false;
-
     private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;

     public CreateIndexRequest() {
@@ -429,17 +427,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
         return this.customs;
     }

-    /** True if all fields that span multiple types should be updated, false otherwise */
-    public boolean updateAllTypes() {
-        return updateAllTypes;
-    }
-
-    /** See {@link #updateAllTypes()} */
-    public CreateIndexRequest updateAllTypes(boolean updateAllTypes) {
-        this.updateAllTypes = updateAllTypes;
-        return this;
-    }
-
     public ActiveShardCount waitForActiveShards() {
         return waitForActiveShards;
     }
@@ -499,7 +486,9 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
         for (int i = 0; i < aliasesSize; i++) {
             aliases.add(Alias.read(in));
         }
-        updateAllTypes = in.readBoolean();
+        if (in.getVersion().before(Version.V_7_0_0_alpha1)) {
+            in.readBoolean(); // updateAllTypes
+        }
         waitForActiveShards = ActiveShardCount.readFrom(in);
     }
@@ -523,7 +512,9 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
         for (Alias alias : aliases) {
             alias.writeTo(out);
         }
-        out.writeBoolean(updateAllTypes);
+        if (out.getVersion().before(Version.V_7_0_0_alpha1)) {
+            out.writeBoolean(true); // updateAllTypes
+        }
         waitForActiveShards.writeTo(out);
     }
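
The read/write changes above are the usual wire-compatibility pattern: the legacy flag is still exchanged with pre-7.0 nodes but carries no meaning. A hedged sketch of the pattern using plain java.io streams and a hypothetical numeric version id in place of StreamInput/StreamOutput and Version:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class WireCompatDemo {
        static final int V_7_0_0 = 7_000_000; // hypothetical version id

        // Write side: emit the obsolete flag only for old peers, with a fixed value.
        static void writeRequest(DataOutputStream out, int peerVersion) throws IOException {
            if (peerVersion < V_7_0_0) {
                out.writeBoolean(true); // legacy updateAllTypes flag, meaningless since 7.0
            }
            out.writeUTF("my-index"); // ... rest of the request body ...
        }

        // Read side: consume and discard the flag when an old peer sent it.
        static void readRequest(DataInputStream in, int peerVersion) throws IOException {
            if (peerVersion < V_7_0_0) {
                in.readBoolean(); // discard legacy updateAllTypes flag
            }
            String index = in.readUTF();
        }
    }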

View File

@@ -239,12 +239,6 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
         return this;
     }

-    /** True if all fields that span multiple types should be updated, false otherwise */
-    public CreateIndexRequestBuilder setUpdateAllTypes(boolean updateAllTypes) {
-        request.updateAllTypes(updateAllTypes);
-        return this;
-    }
-
     /**
      * Sets the number of shard copies that should be active for index creation to return.
      * Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy

View File

@@ -72,7 +72,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<Create
         }

         final String indexName = indexNameExpressionResolver.resolveDateMathExpression(request.index());
-        final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index(), request.updateAllTypes())
+        final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.index())
             .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
             .settings(request.settings()).mappings(request.mappings())
             .aliases(request.aliases()).customs(request.customs())

View File

@@ -19,7 +19,7 @@
 package org.elasticsearch.action.admin.indices.flush;

-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;

 import java.util.List;
@@ -35,7 +35,7 @@ public class FlushResponse extends BroadcastResponse {
     }

-    FlushResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    FlushResponse(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
     }

View File

@@ -19,8 +19,8 @@
 package org.elasticsearch.action.admin.indices.flush;

-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -57,7 +57,8 @@ public class TransportFlushAction extends TransportBroadcastReplicationAction<Fl
     }

     @Override
-    protected FlushResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<ShardOperationFailedException> shardFailures) {
+    protected FlushResponse newResponse(int successfulShards, int failedShards, int totalNumCopies,
+                                        List<DefaultShardOperationFailedException> shardFailures) {
         return new FlushResponse(totalNumCopies, successfulShards, failedShards, shardFailures);
     }
 }
} }

View File

@@ -19,7 +19,7 @@
 package org.elasticsearch.action.admin.indices.forcemerge;

-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;

 import java.util.List;
@@ -32,7 +32,7 @@ public class ForceMergeResponse extends BroadcastResponse {
     ForceMergeResponse() {
     }

-    ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    ForceMergeResponse(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
     }
 }

View File

@@ -19,8 +19,8 @@
 package org.elasticsearch.action.admin.indices.forcemerge;

-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -62,7 +62,7 @@ public class TransportForceMergeAction extends TransportBroadcastByNodeAction<Fo
     }

     @Override
-    protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected ForceMergeResponse newResponse(ForceMergeRequest request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         return new ForceMergeResponse(totalShards, successfulShards, failedShards, shardFailures);
     }


@@ -30,8 +30,6 @@ public class PutMappingClusterStateUpdateRequest extends IndicesClusterStateUpda
     private String source;
-    private boolean updateAllTypes = false;
     public PutMappingClusterStateUpdateRequest() {
     }
@@ -53,13 +51,4 @@
         this.source = source;
         return this;
     }
-    public boolean updateAllTypes() {
-        return updateAllTypes;
-    }
-    public PutMappingClusterStateUpdateRequest updateAllTypes(boolean updateAllTypes) {
-        this.updateAllTypes = updateAllTypes;
-        return this;
-    }
 }


@@ -72,7 +72,6 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
     private String source;
-    private boolean updateAllTypes = false;
     private Index concreteIndex;
     public PutMappingRequest() {
@@ -290,17 +289,6 @@
         }
     }
-    /** True if all fields that span multiple types should be updated, false otherwise */
-    public boolean updateAllTypes() {
-        return updateAllTypes;
-    }
-    /** See {@link #updateAllTypes()} */
-    public PutMappingRequest updateAllTypes(boolean updateAllTypes) {
-        this.updateAllTypes = updateAllTypes;
-        return this;
-    }
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
@@ -312,7 +300,9 @@
             // we do not know the format from earlier versions so convert if necessary
             source = XContentHelper.convertToJson(new BytesArray(source), false, false, XContentFactory.xContentType(source));
         }
-        updateAllTypes = in.readBoolean();
+        if (in.getVersion().before(Version.V_7_0_0_alpha1)) {
+            in.readBoolean(); // updateAllTypes
+        }
         concreteIndex = in.readOptionalWriteable(Index::new);
     }
@@ -323,7 +313,9 @@
         indicesOptions.writeIndicesOptions(out);
         out.writeOptionalString(type);
         out.writeString(source);
-        out.writeBoolean(updateAllTypes);
+        if (out.getVersion().before(Version.V_7_0_0_alpha1)) {
+            out.writeBoolean(true); // updateAllTypes
+        }
         out.writeOptionalWriteable(concreteIndex);
     }
 }
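Note on the readFrom/writeTo hunks above: the removed field stays on the wire for old peers — it is still read (and discarded) from senders below 7.0, and still written (hard-coded to true, matching the only behavior 7.0 supports) to readers below 7.0. A minimal standalone sketch of the same version-gated pattern, using plain java.io streams and an int wire version as stand-ins for Elasticsearch's StreamInput/StreamOutput and Version:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class WireCompatSketch {
    // Stand-in for Version.V_7_0_0_alpha1: peers below this id still expect
    // the removed updateAllTypes flag on the wire.
    static final int V_7_0_0 = 7000000;

    static void writeRequest(DataOutputStream out, int peerVersion, String source) throws IOException {
        out.writeUTF(source);
        if (peerVersion < V_7_0_0) {
            out.writeBoolean(true); // updateAllTypes: emitted only for old peers
        }
    }

    static String readRequest(DataInputStream in, int peerVersion) throws IOException {
        String source = in.readUTF();
        if (peerVersion < V_7_0_0) {
            in.readBoolean(); // updateAllTypes: consumed and discarded
        }
        return source;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        writeRequest(new DataOutputStream(bytes), 6050000, "{\"properties\":{}}");
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readRequest(in, 6050000));
    }
}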


@@ -98,10 +98,4 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder<PutMapp
         return this;
     }
-    /** True if all fields that span multiple types should be updated, false otherwise */
-    public PutMappingRequestBuilder setUpdateAllTypes(boolean updateAllTypes) {
-        request.updateAllTypes(updateAllTypes);
-        return this;
-    }
 }


@@ -82,7 +82,6 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
             PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest()
                 .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
                 .indices(concreteIndices).type(request.type())
-                .updateAllTypes(request.updateAllTypes())
                 .source(request.source());
             metaDataMappingService.putMapping(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {


@@ -19,7 +19,7 @@
 package org.elasticsearch.action.admin.indices.recovery;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -56,7 +56,8 @@ public class RecoveryResponse extends BroadcastResponse implements ToXContentFra
      * @param shardFailures List of failures processing shards
      */
     public RecoveryResponse(int totalShards, int successfulShards, int failedShards, boolean detailed,
-                            Map<String, List<RecoveryState>> shardRecoveryStates, List<ShardOperationFailedException> shardFailures) {
+                            Map<String, List<RecoveryState>> shardRecoveryStates,
+                            List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
         this.shardRecoveryStates = shardRecoveryStates;
         this.detailed = detailed;


@@ -19,8 +19,8 @@
 package org.elasticsearch.action.admin.indices.recovery;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -69,7 +69,7 @@ public class TransportRecoveryAction extends TransportBroadcastByNodeAction<Reco
     @Override
-    protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, List<RecoveryState> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, List<RecoveryState> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         Map<String, List<RecoveryState>> shardResponses = new HashMap<>();
         for (RecoveryState recoveryState : responses) {
             if (recoveryState == null) {


@@ -19,7 +19,7 @@
 package org.elasticsearch.action.admin.indices.refresh;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import java.util.List;
@@ -32,7 +32,7 @@ public class RefreshResponse extends BroadcastResponse {
     RefreshResponse() {
     }
-    RefreshResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    RefreshResponse(int totalShards, int successfulShards, int failedShards, List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
     }
 }


@@ -19,9 +19,9 @@
 package org.elasticsearch.action.admin.indices.refresh;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.replication.BasicReplicationRequest;
 import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
@@ -61,7 +61,8 @@ public class TransportRefreshAction extends TransportBroadcastReplicationAction<
     }
     @Override
-    protected RefreshResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<ShardOperationFailedException> shardFailures) {
+    protected RefreshResponse newResponse(int successfulShards, int failedShards, int totalNumCopies,
+                                          List<DefaultShardOperationFailedException> shardFailures) {
         return new RefreshResponse(totalNumCopies, successfulShards, failedShards, shardFailures);
     }
 }


@@ -232,7 +232,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
         createIndexRequest.cause("rollover_index");
         createIndexRequest.index(targetIndexName);
         return new CreateIndexClusterStateUpdateRequest(createIndexRequest,
-            "rollover_index", targetIndexName, providedIndexName, true)
+            "rollover_index", targetIndexName, providedIndexName)
             .ackTimeout(createIndexRequest.timeout())
             .masterNodeTimeout(createIndexRequest.masterNodeTimeout())
             .settings(createIndexRequest.settings())


@@ -24,7 +24,7 @@ import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedNumericSortField;
 import org.apache.lucene.search.SortedSetSortField;
 import org.apache.lucene.util.Accountable;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -53,7 +53,8 @@ public class IndicesSegmentResponse extends BroadcastResponse implements ToXCont
     }
-    IndicesSegmentResponse(ShardSegments[] shards, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    IndicesSegmentResponse(ShardSegments[] shards, int totalShards, int successfulShards, int failedShards,
+                           List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
         this.shards = shards;
     }


@@ -19,8 +19,8 @@
 package org.elasticsearch.action.admin.indices.segments;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -77,7 +77,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeActi
     }
     @Override
-    protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardSegments> results, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardSegments> results, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         return new IndicesSegmentResponse(results.toArray(new ShardSegments[results.size()]), totalShards, successfulShards, failedShards, shardFailures);
     }


@@ -25,7 +25,6 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.collect.ImmutableOpenIntMap;
@@ -348,7 +347,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
             }
         }
         out.writeVInt(failures.size());
-        for (ShardOperationFailedException failure : failures) {
+        for (Failure failure : failures) {
             failure.writeTo(out);
         }
     }
@@ -357,7 +356,7 @@
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         if (failures.size() > 0) {
             builder.startArray(Fields.FAILURES);
-            for (ShardOperationFailedException failure : failures) {
+            for (Failure failure : failures) {
                 builder.startObject();
                 failure.toXContent(builder, params);
                 builder.endObject();


@@ -179,7 +179,7 @@ public class TransportResizeAction extends TransportMasterNodeAction<ResizeReque
         targetIndex.settings(settingsBuilder);
         return new CreateIndexClusterStateUpdateRequest(targetIndex,
-            cause, targetIndex.index(), targetIndexName, true)
+            cause, targetIndex.index(), targetIndexName)
             // mappings are updated on the node when creating in the shards, this prevents race-conditions since all mapping must be
             // applied once we took the snapshot and if somebody messes things up and switches the index read/write and adds docs we miss
             // the mappings for everything is corrupted and hard to debug


@@ -19,7 +19,7 @@
 package org.elasticsearch.action.admin.indices.stats;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -48,7 +48,8 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten
     }
-    IndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    IndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, int failedShards,
+                         List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
         this.shards = shards;
     }


@@ -19,8 +19,8 @@
 package org.elasticsearch.action.admin.indices.stats;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -79,7 +79,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
     }
     @Override
-    protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardStats> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardStats> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         return new IndicesStatsResponse(responses.toArray(new ShardStats[responses.size()]), totalShards, successfulShards, failedShards, shardFailures);
     }


@@ -20,8 +20,8 @@
 package org.elasticsearch.action.admin.indices.upgrade.get;
 import org.elasticsearch.Version;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -79,7 +79,7 @@ public class TransportUpgradeStatusAction extends TransportBroadcastByNodeAction
     }
     @Override
-    protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeStatus> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeStatus> responses, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         return new UpgradeStatusResponse(responses.toArray(new ShardUpgradeStatus[responses.size()]), totalShards, successfulShards, failedShards, shardFailures);
     }


@@ -19,11 +19,10 @@
 package org.elasticsearch.action.admin.indices.upgrade.get;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent.Params;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -43,7 +42,8 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte
     UpgradeStatusResponse() {
     }
-    UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards,
+                          List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
         this.shards = shards;
     }


@@ -22,8 +22,8 @@ package org.elasticsearch.action.admin.indices.upgrade.post;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.PrimaryMissingActionException;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -71,7 +71,7 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
     }
     @Override
-    protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeResult> shardUpgradeResults, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
+    protected UpgradeResponse newResponse(UpgradeRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeResult> shardUpgradeResults, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState) {
         Map<String, Integer> successfulPrimaryShards = new HashMap<>();
         Map<String, Tuple<Version, org.apache.lucene.util.Version>> versions = new HashMap<>();
         for (ShardUpgradeResult result : shardUpgradeResults) {


@@ -20,7 +20,7 @@
 package org.elasticsearch.action.admin.indices.upgrade.post;
 import org.elasticsearch.Version;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -44,7 +44,8 @@ public class UpgradeResponse extends BroadcastResponse {
     }
-    UpgradeResponse(Map<String, Tuple<Version, String>> versions, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    UpgradeResponse(Map<String, Tuple<Version, String>> versions, int totalShards, int successfulShards, int failedShards,
+                    List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
         this.versions = versions;
     }


@@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.validate.query;
 import org.apache.lucene.search.MatchNoDocsQuery;
 import org.apache.lucene.search.Query;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
@@ -115,7 +114,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
         int successfulShards = 0;
         int failedShards = 0;
         boolean valid = true;
-        List<ShardOperationFailedException> shardFailures = null;
+        List<DefaultShardOperationFailedException> shardFailures = null;
         List<QueryExplanation> queryExplanations = null;
         for (int i = 0; i < shardsResponses.length(); i++) {
             Object shardResponse = shardsResponses.get(i);


@@ -19,7 +19,7 @@
 package org.elasticsearch.action.admin.indices.validate.query;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -46,7 +46,8 @@ public class ValidateQueryResponse extends BroadcastResponse {
     }
-    ValidateQueryResponse(boolean valid, List<QueryExplanation> queryExplanations, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
+    ValidateQueryResponse(boolean valid, List<QueryExplanation> queryExplanations, int totalShards, int successfulShards, int failedShards,
+                          List<DefaultShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
         this.valid = valid;
         this.queryExplanations = queryExplanations;


@@ -20,11 +20,10 @@
 package org.elasticsearch.action.support.broadcast;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.rest.RestStatus;
-import org.elasticsearch.index.shard.ShardNotFoundException;
 import java.io.IOException;
 import java.util.List;
@@ -35,30 +34,24 @@ import static org.elasticsearch.action.support.DefaultShardOperationFailedExcept
  * Base class for all broadcast operation based responses.
  */
 public class BroadcastResponse extends ActionResponse {
-    private static final ShardOperationFailedException[] EMPTY = new ShardOperationFailedException[0];
+    private static final DefaultShardOperationFailedException[] EMPTY = new DefaultShardOperationFailedException[0];
     private int totalShards;
     private int successfulShards;
     private int failedShards;
-    private ShardOperationFailedException[] shardFailures = EMPTY;
+    private DefaultShardOperationFailedException[] shardFailures = EMPTY;
     public BroadcastResponse() {
     }
     public BroadcastResponse(int totalShards, int successfulShards, int failedShards,
-                             List<? extends ShardOperationFailedException> shardFailures) {
-        assertNoShardNotAvailableFailures(shardFailures);
+                             List<DefaultShardOperationFailedException> shardFailures) {
         this.totalShards = totalShards;
         this.successfulShards = successfulShards;
         this.failedShards = failedShards;
-        this.shardFailures = shardFailures == null ? EMPTY :
-            shardFailures.toArray(new ShardOperationFailedException[shardFailures.size()]);
-    }
-
-    private void assertNoShardNotAvailableFailures(List<? extends ShardOperationFailedException> shardFailures) {
-        if (shardFailures != null) {
-            for (Object e : shardFailures) {
-                assert (e instanceof ShardNotFoundException) == false : "expected no ShardNotFoundException failures, but got " + e;
-            }
+        if (shardFailures == null) {
+            this.shardFailures = EMPTY;
+        } else {
+            this.shardFailures = shardFailures.toArray(new DefaultShardOperationFailedException[shardFailures.size()]);
         }
     }
@@ -97,7 +90,7 @@ public class BroadcastResponse extends ActionResponse {
     /**
      * The list of shard failures exception.
     */
-    public ShardOperationFailedException[] getShardFailures() {
+    public DefaultShardOperationFailedException[] getShardFailures() {
         return shardFailures;
     }
@@ -109,7 +102,7 @@
         failedShards = in.readVInt();
         int size = in.readVInt();
         if (size > 0) {
-            shardFailures = new ShardOperationFailedException[size];
+            shardFailures = new DefaultShardOperationFailedException[size];
             for (int i = 0; i < size; i++) {
                 shardFailures[i] = readShardOperationFailed(in);
             }
@@ -123,7 +116,7 @@
         out.writeVInt(successfulShards);
         out.writeVInt(failedShards);
         out.writeVInt(shardFailures.length);
-        for (ShardOperationFailedException exp : shardFailures) {
+        for (DefaultShardOperationFailedException exp : shardFailures) {
             exp.writeTo(out);
         }
     }
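With BroadcastResponse now exposing the concrete DefaultShardOperationFailedException, callers can inspect failures without going through the ShardOperationFailedException interface. A hedged usage sketch against the 6.x transport client (the index name and printing are illustrative, not part of this change):

import org.elasticsearch.action.admin.indices.flush.FlushResponse;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.client.Client;

public class ShardFailureLogging {
    // index(), shardId(), reason() and status() are inherited from the
    // ShardOperationFailedException interface the class still implements.
    static void logFlushFailures(Client client, String index) {
        FlushResponse response = client.admin().indices().prepareFlush(index).get();
        for (DefaultShardOperationFailedException failure : response.getShardFailures()) {
            System.err.println("[" + failure.index() + "][" + failure.shardId() + "] "
                    + failure.status() + ": " + failure.reason());
        }
    }
}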


@@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.NoShardAvailableActionException;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.HandledTransportAction;
@@ -131,7 +130,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
             int totalShards = 0;
             int successfulShards = 0;
             List<ShardOperationResult> broadcastByNodeResponses = new ArrayList<>();
-            List<ShardOperationFailedException> exceptions = new ArrayList<>();
+            List<DefaultShardOperationFailedException> exceptions = new ArrayList<>();
             for (int i = 0; i < responses.length(); i++) {
                 if (responses.get(i) instanceof FailedNodeException) {
                     FailedNodeException exception = (FailedNodeException) responses.get(i);
@@ -176,7 +175,7 @@
      * @param clusterState the cluster state
      * @return the response
     */
-    protected abstract Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List<ShardOperationResult> results, List<ShardOperationFailedException> shardFailures, ClusterState clusterState);
+    protected abstract Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List<ShardOperationResult> results, List<DefaultShardOperationFailedException> shardFailures, ClusterState clusterState);
     /**
      * Deserialize a request from an input stream


@@ -22,7 +22,6 @@ package org.elasticsearch.action.support.replication;
 import com.carrotsearch.hppc.cursors.IntObjectCursor;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DefaultShardOperationFailedException;
 import org.elasticsearch.action.support.HandledTransportAction;
@@ -76,7 +75,7 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
     protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
         final ClusterState clusterState = clusterService.state();
         List<ShardId> shards = shards(request, clusterState);
-        final CopyOnWriteArrayList<ShardResponse> shardsResponses = new CopyOnWriteArrayList();
+        final CopyOnWriteArrayList<ShardResponse> shardsResponses = new CopyOnWriteArrayList<>();
         if (shards.size() == 0) {
             finishAndNotifyListener(listener, shardsResponses);
         }
@@ -148,7 +147,7 @@
         int successfulShards = 0;
         int failedShards = 0;
         int totalNumCopies = 0;
-        List<ShardOperationFailedException> shardFailures = null;
+        List<DefaultShardOperationFailedException> shardFailures = null;
         for (int i = 0; i < shardsResponses.size(); i++) {
             ReplicationResponse shardResponse = shardsResponses.get(i);
             if (shardResponse == null) {
@@ -168,5 +167,6 @@
         listener.onResponse(newResponse(successfulShards, failedShards, totalNumCopies, shardFailures));
     }
-    protected abstract BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies, List<ShardOperationFailedException> shardFailures);
+    protected abstract BroadcastResponse newResponse(int successfulShards, int failedShards, int totalNumCopies,
+                                                     List<DefaultShardOperationFailedException> shardFailures);
 }


@@ -275,14 +275,12 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, To
             if (!filteredValues.isEmpty()) {
                 // Make the list order deterministic
-                CollectionUtil.timSort(filteredValues, new Comparator<AliasMetaData>() {
-                    @Override
-                    public int compare(AliasMetaData o1, AliasMetaData o2) {
-                        return o1.alias().compareTo(o2.alias());
-                    }
-                });
-            }
+                CollectionUtil.timSort(filteredValues, Comparator.comparing(AliasMetaData::alias));
                 mapBuilder.put(index, Collections.unmodifiableList(filteredValues));
+            } else if (matchAllAliases) {
+                // in case all aliases are requested then it is desired to return the concrete index with no aliases (#25114):
+                mapBuilder.put(index, Collections.emptyList());
+            }
         }
         return mapBuilder.build();
     }
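The anonymous Comparator is replaced by Comparator.comparing with a method reference, which builds the same alias-ordering comparator in a single expression. A standalone sketch on a stand-in type (Alias here is illustrative, not AliasMetaData):

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class ComparatorDemo {
    static class Alias {
        final String alias;
        Alias(String alias) { this.alias = alias; }
        String alias() { return alias; }
        @Override
        public String toString() { return alias; }
    }

    public static void main(String[] args) {
        List<Alias> aliases = new ArrayList<>();
        aliases.add(new Alias("beta"));
        aliases.add(new Alias("alpha"));
        // Equivalent to the removed anonymous class:
        // (o1, o2) -> o1.alias().compareTo(o2.alias())
        aliases.sort(Comparator.comparing(Alias::alias));
        System.out.println(aliases); // [alpha, beta]
    }
}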


@@ -444,7 +444,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     // now add the mappings
                     MapperService mapperService = indexService.mapperService();
                     try {
-                        mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
+                        mapperService.merge(mappings, MergeReason.MAPPING_UPDATE);
                     } catch (Exception e) {
                         removalExtraInfo = "failed on parsing default mapping/mappings on index creation";
                         throw e;


@@ -144,7 +144,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
                     } catch (IOException e) {
                         throw new ElasticsearchException("Failed to create temporary index for parsing the alias", e);
                     }
-                    indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY, false);
+                    indexService.mapperService().merge(index, MapperService.MergeReason.MAPPING_RECOVERY);
                 }
                 indices.put(action.getIndex(), indexService);
             }


@@ -250,7 +250,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
                 mappingsForValidation.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue()));
             }
-            dummyIndexService.mapperService().merge(mappingsForValidation, MergeReason.MAPPING_UPDATE, false);
+            dummyIndexService.mapperService().merge(mappingsForValidation, MergeReason.MAPPING_UPDATE);
         } finally {
             if (createdIndex != null) {


@@ -187,7 +187,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
             try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap)) {
                 MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, xContentRegistry, similarityService,
                     mapperRegistry, () -> null);
-                mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, false);
+                mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY);
             }
         } catch (Exception ex) {
             // Wrap the inner exception so we have the index name in the exception message


@@ -147,7 +147,7 @@ public class MetaDataMappingService extends AbstractComponent {
                 // we need to create the index here, and add the current mapping to it, so we can merge
                 indexService = indicesService.createIndex(indexMetaData, Collections.emptyList());
                 removeIndex = true;
-                indexService.mapperService().merge(indexMetaData, MergeReason.MAPPING_RECOVERY, true);
+                indexService.mapperService().merge(indexMetaData, MergeReason.MAPPING_RECOVERY);
             }
             IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
@@ -224,7 +224,7 @@
                     MapperService mapperService = indicesService.createIndexMapperService(indexMetaData);
                     indexMapperServices.put(index, mapperService);
                     // add mappings for all types, we need them for cross-type validation
-                    mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
+                    mapperService.merge(indexMetaData, MergeReason.MAPPING_RECOVERY);
                 }
             }
             currentState = applyRequest(currentState, request, indexMapperServices);
@@ -264,7 +264,7 @@
                 newMapper = mapperService.parse(request.type(), mappingUpdateSource, existingMapper == null);
                 if (existingMapper != null) {
                     // first, simulate: just call merge and ignore the result
-                    existingMapper.merge(newMapper.mapping(), request.updateAllTypes());
+                    existingMapper.merge(newMapper.mapping());
                 } else {
                     // TODO: can we find a better place for this validation?
                     // The reason this validation is here is that the mapper service doesn't learn about
@@ -310,7 +310,7 @@
                 if (existingMapper != null) {
                     existingSource = existingMapper.mappingSource();
                 }
-                DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
+                DocumentMapper mergedMapper = mapperService.merge(mappingType, mappingUpdateSource, MergeReason.MAPPING_UPDATE);
                 CompressedXContent updatedSource = mergedMapper.mappingSource();
                 if (existingSource != null) {
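The "first, simulate" call above is a dry run: the merged result is discarded and only a thrown exception matters, so a bad mapping is rejected before any cluster-state change. A standalone sketch of the same validate-by-merging-a-copy idea (the map-of-field-types model is illustrative, not Elasticsearch's):

import java.util.HashMap;
import java.util.Map;

public class SimulateMergeDemo {
    // Merge update into target, failing on a conflicting redefinition.
    static void merge(Map<String, String> target, Map<String, String> update) {
        for (Map.Entry<String, String> entry : update.entrySet()) {
            String previous = target.put(entry.getKey(), entry.getValue());
            if (previous != null && !previous.equals(entry.getValue())) {
                throw new IllegalArgumentException("mapper [" + entry.getKey() + "] cannot change type");
            }
        }
    }

    public static void main(String[] args) {
        Map<String, String> live = new HashMap<>();
        live.put("title", "text");
        Map<String, String> update = new HashMap<>();
        update.put("views", "long");

        merge(new HashMap<>(live), update); // simulate: merge into a copy, discard the result
        merge(live, update);                // only now touch the live mapping
        System.out.println(live);
    }
}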


@@ -39,6 +39,7 @@ import java.util.Collections;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 /**
  * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to
@@ -205,13 +206,15 @@
     }
     /**
-     * Get the master node
-     *
-     * @return master node
+     * Returns the master node, or {@code null} if there is no master node
     */
+    @Nullable
     public DiscoveryNode getMasterNode() {
-        if (masterNodeId != null) {
-            return nodes.get(masterNodeId);
-        }
-        return null;
+        return nodes.get(masterNodeId);
     }
     /**
      * Get a node by its address
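The simplified getMasterNode works because map lookups of this kind return null for an absent (or null) key instead of throwing; the @Nullable annotation just makes the long-standing contract explicit. A quick standalone check of that property with java.util.HashMap (assuming Elasticsearch's node map behaves the same way, as this change implies):

import java.util.HashMap;
import java.util.Map;

public class NullKeyLookup {
    public static void main(String[] args) {
        Map<String, String> nodes = new HashMap<>();
        nodes.put("node-1", "node-1 attributes");
        System.out.println(nodes.get("node-2")); // null: unknown id
        System.out.println(nodes.get(null));     // null: no master elected
    }
}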
@@ -385,27 +388,20 @@
      * Returns the changes comparing this nodes to the provided nodes.
     */
     public Delta delta(DiscoveryNodes other) {
-        List<DiscoveryNode> removed = new ArrayList<>();
-        List<DiscoveryNode> added = new ArrayList<>();
+        final List<DiscoveryNode> removed = new ArrayList<>();
+        final List<DiscoveryNode> added = new ArrayList<>();
         for (DiscoveryNode node : other) {
-            if (!this.nodeExists(node)) {
+            if (this.nodeExists(node) == false) {
                 removed.add(node);
             }
         }
         for (DiscoveryNode node : this) {
-            if (!other.nodeExists(node)) {
+            if (other.nodeExists(node) == false) {
                 added.add(node);
             }
         }
-        DiscoveryNode previousMasterNode = null;
-        DiscoveryNode newMasterNode = null;
-        if (masterNodeId != null) {
-            if (other.masterNodeId == null || !other.masterNodeId.equals(masterNodeId)) {
-                previousMasterNode = other.getMasterNode();
-                newMasterNode = getMasterNode();
-            }
-        }
-        return new Delta(previousMasterNode, newMasterNode, localNodeId, Collections.unmodifiableList(removed),
+        return new Delta(other.getMasterNode(), getMasterNode(), localNodeId, Collections.unmodifiableList(removed),
             Collections.unmodifiableList(added));
     }
@@ -429,8 +425,8 @@
     public static class Delta {
         private final String localNodeId;
-        private final DiscoveryNode previousMasterNode;
-        private final DiscoveryNode newMasterNode;
+        @Nullable private final DiscoveryNode previousMasterNode;
+        @Nullable private final DiscoveryNode newMasterNode;
         private final List<DiscoveryNode> removed;
         private final List<DiscoveryNode> added;
@@ -448,13 +444,15 @@
         }
         public boolean masterNodeChanged() {
-            return newMasterNode != null;
+            return Objects.equals(newMasterNode, previousMasterNode) == false;
         }
+        @Nullable
         public DiscoveryNode previousMasterNode() {
             return previousMasterNode;
         }
+        @Nullable
         public DiscoveryNode newMasterNode() {
             return newMasterNode;
         }
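With both fields now nullable, Objects.equals is the safe comparison: it treats two nulls as equal and never dereferences either argument, so the new masterNodeChanged also reports the case where the master went away, which the old newMasterNode != null check could not express. An illustrative standalone version with strings in place of DiscoveryNode:

import java.util.Objects;

public class MasterChanged {
    static boolean masterNodeChanged(String previousMaster, String currentMaster) {
        return Objects.equals(currentMaster, previousMaster) == false;
    }

    public static void main(String[] args) {
        System.out.println(masterNodeChanged(null, "node-1"));     // true: master elected
        System.out.println(masterNodeChanged("node-1", null));     // true: master left
        System.out.println(masterNodeChanged("node-1", "node-2")); // true: master replaced
        System.out.println(masterNodeChanged("node-1", "node-1")); // false: unchanged
    }
}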
@@ -476,51 +474,45 @@
         }
         public String shortSummary() {
-            StringBuilder sb = new StringBuilder();
-            if (!removed() && masterNodeChanged()) {
-                if (newMasterNode.getId().equals(localNodeId)) {
-                    // we are the master, no nodes we removed, we are actually the first master
-                    sb.append("new_master ").append(newMasterNode());
-                } else {
-                    // we are not the master, so we just got this event. No nodes were removed, so its not a *new* master
-                    sb.append("detected_master ").append(newMasterNode());
-                }
-            } else {
-                if (masterNodeChanged()) {
-                    sb.append("master {new ").append(newMasterNode());
-                    if (previousMasterNode() != null) {
-                        sb.append(", previous ").append(previousMasterNode());
-                    }
-                    sb.append("}");
-                }
-                if (removed()) {
-                    if (masterNodeChanged()) {
-                        sb.append(", ");
-                    }
-                    sb.append("removed {");
-                    for (DiscoveryNode node : removedNodes()) {
-                        sb.append(node).append(',');
-                    }
-                    sb.append("}");
-                }
+            final StringBuilder summary = new StringBuilder();
+            if (masterNodeChanged()) {
+                summary.append("master node changed {previous [");
+                if (previousMasterNode() != null) {
+                    summary.append(previousMasterNode());
+                }
+                summary.append("], current [");
+                if (newMasterNode() != null) {
+                    summary.append(newMasterNode());
+                }
+                summary.append("]}");
+            }
+            if (removed()) {
+                if (summary.length() > 0) {
+                    summary.append(", ");
+                }
+                summary.append("removed {");
+                for (DiscoveryNode node : removedNodes()) {
+                    summary.append(node).append(',');
+                }
+                summary.append("}");
             }
             if (added()) {
                 // don't print if there is one added, and it is us
                 if (!(addedNodes().size() == 1 && addedNodes().get(0).getId().equals(localNodeId))) {
-                    if (removed() || masterNodeChanged()) {
-                        sb.append(", ");
+                    if (summary.length() > 0) {
+                        summary.append(", ");
                     }
-                    sb.append("added {");
+                    summary.append("added {");
                     for (DiscoveryNode node : addedNodes()) {
                         if (!node.getId().equals(localNodeId)) {
                             // don't print ourself
-                            sb.append(node).append(',');
+                            summary.append(node).append(',');
                         }
                     }
-                    sb.append("}");
+                    summary.append("}");
                 }
             }
-            return sb.toString();
+            return summary.toString();
         }
     }
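The rewritten shortSummary replaces the branch-per-combination logic with a single separator rule: each section appends ", " only when the builder already has content, so a master change, removals, and additions compose in any combination. A standalone sketch of that pattern (the section strings are illustrative):

public class SummarySections {
    public static void main(String[] args) {
        StringBuilder summary = new StringBuilder();
        String[] sections = {
            "master node changed {previous [node-1], current [node-2]}",
            "removed {node-1,}",
            "added {node-3,}"
        };
        for (String section : sections) {
            if (summary.length() > 0) {
                summary.append(", "); // separator only when something was already written
            }
            summary.append(section);
        }
        System.out.println(summary);
    }
}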


@@ -324,8 +324,8 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp
         }
         @Override
-        public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
-            super.checkCompatibility(fieldType, conflicts, strict);
+        public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts) {
+            super.checkCompatibility(fieldType, conflicts);
             CompletionFieldType other = (CompletionFieldType)fieldType;
             if (preservePositionIncrements != other.preservePositionIncrements) {
@@ -607,8 +607,8 @@
     }
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith;
         this.maxInputLength = fieldMergeWith.maxInputLength;
     }


@@ -219,8 +219,8 @@ public class DateFieldMapper extends FieldMapper {
         }
         @Override
-        public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
-            super.checkCompatibility(fieldType, conflicts, strict);
+        public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts) {
+            super.checkCompatibility(fieldType, conflicts);
             DateFieldType other = (DateFieldType) fieldType;
             if (Objects.equals(dateTimeFormatter().format(), other.dateTimeFormatter().format()) == false) {
                 conflicts.add("mapper [" + name() + "] has different [format] values");
@@ -472,8 +472,8 @@
     }
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         final DateFieldMapper other = (DateFieldMapper) mergeWith;
         if (other.ignoreMalformed.explicit()) {
             this.ignoreMalformed = other.ignoreMalformed;


@@ -296,8 +296,8 @@ public class DocumentMapper implements ToXContentFragment {
         return mapperService.getParentTypes().contains(type);
     }
-    public DocumentMapper merge(Mapping mapping, boolean updateAllTypes) {
-        Mapping merged = this.mapping.merge(mapping, updateAllTypes);
+    public DocumentMapper merge(Mapping mapping) {
+        Mapping merged = this.mapping.merge(mapping);
         return new DocumentMapper(mapperService, merged);
     }


@@ -218,7 +218,7 @@ final class DocumentParser {
                 // We can see the same mapper more than once, for example, if we had foo.bar and foo.baz, where
                 // foo did not yet exist. This will create 2 copies in dynamic mappings, which should be identical.
                 // Here we just skip over the duplicates, but we merge them to ensure there are no conflicts.
-                newMapper.merge(previousMapper, false);
+                newMapper.merge(previousMapper);
                 continue;
             }
             previousMapper = newMapper;
@@ -275,7 +275,7 @@
             int lastIndex = parentMappers.size() - 1;
             ObjectMapper withNewMapper = parentMappers.get(lastIndex).mappingUpdate(mapper);
             if (merge) {
-                withNewMapper = parentMappers.get(lastIndex).merge(withNewMapper, false);
+                withNewMapper = parentMappers.get(lastIndex).merge(withNewMapper);
             }
             parentMappers.set(lastIndex, withNewMapper);
         }


@@ -312,17 +312,16 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
     }
 
     @Override
-    public FieldMapper merge(Mapper mergeWith, boolean updateAllTypes) {
+    public FieldMapper merge(Mapper mergeWith) {
        FieldMapper merged = clone();
-        merged.doMerge(mergeWith, updateAllTypes);
+        merged.doMerge(mergeWith);
        return merged;
     }
 
     /**
      * Merge changes coming from {@code mergeWith} in place.
-     * @param updateAllTypes TODO
      */
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+    protected void doMerge(Mapper mergeWith) {
        if (!this.getClass().equals(mergeWith.getClass())) {
            String mergedType = mergeWith.getClass().getSimpleName();
            if (mergeWith instanceof FieldMapper) {
@@ -553,7 +552,7 @@ public abstract class FieldMapper extends Mapper implements Cloneable {
             if (mergeIntoMapper == null) {
                 newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper);
             } else {
-                FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper, false);
+                FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper);
                 newMappersBuilder.put(merged.simpleName(), merged); // override previous definition
             }
         }
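
For context, the merge methods above follow a copy-on-write contract: merge() clones the receiver, mutates only the clone via doMerge(), and leaves both inputs untouched. Below is a minimal, self-contained sketch of that pattern; SimpleMapper and maxInputLength are illustrative stand-ins, not the Elasticsearch classes.

    // Sketch of the clone-then-doMerge pattern used by FieldMapper.merge above.
    public class MergeSketch {
        static class SimpleMapper implements Cloneable {
            int maxInputLength;

            SimpleMapper(int maxInputLength) {
                this.maxInputLength = maxInputLength;
            }

            // Return a merged copy; neither 'this' nor 'mergeWith' is modified.
            SimpleMapper merge(SimpleMapper mergeWith) {
                SimpleMapper merged = clone();
                merged.doMerge(mergeWith);
                return merged;
            }

            // Mutate the clone in place with the incoming settings.
            void doMerge(SimpleMapper mergeWith) {
                this.maxInputLength = mergeWith.maxInputLength;
            }

            @Override
            protected SimpleMapper clone() {
                try {
                    return (SimpleMapper) super.clone();
                } catch (CloneNotSupportedException e) {
                    throw new AssertionError(e);
                }
            }
        }

        public static void main(String[] args) {
            SimpleMapper existing = new SimpleMapper(50);
            SimpleMapper incoming = new SimpleMapper(100);
            SimpleMapper merged = existing.merge(incoming);
            System.out.println(existing.maxInputLength); // 50: the original is unchanged
            System.out.println(merged.maxInputLength);   // 100: the copy carries the update
        }
    }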


@@ -165,17 +165,6 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper {
             return CONTENT_TYPE;
         }
 
-        @Override
-        public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
-            super.checkCompatibility(fieldType, conflicts, strict);
-            if (strict) {
-                FieldNamesFieldType other = (FieldNamesFieldType)fieldType;
-                if (isEnabled() != other.isEnabled()) {
-                    conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [enabled] across all types.");
-                }
-            }
-        }
-
         public void setEnabled(boolean enabled) {
             checkIfFrozen();
             this.enabled = enabled;


@@ -24,7 +24,6 @@ import org.elasticsearch.common.regex.Regex;
 
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -39,37 +38,13 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
     /** Full field name to field type */
     final CopyOnWriteHashMap<String, MappedFieldType> fullNameToFieldType;
 
-    /** Full field name to types containing a mapping for this full name. */
-    final CopyOnWriteHashMap<String, Set<String>> fullNameToTypes;
-
     /** Create a new empty instance. */
     FieldTypeLookup() {
         fullNameToFieldType = new CopyOnWriteHashMap<>();
-        fullNameToTypes = new CopyOnWriteHashMap<>();
     }
 
-    private FieldTypeLookup(
-            CopyOnWriteHashMap<String, MappedFieldType> fullName,
-            CopyOnWriteHashMap<String, Set<String>> fullNameToTypes) {
+    private FieldTypeLookup(CopyOnWriteHashMap<String, MappedFieldType> fullName) {
         this.fullNameToFieldType = fullName;
-        this.fullNameToTypes = fullNameToTypes;
-    }
-
-    private static CopyOnWriteHashMap<String, Set<String>> addType(CopyOnWriteHashMap<String, Set<String>> map, String key, String type) {
-        Set<String> types = map.get(key);
-        if (types == null) {
-            return map.copyAndPut(key, Collections.singleton(type));
-        } else if (types.contains(type)) {
-            // noting to do
-            return map;
-        } else {
-            Set<String> newTypes = new HashSet<>(types.size() + 1);
-            newTypes.addAll(types);
-            newTypes.add(type);
-            assert newTypes.size() == types.size() + 1;
-            newTypes = Collections.unmodifiableSet(newTypes);
-            return map.copyAndPut(key, newTypes);
-        }
     }
 
     /**
@@ -77,58 +52,41 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
      * from the provided fields. If a field already exists, the field type will be updated
      * to use the new mappers field type.
      */
-    public FieldTypeLookup copyAndAddAll(String type, Collection<FieldMapper> fieldMappers, boolean updateAllTypes) {
+    public FieldTypeLookup copyAndAddAll(String type, Collection<FieldMapper> fieldMappers) {
         Objects.requireNonNull(type, "type must not be null");
         if (MapperService.DEFAULT_MAPPING.equals(type)) {
             throw new IllegalArgumentException("Default mappings should not be added to the lookup");
         }
 
         CopyOnWriteHashMap<String, MappedFieldType> fullName = this.fullNameToFieldType;
-        CopyOnWriteHashMap<String, Set<String>> fullNameToTypes = this.fullNameToTypes;
 
         for (FieldMapper fieldMapper : fieldMappers) {
             MappedFieldType fieldType = fieldMapper.fieldType();
             MappedFieldType fullNameFieldType = fullName.get(fieldType.name());
 
-            // is the update even legal?
-            checkCompatibility(type, fieldMapper, updateAllTypes);
-
-            if (fieldType.equals(fullNameFieldType) == false) {
+            if (fullNameFieldType == null) {
+                // introduction of a new field
                 fullName = fullName.copyAndPut(fieldType.name(), fieldMapper.fieldType());
+            } else {
+                // modification of an existing field
+                checkCompatibility(fullNameFieldType, fieldType);
+                if (fieldType.equals(fullNameFieldType) == false) {
+                    fullName = fullName.copyAndPut(fieldType.name(), fieldMapper.fieldType());
+                }
             }
-
-            fullNameToTypes = addType(fullNameToTypes, fieldType.name(), type);
         }
-        return new FieldTypeLookup(fullName, fullNameToTypes);
-    }
-
-    private static boolean beStrict(String type, Set<String> types, boolean updateAllTypes) {
-        assert types.size() >= 1;
-        if (updateAllTypes) {
-            return false;
-        } else if (types.size() == 1 && types.contains(type)) {
-            // we are implicitly updating all types
-            return false;
-        } else {
-            return true;
-        }
-    }
+        return new FieldTypeLookup(fullName);
+    }
 
     /**
      * Checks if the given field type is compatible with an existing field type.
      * An IllegalArgumentException is thrown in case of incompatibility.
-     * If updateAllTypes is true, only basic compatibility is checked.
      */
-    private void checkCompatibility(String type, FieldMapper fieldMapper, boolean updateAllTypes) {
-        MappedFieldType fieldType = fullNameToFieldType.get(fieldMapper.fieldType().name());
-        if (fieldType != null) {
-            List<String> conflicts = new ArrayList<>();
-            final Set<String> types = fullNameToTypes.get(fieldMapper.fieldType().name());
-            boolean strict = beStrict(type, types, updateAllTypes);
-            fieldType.checkCompatibility(fieldMapper.fieldType(), conflicts, strict);
-            if (conflicts.isEmpty() == false) {
-                throw new IllegalArgumentException("Mapper for [" + fieldMapper.fieldType().name() + "] conflicts with existing mapping in other types:\n" + conflicts.toString());
-            }
-        }
-    }
+    private void checkCompatibility(MappedFieldType existingFieldType, MappedFieldType newFieldType) {
+        List<String> conflicts = new ArrayList<>();
+        existingFieldType.checkCompatibility(newFieldType, conflicts);
+        if (conflicts.isEmpty() == false) {
+            throw new IllegalArgumentException("Mapper for [" + newFieldType.name() + "] conflicts with existing mapping:\n" + conflicts.toString());
+        }
+    }
@@ -137,15 +95,6 @@ class FieldTypeLookup implements Iterable<MappedFieldType> {
         return fullNameToFieldType.get(field);
     }
 
-    /** Get the set of types that have a mapping for the given field. */
-    public Set<String> getTypes(String field) {
-        Set<String> types = fullNameToTypes.get(field);
-        if (types == null) {
-            types = Collections.emptySet();
-        }
-        return types;
-    }
-
     /**
      * Returns a list of the full names of a simple match regex like pattern against full name and index name.
     */
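
With the per-type bookkeeping gone, copyAndAddAll above reduces to two cases per incoming field: a brand-new field is put into the lookup directly, while an update to an existing field is validated first and rejected with an IllegalArgumentException on conflict. A minimal sketch of that control flow over a plain HashMap; FieldType and the [index] conflict rule here are illustrative stand-ins, not the Elasticsearch types:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class LookupSketch {
        // Illustrative stand-in: a field type is just a name plus an indexed flag.
        record FieldType(String name, boolean indexed) {}

        static final Map<String, FieldType> lookup = new HashMap<>();

        static void addOrUpdate(FieldType incoming) {
            FieldType existing = lookup.get(incoming.name());
            if (existing == null) {
                // introduction of a new field
                lookup.put(incoming.name(), incoming);
                return;
            }
            // modification of an existing field: validate before accepting
            List<String> conflicts = new ArrayList<>();
            if (existing.indexed() != incoming.indexed()) {
                conflicts.add("mapper [" + incoming.name() + "] has different [index] values");
            }
            if (conflicts.isEmpty() == false) {
                throw new IllegalArgumentException(
                        "Mapper for [" + incoming.name() + "] conflicts with existing mapping:\n" + conflicts);
            }
            lookup.put(incoming.name(), incoming);
        }

        public static void main(String[] args) {
            addOrUpdate(new FieldType("title", true));  // new field: accepted
            addOrUpdate(new FieldType("title", false)); // conflicting update: throws
        }
    }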


@@ -142,8 +142,8 @@ public class GeoPointFieldMapper extends FieldMapper implements ArrayValueMapper
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         GeoPointFieldMapper gpfmMergeWith = (GeoPointFieldMapper) mergeWith;
         if (gpfmMergeWith.ignoreMalformed.explicit()) {
             this.ignoreMalformed = gpfmMergeWith.ignoreMalformed;


@@ -309,8 +309,8 @@ public class GeoShapeFieldMapper extends FieldMapper {
         }
 
         @Override
-        public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
-            super.checkCompatibility(fieldType, conflicts, strict);
+        public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts) {
+            super.checkCompatibility(fieldType, conflicts);
             GeoShapeFieldType other = (GeoShapeFieldType)fieldType;
             // prevent user from changing strategies
             if (strategyName().equals(other.strategyName()) == false) {
@@ -334,15 +334,6 @@ public class GeoShapeFieldMapper extends FieldMapper {
             if (precisionInMeters() != other.precisionInMeters()) {
                 conflicts.add("mapper [" + name() + "] has different [precision]");
             }
-
-            if (strict) {
-                if (orientation() != other.orientation()) {
-                    conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [orientation] across all types.");
-                }
-                if (distanceErrorPct() != other.distanceErrorPct()) {
-                    conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [distance_error_pct] across all types.");
-                }
-            }
         }
 
         private static int getLevels(int treeLevels, double precisionInMeters, int defaultLevels, boolean geoHash) {
@@ -511,8 +502,8 @@ public class GeoShapeFieldMapper extends FieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith;
         if (gsfm.coerce.explicit()) {


@@ -314,7 +314,7 @@ public class IdFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+    protected void doMerge(Mapper mergeWith) {
         // do nothing here, no merging, but also no exception
     }
 }


@@ -189,7 +189,7 @@ public class IndexFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+    protected void doMerge(Mapper mergeWith) {
         // nothing to do
     }


@@ -390,8 +390,8 @@ public class IpFieldMapper extends FieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         IpFieldMapper other = (IpFieldMapper) mergeWith;
         if (other.ignoreMalformed.explicit()) {
             this.ignoreMalformed = other.ignoreMalformed;


@@ -187,8 +187,8 @@ public final class KeywordFieldMapper extends FieldMapper {
         }
 
         @Override
-        public void checkCompatibility(MappedFieldType otherFT, List<String> conflicts, boolean strict) {
-            super.checkCompatibility(otherFT, conflicts, strict);
+        public void checkCompatibility(MappedFieldType otherFT, List<String> conflicts) {
+            super.checkCompatibility(otherFT, conflicts);
             KeywordFieldType other = (KeywordFieldType) otherFT;
             if (Objects.equals(normalizer, other.normalizer) == false) {
                 conflicts.add("mapper [" + name() + "] has different [normalizer]");
@@ -352,8 +352,8 @@ public final class KeywordFieldMapper extends FieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         this.ignoreAbove = ((KeywordFieldMapper) mergeWith).ignoreAbove;
     }


@@ -157,7 +157,7 @@ public abstract class MappedFieldType extends FieldType {
     * If strict is true, all properties must be equal.
     * Otherwise, only properties which must never change in an index are checked.
     */
-    public void checkCompatibility(MappedFieldType other, List<String> conflicts, boolean strict) {
+    public void checkCompatibility(MappedFieldType other, List<String> conflicts) {
         checkTypeName(other);
 
         boolean indexed = indexOptions() != IndexOptions.NONE;
@@ -202,27 +202,6 @@ public abstract class MappedFieldType extends FieldType {
         if (Objects.equals(similarity(), other.similarity()) == false) {
             conflicts.add("mapper [" + name() + "] has different [similarity]");
         }
-
-        if (strict) {
-            if (omitNorms() != other.omitNorms()) {
-                conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [omit_norms] across all types.");
-            }
-            if (boost() != other.boost()) {
-                conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [boost] across all types.");
-            }
-            if (Objects.equals(searchAnalyzer(), other.searchAnalyzer()) == false) {
-                conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [search_analyzer] across all types.");
-            }
-            if (Objects.equals(searchQuoteAnalyzer(), other.searchQuoteAnalyzer()) == false) {
-                conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [search_quote_analyzer] across all types.");
-            }
-            if (Objects.equals(nullValue(), other.nullValue()) == false) {
-                conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [null_value] across all types.");
-            }
-            if (eagerGlobalOrdinals() != other.eagerGlobalOrdinals()) {
-                conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [eager_global_ordinals] across all types.");
-            }
-        }
     }
 
     public String name() {


@@ -175,7 +175,7 @@ public abstract class Mapper implements ToXContentFragment, Iterable<Mapper> {
 
     /** Return the merge of {@code mergeWith} into this.
     *  Both {@code this} and {@code mergeWith} will be left unmodified. */
-    public abstract Mapper merge(Mapper mergeWith, boolean updateAllTypes);
+    public abstract Mapper merge(Mapper mergeWith);
 
     /**
      * Update the field type of this mapper. This is necessary because some mapping updates


@@ -215,7 +215,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         final Map<String, DocumentMapper> updatedEntries;
         try {
             // only update entries if needed
-            updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true, true);
+            updatedEntries = internalMerge(indexMetaData, MergeReason.MAPPING_RECOVERY, true);
         } catch (Exception e) {
             logger.warn((org.apache.logging.log4j.util.Supplier<?>) () -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e);
             throw e;
@@ -250,7 +250,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         return requireRefresh;
     }
 
-    public void merge(Map<String, Map<String, Object>> mappings, MergeReason reason, boolean updateAllTypes) {
+    public void merge(Map<String, Map<String, Object>> mappings, MergeReason reason) {
         Map<String, CompressedXContent> mappingSourcesCompressed = new LinkedHashMap<>(mappings.size());
         for (Map.Entry<String, Map<String, Object>> entry : mappings.entrySet()) {
             try {
@@ -260,19 +260,18 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
             }
         }
 
-        internalMerge(mappingSourcesCompressed, reason, updateAllTypes);
+        internalMerge(mappingSourcesCompressed, reason);
     }
 
-    public void merge(IndexMetaData indexMetaData, MergeReason reason, boolean updateAllTypes) {
-        internalMerge(indexMetaData, reason, updateAllTypes, false);
+    public void merge(IndexMetaData indexMetaData, MergeReason reason) {
+        internalMerge(indexMetaData, reason, false);
     }
 
-    public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason, boolean updateAllTypes) {
-        return internalMerge(Collections.singletonMap(type, mappingSource), reason, updateAllTypes).get(type);
+    public DocumentMapper merge(String type, CompressedXContent mappingSource, MergeReason reason) {
+        return internalMerge(Collections.singletonMap(type, mappingSource), reason).get(type);
     }
 
-    private synchronized Map<String, DocumentMapper> internalMerge(IndexMetaData indexMetaData, MergeReason reason, boolean updateAllTypes,
-                                                                   boolean onlyUpdateIfNeeded) {
+    private synchronized Map<String, DocumentMapper> internalMerge(IndexMetaData indexMetaData, MergeReason reason, boolean onlyUpdateIfNeeded) {
         Map<String, CompressedXContent> map = new LinkedHashMap<>();
         for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
             MappingMetaData mappingMetaData = cursor.value;
@@ -285,10 +284,10 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
                 map.put(mappingMetaData.type(), mappingMetaData.source());
             }
         }
-        return internalMerge(map, reason, updateAllTypes);
+        return internalMerge(map, reason);
     }
 
-    private synchronized Map<String, DocumentMapper> internalMerge(Map<String, CompressedXContent> mappings, MergeReason reason, boolean updateAllTypes) {
+    private synchronized Map<String, DocumentMapper> internalMerge(Map<String, CompressedXContent> mappings, MergeReason reason) {
         DocumentMapper defaultMapper = null;
         String defaultMappingSource = null;
@@ -336,7 +335,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
             }
         }
 
-        return internalMerge(defaultMapper, defaultMappingSource, documentMappers, reason, updateAllTypes);
+        return internalMerge(defaultMapper, defaultMappingSource, documentMappers, reason);
     }
 
     static void validateTypeName(String type) {
@@ -361,7 +360,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     }
 
     private synchronized Map<String, DocumentMapper> internalMerge(@Nullable DocumentMapper defaultMapper, @Nullable String defaultMappingSource,
-                                                                   List<DocumentMapper> documentMappers, MergeReason reason, boolean updateAllTypes) {
+                                                                   List<DocumentMapper> documentMappers, MergeReason reason) {
         boolean hasNested = this.hasNested;
         Map<String, ObjectMapper> fullPathObjectMappers = this.fullPathObjectMappers;
         FieldTypeLookup fieldTypes = this.fieldTypes;
@@ -392,7 +391,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
             DocumentMapper oldMapper = mappers.get(mapper.type());
             DocumentMapper newMapper;
             if (oldMapper != null) {
-                newMapper = oldMapper.merge(mapper.mapping(), updateAllTypes);
+                newMapper = oldMapper.merge(mapper.mapping());
             } else {
                 newMapper = mapper;
             }
@@ -403,12 +402,12 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
             Collections.addAll(fieldMappers, newMapper.mapping().metadataMappers);
             MapperUtils.collect(newMapper.mapping().root(), objectMappers, fieldMappers);
             checkFieldUniqueness(newMapper.type(), objectMappers, fieldMappers, fullPathObjectMappers, fieldTypes);
-            checkObjectsCompatibility(objectMappers, updateAllTypes, fullPathObjectMappers);
+            checkObjectsCompatibility(objectMappers, fullPathObjectMappers);
             checkPartitionedIndexConstraints(newMapper);
 
             // update lookup data-structures
             // this will in particular make sure that the merged fields are compatible with other types
-            fieldTypes = fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers, updateAllTypes);
+            fieldTypes = fieldTypes.copyAndAddAll(newMapper.type(), fieldMappers);
 
             for (ObjectMapper objectMapper : objectMappers) {
                 if (fullPathObjectMappers == this.fullPathObjectMappers) {
@@ -575,14 +574,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         }
     }
 
-    private static void checkObjectsCompatibility(Collection<ObjectMapper> objectMappers, boolean updateAllTypes,
+    private static void checkObjectsCompatibility(Collection<ObjectMapper> objectMappers,
                                                   Map<String, ObjectMapper> fullPathObjectMappers) {
         for (ObjectMapper newObjectMapper : objectMappers) {
             ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath());
             if (existingObjectMapper != null) {
                 // simulate a merge and ignore the result, we are just interested
                 // in exceptions here
-                existingObjectMapper.merge(newObjectMapper, updateAllTypes);
+                existingObjectMapper.merge(newObjectMapper);
             }
         }
     }
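
For callers of MapperService, the visible difference is only the dropped trailing boolean; merges are otherwise invoked the same way. An illustrative fragment, assuming a mapperService and a CompressedXContent mappingSource already in scope:

    // before this commit:
    //   mapperService.merge("my_type", mappingSource, MapperService.MergeReason.MAPPING_UPDATE, false);
    // after this commit:
    DocumentMapper mapper = mapperService.merge("my_type", mappingSource, MapperService.MergeReason.MAPPING_UPDATE);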


@@ -84,9 +84,9 @@ public final class Mapping implements ToXContentFragment {
         return (T) metadataMappersMap.get(clazz);
     }
 
-    /** @see DocumentMapper#merge(Mapping, boolean) */
-    public Mapping merge(Mapping mergeWith, boolean updateAllTypes) {
-        RootObjectMapper mergedRoot = root.merge(mergeWith.root, updateAllTypes);
+    /** @see DocumentMapper#merge(Mapping) */
+    public Mapping merge(Mapping mergeWith) {
+        RootObjectMapper mergedRoot = root.merge(mergeWith.root);
         Map<Class<? extends MetadataFieldMapper>, MetadataFieldMapper> mergedMetaDataMappers = new HashMap<>(metadataMappersMap);
         for (MetadataFieldMapper metaMergeWith : mergeWith.metadataMappers) {
             MetadataFieldMapper mergeInto = mergedMetaDataMappers.get(metaMergeWith.getClass());
@@ -94,7 +94,7 @@ public final class Mapping implements ToXContentFragment {
             if (mergeInto == null) {
                 merged = metaMergeWith;
             } else {
-                merged = mergeInto.merge(metaMergeWith, updateAllTypes);
+                merged = mergeInto.merge(metaMergeWith);
             }
             mergedMetaDataMappers.put(merged.getClass(), merged);
         }


@@ -67,7 +67,7 @@ public abstract class MetadataFieldMapper extends FieldMapper {
     public abstract void postParse(ParseContext context) throws IOException;
 
     @Override
-    public MetadataFieldMapper merge(Mapper mergeWith, boolean updateAllTypes) {
-        return (MetadataFieldMapper) super.merge(mergeWith, updateAllTypes);
+    public MetadataFieldMapper merge(Mapper mergeWith) {
+        return (MetadataFieldMapper) super.merge(mergeWith);
     }
 }


@@ -1019,8 +1019,8 @@ public class NumberFieldMapper extends FieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         NumberFieldMapper other = (NumberFieldMapper) mergeWith;
         if (other.ignoreMalformed.explicit()) {
             this.ignoreMalformed = other.ignoreMalformed;


@@ -31,7 +31,6 @@ import org.elasticsearch.common.logging.ESLoggerFactory;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.index.fielddata.ScriptDocValues;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -139,7 +138,7 @@ public class ObjectMapper extends Mapper implements Cloneable {
                 Mapper mapper = builder.build(context);
                 Mapper existing = mappers.get(mapper.simpleName());
                 if (existing != null) {
-                    mapper = existing.merge(mapper, false);
+                    mapper = existing.merge(mapper);
                 }
                 mappers.put(mapper.simpleName(), mapper);
             }
@@ -426,17 +425,17 @@ public class ObjectMapper extends Mapper implements Cloneable {
     }
 
     @Override
-    public ObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) {
+    public ObjectMapper merge(Mapper mergeWith) {
         if (!(mergeWith instanceof ObjectMapper)) {
             throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]");
         }
         ObjectMapper mergeWithObject = (ObjectMapper) mergeWith;
         ObjectMapper merged = clone();
-        merged.doMerge(mergeWithObject, updateAllTypes);
+        merged.doMerge(mergeWithObject);
         return merged;
     }
 
-    protected void doMerge(final ObjectMapper mergeWith, boolean updateAllTypes) {
+    protected void doMerge(final ObjectMapper mergeWith) {
         if (nested().isNested()) {
             if (!mergeWith.nested().isNested()) {
                 throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from nested to non-nested");
@@ -459,7 +458,7 @@ public class ObjectMapper extends Mapper implements Cloneable {
                 merged = mergeWithMapper;
             } else {
                 // root mappers can only exist here for backcompat, and are merged in Mapping
-                merged = mergeIntoMapper.merge(mergeWithMapper, updateAllTypes);
+                merged = mergeIntoMapper.merge(mergeWithMapper);
             }
             putMapper(merged);
         }


@@ -301,7 +301,7 @@ public class ParentFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+    protected void doMerge(Mapper mergeWith) {
         ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith;
         if (fieldMergeWith.parentType != null && Objects.equals(parentType, fieldMergeWith.parentType) == false) {
             throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]");
@@ -310,7 +310,7 @@ public class ParentFieldMapper extends MetadataFieldMapper {
         // update that does not explicitly configure the _parent field, so we
         // ignore it.
         if (fieldMergeWith.active()) {
-            super.doMerge(mergeWith, updateAllTypes);
+            super.doMerge(mergeWith);
         }
     }


@@ -131,7 +131,7 @@ public class ParsedDocument {
         if (dynamicMappingsUpdate == null) {
             dynamicMappingsUpdate = update;
         } else {
-            dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update, false);
+            dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update);
         }
     }


@@ -256,29 +256,6 @@ public class RangeFieldMapper extends FieldMapper {
             return rangeType.name;
         }
 
-        @Override
-        public void checkCompatibility(MappedFieldType fieldType, List<String> conflicts, boolean strict) {
-            super.checkCompatibility(fieldType, conflicts, strict);
-            if (strict) {
-                RangeFieldType other = (RangeFieldType)fieldType;
-                if (this.rangeType != other.rangeType) {
-                    conflicts.add("mapper [" + name()
-                        + "] is attempting to update from type [" + rangeType.name
-                        + "] to incompatible type [" + other.rangeType.name + "].");
-                }
-                if (this.rangeType == RangeType.DATE) {
-                    if (Objects.equals(dateTimeFormatter().format(), other.dateTimeFormatter().format()) == false) {
-                        conflicts.add("mapper [" + name()
-                            + "] is used by multiple types. Set update_all_types to true to update [format] across all types.");
-                    }
-                    if (Objects.equals(dateTimeFormatter().locale(), other.dateTimeFormatter().locale()) == false) {
-                        conflicts.add("mapper [" + name()
-                            + "] is used by multiple types. Set update_all_types to true to update [locale] across all types.");
-                    }
-                }
-            }
-        }
-
         public FormatDateTimeFormatter dateTimeFormatter() {
             return dateTimeFormatter;
         }
@@ -416,8 +393,8 @@ public class RangeFieldMapper extends FieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
         RangeFieldMapper other = (RangeFieldMapper) mergeWith;
         if (other.coerce.explicit()) {
             this.coerce = other.coerce;


@@ -268,13 +268,13 @@ public class RootObjectMapper extends ObjectMapper {
     }
 
     @Override
-    public RootObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) {
-        return (RootObjectMapper) super.merge(mergeWith, updateAllTypes);
+    public RootObjectMapper merge(Mapper mergeWith) {
+        return (RootObjectMapper) super.merge(mergeWith);
     }
 
     @Override
-    protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(ObjectMapper mergeWith) {
+        super.doMerge(mergeWith);
         RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith;
         if (mergeWithObject.numericDetection.explicit()) {
             this.numericDetection = mergeWithObject.numericDetection;


@@ -201,7 +201,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+    protected void doMerge(Mapper mergeWith) {
         // do nothing here, no merging, but also no exception
     }
 }


@@ -278,7 +278,7 @@ public class SeqNoFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+    protected void doMerge(Mapper mergeWith) {
         // nothing to do
     }


@@ -291,7 +291,7 @@ public class SourceFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+    protected void doMerge(Mapper mergeWith) {
         SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith;
         List<String> conflicts = new ArrayList<>();
         if (this.enabled != sourceMergeWith.enabled) {


@@ -212,31 +212,6 @@ public class TextFieldMapper extends FieldMapper {
                     fielddataMinFrequency, fielddataMaxFrequency, fielddataMinSegmentSize);
         }
 
-        @Override
-        public void checkCompatibility(MappedFieldType other,
-                List<String> conflicts, boolean strict) {
-            super.checkCompatibility(other, conflicts, strict);
-            TextFieldType otherType = (TextFieldType) other;
-            if (strict) {
-                if (fielddata() != otherType.fielddata()) {
-                    conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update [fielddata] "
-                            + "across all types.");
-                }
-                if (fielddataMinFrequency() != otherType.fielddataMinFrequency()) {
-                    conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update "
-                            + "[fielddata_frequency_filter.min] across all types.");
-                }
-                if (fielddataMaxFrequency() != otherType.fielddataMaxFrequency()) {
-                    conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update "
-                            + "[fielddata_frequency_filter.max] across all types.");
-                }
-                if (fielddataMinSegmentSize() != otherType.fielddataMinSegmentSize()) {
-                    conflicts.add("mapper [" + name() + "] is used by multiple types. Set update_all_types to true to update "
-                            + "[fielddata_frequency_filter.min_segment_size] across all types.");
-                }
-            }
-        }
-
         public boolean fielddata() {
             return fielddata;
         }
@@ -357,8 +332,8 @@ public class TextFieldMapper extends FieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
-        super.doMerge(mergeWith, updateAllTypes);
+    protected void doMerge(Mapper mergeWith) {
+        super.doMerge(mergeWith);
     }
 
     @Override


@@ -316,7 +316,7 @@ public class TypeFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
+    protected void doMerge(Mapper mergeWith) {
         // do nothing here, no merging, but also no exception
     }
 }

Some files were not shown because too many files have changed in this diff.